hexsha stringlengths 40 40 | size int64 2 1.02M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 245 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 245 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 245 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 2 1.02M | avg_line_length float64 1 417k | max_line_length int64 1 987k | alphanum_fraction float64 0 1 | content_no_comment stringlengths 0 1.01M | is_comment_constant_removed bool 1
class | is_sharp_comment_removed bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f7173f406336288e5f91314047a20cea6d7099ba | 29,908 | py | Python | tests/test_sklearn_pipeline.py | vumichien/hummingbird | 8981e11ce2536167c329a5d9d20e81125a792fe4 | [
"MIT"
] | 2,772 | 2020-05-04T21:03:40.000Z | 2022-03-30T11:00:03.000Z | tests/test_sklearn_pipeline.py | vumichien/hummingbird | 8981e11ce2536167c329a5d9d20e81125a792fe4 | [
"MIT"
] | 486 | 2020-05-05T00:45:44.000Z | 2022-03-15T01:02:31.000Z | tests/test_sklearn_pipeline.py | vumichien/hummingbird | 8981e11ce2536167c329a5d9d20e81125a792fe4 | [
"MIT"
] | 232 | 2019-11-02T22:06:38.000Z | 2022-03-25T07:36:17.000Z | import unittest
import numpy as np
from sklearn import datasets
from sklearn.compose import ColumnTransformer
from sklearn.datasets import load_iris, load_diabetes
from sklearn.svm import LinearSVC, LinearSVR
from sklearn.datasets import make_regression
from sklearn.decomposition import PCA
from sklearn.linear_model import LogisticRegression, RidgeCV
from sklearn.ensemble import RandomForestRegressor, RandomForestClassifier
from sklearn.model_selection import train_test_split
from sklearn.pipeline import Pipeline, FeatureUnion, make_pipeline
from sklearn.preprocessing import OneHotEncoder, StandardScaler, MinMaxScaler
import hummingbird.ml
from hummingbird.ml._utils import pandas_installed, onnx_runtime_installed
from hummingbird.ml import constants
from onnxconverter_common.data_types import (
FloatTensorType,
Int64TensorType,
StringTensorType,
)
try:
from sklearn.impute import SimpleImputer
except ImportError:
from sklearn.preprocessing import Imputer as SimpleImputer
try:
from sklearn.ensemble import StackingClassifier, StackingRegressor
except ImportError:
StackingClassifier = None
if pandas_installed():
import pandas
class TestSklearnPipeline(unittest.TestCase):
    """End-to-end conversion tests for scikit-learn composite models.

    Common pattern: fit a scikit-learn model, convert it with
    ``hummingbird.ml.convert(model, backend[, sample_input])``, then assert the
    converted model's transform/predict/predict_proba output matches
    scikit-learn's to within rtol=atol=1e-06.

    When ``convert`` is called WITHOUT a sample input, the converted model is
    fed raw numpy arrays (hence ``X_test.values``); when a DataFrame sample is
    supplied, the converted model accepts the DataFrame directly.
    """

    def test_pipeline(self):
        """Pipeline of two chained StandardScaler steps converts to torch."""
        data = np.array([[0, 0], [0, 0], [1, 1], [1, 1]], dtype=np.float32)
        scaler = StandardScaler()
        scaler.fit(data)
        # The same fitted scaler instance is deliberately reused for both steps.
        model = Pipeline([("scaler1", scaler), ("scaler2", scaler)])
        torch_model = hummingbird.ml.convert(model, "torch")
        self.assertTrue(torch_model is not None)
        np.testing.assert_allclose(
            model.transform(data), torch_model.transform(data), rtol=1e-06, atol=1e-06,
        )

    def test_pipeline2(self):
        """Same as test_pipeline but with explicitly float-valued input data."""
        data = np.array([[0.0, 0.0], [0.0, 0.0], [1.0, 1.0], [1.0, 1.0]], dtype=np.float32)
        scaler = StandardScaler()
        scaler.fit(data)
        model = Pipeline([("scaler1", scaler), ("scaler2", scaler)])
        torch_model = hummingbird.ml.convert(model, "torch")
        self.assertTrue(torch_model is not None)
        np.testing.assert_allclose(
            model.transform(data), torch_model.transform(data), rtol=1e-06, atol=1e-06,
        )

    def test_combine_inputs_union_in_pipeline(self):
        """A FeatureUnion of two scalers nested inside a Pipeline converts to torch."""
        # Local imports shadow the module-level ones; kept as in the original.
        from sklearn.preprocessing import StandardScaler
        from sklearn.pipeline import Pipeline
        data = np.array([[0.0, 0.0], [0.0, 0.0], [1.0, 1.0], [1.0, 1.0]], dtype=np.float32)
        model = Pipeline(
            [
                ("scaler1", StandardScaler()),
                ("union", FeatureUnion([("scaler2", StandardScaler()), ("scaler3", MinMaxScaler())])),
            ]
        )
        model.fit(data)
        torch_model = hummingbird.ml.convert(model, "torch")
        self.assertTrue(torch_model is not None)
        np.testing.assert_allclose(
            model.transform(data), torch_model.transform(data), rtol=1e-06, atol=1e-06,
        )

    def test_combine_inputs_floats_ints(self):
        """Input rows mixing Python ints and floats still convert and match."""
        data = [[0, 0.0], [0, 0.0], [1, 1.0], [1, 1.0]]
        scaler = StandardScaler()
        scaler.fit(data)
        model = Pipeline([("scaler1", scaler), ("scaler2", scaler)])
        torch_model = hummingbird.ml.convert(model, "torch")
        self.assertTrue(torch_model is not None)
        np.testing.assert_allclose(
            model.transform(data), torch_model.transform(data), rtol=1e-06, atol=1e-06,
        )

    @unittest.skipIf(not pandas_installed(), reason="Test requires pandas installed")
    def test_pipeline_column_transformer_1(self):
        """ColumnTransformer with a single numeric transformer feeding LogisticRegression."""
        iris = datasets.load_iris()
        X = iris.data[:, :3]
        y = iris.target
        X_train = pandas.DataFrame(X, columns=["vA", "vB", "vC"])
        # Derive two synthetic integer "categorical" columns from the numeric ones.
        X_train["vcat"] = X_train["vA"].apply(lambda x: 1 if x > 0.5 else 2)
        X_train["vcat2"] = X_train["vB"].apply(lambda x: 3 if x > 0.5 else 4)
        y_train = y % 2  # binarize the 3-class iris target
        numeric_features = [0, 1, 2]  # ["vA", "vB", "vC"]
        classifier = LogisticRegression(
            C=0.01, class_weight=dict(zip([False, True], [0.2, 0.8])), n_jobs=1, max_iter=10, solver="liblinear", tol=1e-3,
        )
        numeric_transformer = Pipeline(steps=[("scaler", StandardScaler())])
        preprocessor = ColumnTransformer(transformers=[("num", numeric_transformer, numeric_features)])
        # (step name "precprocessor" is a typo, but it is only a label and does
        # not affect behavior; several tests below repeat it)
        model = Pipeline(steps=[("precprocessor", preprocessor), ("classifier", classifier)])
        model.fit(X_train, y_train)
        X_test = X_train[:11]
        torch_model = hummingbird.ml.convert(model, "torch")
        self.assertTrue(torch_model is not None)
        # No sample input was given to convert(), so pass raw numpy values.
        np.testing.assert_allclose(
            model.predict_proba(X_test), torch_model.predict_proba(X_test.values), rtol=1e-06, atol=1e-06,
        )

    @unittest.skipIf(not pandas_installed(), reason="Test requires pandas installed")
    def test_pipeline_column_transformer_string(self):
        """ColumnTransformer over titanic numeric + (cast) categorical columns.

        NOTE(review): the original TODO claimed string inputs are unsupported
        and this "should raise error", but the test casts 'pclass' to int64
        before fitting and asserts a SUCCESSFUL conversion — no string column
        reaches the converter. Confirm whether the TODO was stale.
        NOTE(review): downloads the dataset over the network; fails offline.
        """
        # fit
        titanic_url = "https://raw.githubusercontent.com/amueller/scipy-2017-sklearn/091d371/notebooks/datasets/titanic3.csv"
        data = pandas.read_csv(titanic_url)
        X = data.drop("survived", axis=1)
        y = data["survived"]
        # SimpleImputer on string is not available for string
        # in ONNX-ML specifications.
        # So we do it beforehand.
        # NOTE(review): fillna(..., inplace=True) on a selected column triggers
        # chained-assignment warnings on newer pandas — confirm pandas pin.
        X["pclass"].fillna("missing", inplace=True)
        X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
        numeric_features = ["age", "fare"]
        numeric_transformer = Pipeline(steps=[("imputer", SimpleImputer(strategy="median")), ("scaler", StandardScaler())])
        categorical_features = ["pclass"]
        categorical_transformer = Pipeline(steps=[("onehot", OneHotEncoder(handle_unknown="ignore"))])
        preprocessor = ColumnTransformer(
            transformers=[
                ("num", numeric_transformer, numeric_features),
                ("cat", categorical_transformer, categorical_features),
            ]
        )
        clf = Pipeline(steps=[("preprocessor", preprocessor), ("classifier", LogisticRegression(solver="liblinear"))])
        # Drop everything except the columns the preprocessor consumes.
        to_drop = {"parch", "sibsp", "cabin", "ticket", "name", "body", "home.dest", "boat", "sex", "embarked"}
        X_train = X_train.copy()
        X_test = X_test.copy()
        X_train["pclass"] = X_train["pclass"].astype(np.int64)
        X_test["pclass"] = X_test["pclass"].astype(np.int64)
        X_train = X_train.drop(to_drop, axis=1)
        X_test = X_test.drop(to_drop, axis=1)
        clf.fit(X_train, y_train)
        # Sample DataFrame supplied, so the converted model accepts DataFrames.
        torch_model = hummingbird.ml.convert(clf, "torch", X_test)
        self.assertTrue(torch_model is not None)
        np.testing.assert_allclose(
            clf.predict(X_test), torch_model.predict(X_test), rtol=1e-06, atol=1e-06,
        )

    @unittest.skipIf(not pandas_installed(), reason="Test requires pandas installed")
    def test_pipeline_column_transformer(self):
        """ColumnTransformer with numeric scaling + one-hot categorical branches."""
        iris = datasets.load_iris()
        X = iris.data[:, :3]
        y = iris.target
        X_train = pandas.DataFrame(X, columns=["vA", "vB", "vC"])
        X_train["vcat"] = X_train["vA"].apply(lambda x: 1 if x > 0.5 else 2)
        X_train["vcat2"] = X_train["vB"].apply(lambda x: 3 if x > 0.5 else 4)
        y_train = y % 2
        numeric_features = [0, 1, 2]  # ["vA", "vB", "vC"]
        categorical_features = [3, 4]  # ["vcat", "vcat2"]
        classifier = LogisticRegression(
            C=0.01, class_weight=dict(zip([False, True], [0.2, 0.8])), n_jobs=1, max_iter=10, solver="liblinear", tol=1e-3,
        )
        numeric_transformer = Pipeline(steps=[("scaler", StandardScaler())])
        # NOTE(review): OneHotEncoder's `sparse` kwarg was renamed
        # `sparse_output` in scikit-learn 1.2 and removed in 1.4 — this (and
        # the identical usages in the tests below) assumes an older
        # scikit-learn; confirm the version pin.
        categorical_transformer = Pipeline(steps=[("onehot", OneHotEncoder(sparse=True, handle_unknown="ignore"))])
        preprocessor = ColumnTransformer(
            transformers=[
                ("num", numeric_transformer, numeric_features),
                ("cat", categorical_transformer, categorical_features),
            ]
        )
        model = Pipeline(steps=[("precprocessor", preprocessor), ("classifier", classifier)])
        model.fit(X_train, y_train)
        X_test = X_train[:11]
        torch_model = hummingbird.ml.convert(model, "torch")
        self.assertTrue(torch_model is not None)
        np.testing.assert_allclose(
            model.predict_proba(X_test), torch_model.predict_proba(X_test.values), rtol=1e-06, atol=1e-06,
        )

    @unittest.skipIf(not pandas_installed(), reason="Test requires pandas installed")
    def test_pipeline_column_transformer_pandas(self):
        """Same as test_pipeline_column_transformer but converted with a DataFrame sample."""
        iris = datasets.load_iris()
        X = iris.data[:, :3]
        y = iris.target
        X_train = pandas.DataFrame(X, columns=["vA", "vB", "vC"])
        X_train["vcat"] = X_train["vA"].apply(lambda x: 1 if x > 0.5 else 2)
        X_train["vcat2"] = X_train["vB"].apply(lambda x: 3 if x > 0.5 else 4)
        y_train = y % 2
        numeric_features = [0, 1, 2]  # ["vA", "vB", "vC"]
        categorical_features = [3, 4]  # ["vcat", "vcat2"]
        classifier = LogisticRegression(
            C=0.01, class_weight=dict(zip([False, True], [0.2, 0.8])), n_jobs=1, max_iter=10, solver="liblinear", tol=1e-3,
        )
        numeric_transformer = Pipeline(steps=[("scaler", StandardScaler())])
        categorical_transformer = Pipeline(steps=[("onehot", OneHotEncoder(sparse=True, handle_unknown="ignore"))])
        preprocessor = ColumnTransformer(
            transformers=[
                ("num", numeric_transformer, numeric_features),
                ("cat", categorical_transformer, categorical_features),
            ]
        )
        model = Pipeline(steps=[("precprocessor", preprocessor), ("classifier", classifier)])
        model.fit(X_train, y_train)
        X_test = X_train[:11]
        torch_model = hummingbird.ml.convert(model, "torch", X_test)
        self.assertTrue(torch_model is not None)
        # DataFrame passed directly because a sample input was supplied above.
        np.testing.assert_allclose(
            model.predict_proba(X_test), torch_model.predict_proba(X_test), rtol=1e-06, atol=1e-06,
        )

    @unittest.skipIf(not pandas_installed(), reason="Test requires pandas installed")
    def test_pipeline_column_transformer_pandas_ts(self):
        """Same pipeline converted with the TorchScript ("torch.jit") backend."""
        iris = datasets.load_iris()
        X = np.array(iris.data[:, :3], np.float32)  # If we don't use float32 here, with python 3.5 and torch 1.5.1 will fail.
        y = iris.target
        X_train = pandas.DataFrame(X, columns=["vA", "vB", "vC"])
        X_train["vcat"] = X_train["vA"].apply(lambda x: 1 if x > 0.5 else 2)
        X_train["vcat2"] = X_train["vB"].apply(lambda x: 3 if x > 0.5 else 4)
        y_train = y % 2
        numeric_features = [0, 1, 2]  # ["vA", "vB", "vC"]
        categorical_features = [3, 4]  # ["vcat", "vcat2"]
        classifier = LogisticRegression(
            C=0.01, class_weight=dict(zip([False, True], [0.2, 0.8])), n_jobs=1, max_iter=10, solver="liblinear", tol=1e-3,
        )
        numeric_transformer = Pipeline(steps=[("scaler", StandardScaler())])
        categorical_transformer = Pipeline(steps=[("onehot", OneHotEncoder(sparse=True, handle_unknown="ignore"))])
        preprocessor = ColumnTransformer(
            transformers=[
                ("num", numeric_transformer, numeric_features),
                ("cat", categorical_transformer, categorical_features),
            ]
        )
        model = Pipeline(steps=[("preprocessor", preprocessor), ("classifier", classifier)])
        model.fit(X_train, y_train)
        X_test = X_train[:11]
        torch_model = hummingbird.ml.convert(model, "torch.jit", X_test)
        self.assertTrue(torch_model is not None)
        np.testing.assert_allclose(
            model.predict_proba(X_test), torch_model.predict_proba(X_test), rtol=1e-06, atol=1e-06,
        )

    @unittest.skipIf(not pandas_installed(), reason="Test requires pandas installed")
    def test_pipeline_column_transformer_weights(self):
        """ColumnTransformer with per-transformer output weights converts correctly."""
        iris = datasets.load_iris()
        X = iris.data[:, :3]
        y = iris.target
        X_train = pandas.DataFrame(X, columns=["vA", "vB", "vC"])
        X_train["vcat"] = X_train["vA"].apply(lambda x: 1 if x > 0.5 else 2)
        X_train["vcat2"] = X_train["vB"].apply(lambda x: 3 if x > 0.5 else 4)
        y_train = y % 2
        numeric_features = [0, 1, 2]  # ["vA", "vB", "vC"]
        categorical_features = [3, 4]  # ["vcat", "vcat2"]
        classifier = LogisticRegression(
            C=0.01, class_weight=dict(zip([False, True], [0.2, 0.8])), n_jobs=1, max_iter=10, solver="liblinear", tol=1e-3,
        )
        numeric_transformer = Pipeline(steps=[("scaler", StandardScaler())])
        categorical_transformer = Pipeline(steps=[("onehot", OneHotEncoder(sparse=True, handle_unknown="ignore"))])
        preprocessor = ColumnTransformer(
            transformers=[
                ("num", numeric_transformer, numeric_features),
                ("cat", categorical_transformer, categorical_features),
            ],
            transformer_weights={"num": 2, "cat": 3},
        )
        model = Pipeline(steps=[("preprocessor", preprocessor), ("classifier", classifier)])
        model.fit(X_train, y_train)
        X_test = X_train[:11]
        torch_model = hummingbird.ml.convert(model, "torch")
        self.assertTrue(torch_model is not None)
        np.testing.assert_allclose(
            model.predict_proba(X_test), torch_model.predict_proba(X_test.values), rtol=1e-06, atol=1e-06,
        )

    @unittest.skipIf(not pandas_installed(), reason="Test requires pandas installed")
    def test_pipeline_column_transformer_weights_pandas(self):
        """Transformer weights + conversion with a DataFrame sample input."""
        iris = datasets.load_iris()
        X = iris.data[:, :3]
        y = iris.target
        X_train = pandas.DataFrame(X, columns=["vA", "vB", "vC"])
        X_train["vcat"] = X_train["vA"].apply(lambda x: 1 if x > 0.5 else 2)
        X_train["vcat2"] = X_train["vB"].apply(lambda x: 3 if x > 0.5 else 4)
        y_train = y % 2
        numeric_features = [0, 1, 2]  # ["vA", "vB", "vC"]
        categorical_features = [3, 4]  # ["vcat", "vcat2"]
        classifier = LogisticRegression(
            C=0.01, class_weight=dict(zip([False, True], [0.2, 0.8])), n_jobs=1, max_iter=10, solver="liblinear", tol=1e-3,
        )
        numeric_transformer = Pipeline(steps=[("scaler", StandardScaler())])
        categorical_transformer = Pipeline(steps=[("onehot", OneHotEncoder(sparse=True, handle_unknown="ignore"))])
        preprocessor = ColumnTransformer(
            transformers=[
                ("num", numeric_transformer, numeric_features),
                ("cat", categorical_transformer, categorical_features),
            ],
            transformer_weights={"num": 2, "cat": 3},
        )
        model = Pipeline(steps=[("precprocessor", preprocessor), ("classifier", classifier)])
        model.fit(X_train, y_train)
        X_test = X_train[:11]
        torch_model = hummingbird.ml.convert(model, "torch", X_test)
        self.assertTrue(torch_model is not None)
        np.testing.assert_allclose(
            model.predict_proba(X_test), torch_model.predict_proba(X_test), rtol=1e-06, atol=1e-06,
        )

    @unittest.skipIf(not pandas_installed(), reason="Test requires pandas installed")
    def test_pipeline_column_transformer_drop(self):
        """ColumnTransformer with weights and remainder="drop" (column "vC" unused)."""
        iris = datasets.load_iris()
        X = iris.data[:, :3]
        y = iris.target
        X_train = pandas.DataFrame(X, columns=["vA", "vB", "vC"])
        X_train["vcat"] = X_train["vA"].apply(lambda x: 1 if x > 0.5 else 2)
        X_train["vcat2"] = X_train["vB"].apply(lambda x: 3 if x > 0.5 else 4)
        y_train = y % 2
        numeric_features = [0, 1]  # ["vA", "vB"]
        categorical_features = [3, 4]  # ["vcat", "vcat2"]
        classifier = LogisticRegression(
            C=0.01, class_weight=dict(zip([False, True], [0.2, 0.8])), n_jobs=1, max_iter=10, solver="liblinear", tol=1e-3,
        )
        numeric_transformer = Pipeline(steps=[("scaler", StandardScaler())])
        categorical_transformer = Pipeline(steps=[("onehot", OneHotEncoder(sparse=True, handle_unknown="ignore"))])
        preprocessor = ColumnTransformer(
            transformers=[
                ("num", numeric_transformer, numeric_features),
                ("cat", categorical_transformer, categorical_features),
            ],
            transformer_weights={"num": 2, "cat": 3},
            remainder="drop",
        )
        model = Pipeline(steps=[("precprocessor", preprocessor), ("classifier", classifier)])
        model.fit(X_train, y_train)
        X_test = X_train[:11]
        torch_model = hummingbird.ml.convert(model, "torch")
        self.assertTrue(torch_model is not None)
        np.testing.assert_allclose(
            model.predict_proba(X_test), torch_model.predict_proba(X_test.values), rtol=1e-06, atol=1e-06,
        )

    @unittest.skipIf(not pandas_installed(), reason="Test requires pandas installed")
    def test_pipeline_column_transformer_drop_noweights(self):
        """remainder="drop" without transformer weights."""
        iris = datasets.load_iris()
        X = iris.data[:, :3]
        y = iris.target
        X_train = pandas.DataFrame(X, columns=["vA", "vB", "vC"])
        X_train["vcat"] = X_train["vA"].apply(lambda x: 1 if x > 0.5 else 2)
        X_train["vcat2"] = X_train["vB"].apply(lambda x: 3 if x > 0.5 else 4)
        y_train = y % 2
        numeric_features = [0, 1]  # ["vA", "vB"]
        categorical_features = [3, 4]  # ["vcat", "vcat2"]
        classifier = LogisticRegression(
            C=0.01, class_weight=dict(zip([False, True], [0.2, 0.8])), n_jobs=1, max_iter=10, solver="liblinear", tol=1e-3,
        )
        numeric_transformer = Pipeline(steps=[("scaler", StandardScaler())])
        categorical_transformer = Pipeline(steps=[("onehot", OneHotEncoder(sparse=True, handle_unknown="ignore"))])
        preprocessor = ColumnTransformer(
            transformers=[
                ("num", numeric_transformer, numeric_features),
                ("cat", categorical_transformer, categorical_features),
            ],
            remainder="drop",
        )
        model = Pipeline(steps=[("precprocessor", preprocessor), ("classifier", classifier)])
        model.fit(X_train, y_train)
        X_test = X_train[:11]
        torch_model = hummingbird.ml.convert(model, "torch")
        self.assertTrue(torch_model is not None)
        np.testing.assert_allclose(
            model.predict_proba(X_test), torch_model.predict_proba(X_test.values), rtol=1e-06, atol=1e-06,
        )

    @unittest.skipIf(not pandas_installed(), reason="Test requires pandas installed")
    def test_pipeline_column_transformer_passthrough(self):
        """remainder="passthrough" with transformer weights ("vC" passed through untouched)."""
        iris = datasets.load_iris()
        X = iris.data[:, :3]
        y = iris.target
        X_train = pandas.DataFrame(X, columns=["vA", "vB", "vC"])
        X_train["vcat"] = X_train["vA"].apply(lambda x: 1 if x > 0.5 else 2)
        X_train["vcat2"] = X_train["vB"].apply(lambda x: 3 if x > 0.5 else 4)
        y_train = y % 2
        numeric_features = [0, 1]  # ["vA", "vB"]
        categorical_features = [3, 4]  # ["vcat", "vcat2"]
        classifier = LogisticRegression(
            C=0.01, class_weight=dict(zip([False, True], [0.2, 0.8])), n_jobs=1, max_iter=10, solver="liblinear", tol=1e-3,
        )
        numeric_transformer = Pipeline(steps=[("scaler", StandardScaler())])
        categorical_transformer = Pipeline(steps=[("onehot", OneHotEncoder(sparse=True, handle_unknown="ignore"))])
        preprocessor = ColumnTransformer(
            transformers=[
                ("num", numeric_transformer, numeric_features),
                ("cat", categorical_transformer, categorical_features),
            ],
            transformer_weights={"num": 2, "cat": 3},
            remainder="passthrough",
        )
        model = Pipeline(steps=[("precprocessor", preprocessor), ("classifier", classifier)])
        model.fit(X_train, y_train)
        X_test = X_train[:11]
        torch_model = hummingbird.ml.convert(model, "torch")
        self.assertTrue(torch_model is not None)
        np.testing.assert_allclose(
            model.predict_proba(X_test), torch_model.predict_proba(X_test.values), rtol=1e-06, atol=1e-06,
        )

    @unittest.skipIf(not pandas_installed(), reason="Test requires pandas installed")
    def test_pipeline_column_transformer_passthrough_noweights(self):
        """remainder="passthrough" without transformer weights."""
        iris = datasets.load_iris()
        X = iris.data[:, :3]
        y = iris.target
        X_train = pandas.DataFrame(X, columns=["vA", "vB", "vC"])
        X_train["vcat"] = X_train["vA"].apply(lambda x: 1 if x > 0.5 else 2)
        X_train["vcat2"] = X_train["vB"].apply(lambda x: 3 if x > 0.5 else 4)
        y_train = y % 2
        numeric_features = [0, 1]  # ["vA", "vB"]
        categorical_features = [3, 4]  # ["vcat", "vcat2"]
        classifier = LogisticRegression(
            C=0.01, class_weight=dict(zip([False, True], [0.2, 0.8])), n_jobs=1, max_iter=10, solver="liblinear", tol=1e-3,
        )
        numeric_transformer = Pipeline(steps=[("scaler", StandardScaler())])
        categorical_transformer = Pipeline(steps=[("onehot", OneHotEncoder(sparse=True, handle_unknown="ignore"))])
        preprocessor = ColumnTransformer(
            transformers=[
                ("num", numeric_transformer, numeric_features),
                ("cat", categorical_transformer, categorical_features),
            ],
            remainder="passthrough",
        )
        model = Pipeline(steps=[("precprocessor", preprocessor), ("classifier", classifier)])
        model.fit(X_train, y_train)
        X_test = X_train[:11]
        torch_model = hummingbird.ml.convert(model, "torch")
        self.assertTrue(torch_model is not None)
        np.testing.assert_allclose(
            model.predict_proba(X_test), torch_model.predict_proba(X_test.values), rtol=1e-06, atol=1e-06,
        )

    @unittest.skipIf(not pandas_installed(), reason="Test requires pandas installed")
    def test_pipeline_column_transformer_passthrough_slice(self):
        """Column selection via slice objects plus remainder="passthrough"."""
        iris = datasets.load_iris()
        X = iris.data[:, :3]
        y = iris.target
        X_train = pandas.DataFrame(X, columns=["vA", "vB", "vC"])
        X_train["vcat"] = X_train["vA"].apply(lambda x: 1 if x > 0.5 else 2)
        X_train["vcat2"] = X_train["vB"].apply(lambda x: 3 if x > 0.5 else 4)
        y_train = y % 2
        # NOTE(review): Python slices exclude the stop index, so slice(0, 1)
        # selects only column 0 ("vA") and slice(3, 4) only column 3 ("vcat") —
        # the original comments claiming two columns each were wrong.
        numeric_features = slice(0, 1)  # ["vA"]
        categorical_features = slice(3, 4)  # ["vcat"]
        classifier = LogisticRegression(
            C=0.01, class_weight=dict(zip([False, True], [0.2, 0.8])), n_jobs=1, max_iter=10, solver="liblinear", tol=1e-3,
        )
        numeric_transformer = Pipeline(steps=[("scaler", StandardScaler())])
        categorical_transformer = Pipeline(steps=[("onehot", OneHotEncoder(sparse=True, handle_unknown="ignore"))])
        preprocessor = ColumnTransformer(
            transformers=[
                ("num", numeric_transformer, numeric_features),
                ("cat", categorical_transformer, categorical_features),
            ],
            transformer_weights={"num": 2, "cat": 3},
            remainder="passthrough",
        )
        model = Pipeline(steps=[("precprocessor", preprocessor), ("classifier", classifier)])
        model.fit(X_train, y_train)
        X_test = X_train[:11]
        torch_model = hummingbird.ml.convert(model, "torch")
        self.assertTrue(torch_model is not None)
        np.testing.assert_allclose(
            model.predict_proba(X_test), torch_model.predict_proba(X_test.values), rtol=1e-06, atol=1e-06,
        )

    # Taken from https://github.com/microsoft/hummingbird/issues/388
    def test_pipeline_pca_rf(self):
        """StandardScaler -> PCA -> RandomForestRegressor pipeline converts and matches."""
        X, y = make_regression(n_samples=1000, n_features=8, n_informative=5, n_targets=1, random_state=0, shuffle=True)
        pca = PCA(n_components=8, svd_solver="randomized", whiten=True)
        clf = make_pipeline(StandardScaler(), pca, RandomForestRegressor(n_estimators=10, max_depth=30, random_state=0))
        clf.fit(X, y)
        # Backend name "pytorch" — presumably an alias of "torch"; confirm in
        # the hummingbird.ml.convert documentation.
        model = hummingbird.ml.convert(clf, "pytorch")
        prediction_sk = clf.predict([[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]])
        prediction_hb = model.predict([[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]])
        np.testing.assert_allclose(prediction_sk, prediction_hb, rtol=1e-06, atol=1e-06)

    @unittest.skipIf(not onnx_runtime_installed(), reason="Test requires ORT installed")
    def test_pipeline_many_inputs(self):
        """ONNX conversion from a tuple of 18 single-column arrays yields one graph input each."""
        n_features = 18
        X = np.random.rand(100, n_features)
        y = np.random.randint(1000, size=100)
        scaler_transformer = Pipeline(steps=[("scaler", StandardScaler())])
        preprocessor = ColumnTransformer(transformers=[("scaling", scaler_transformer, list(range(n_features)))])
        model = RandomForestRegressor(n_estimators=10, max_depth=9)
        pipeline = Pipeline(steps=[("preprocessor", preprocessor), ("model", model)])
        pipeline.fit(X, y)
        # One (100, 1) array per feature — exercises the multi-input ONNX path.
        X_test = tuple(np.split(X, n_features, axis=1))
        hb_model = hummingbird.ml.convert(pipeline, "onnx", X_test)
        assert len(hb_model.model.graph.input) == n_features
        np.testing.assert_allclose(
            pipeline.predict(X), np.array(hb_model.predict(X_test)).flatten(), rtol=1e-06, atol=1e-06,
        )

    @unittest.skipIf(not onnx_runtime_installed(), reason="Test requires ORT installed")
    def test_pipeline_many_inputs_with_schema(self):
        """ONNX graph input/output names follow the user-supplied schema in extra_config."""
        n_features = 5
        X = np.random.rand(100, n_features)
        y = np.random.randint(1000, size=100)
        input_column_names = ["A", "B", "C", "D", "E"]
        output_column_names = ["score"]
        scaler_transformer = Pipeline(steps=[("scaler", StandardScaler())])
        preprocessor = ColumnTransformer(transformers=[("scaling", scaler_transformer, list(range(n_features)))])
        model = RandomForestRegressor(n_estimators=10, max_depth=9)
        pipeline = Pipeline(steps=[("preprocessor", preprocessor), ("model", model)])
        pipeline.fit(X, y)
        X_test = tuple(np.split(X, n_features, axis=1))
        extra_config = {constants.INPUT_NAMES: input_column_names, constants.OUTPUT_NAMES: output_column_names}
        hb_model = hummingbird.ml.convert(pipeline, "onnx", X_test, extra_config=extra_config)
        graph_inputs = [input.name for input in hb_model.model.graph.input]
        graph_outputs = [output.name for output in hb_model.model.graph.output]
        assert len(hb_model.model.graph.input) == n_features
        assert graph_inputs == input_column_names
        assert graph_outputs == output_column_names

    @unittest.skipIf(StackingClassifier is None, reason="StackingClassifier not available in scikit-learn < 0.22")
    def test_stacking_classifier(self):
        """StackingClassifier (RF + scaled LogisticRegression) converts to torch."""
        X, y = load_iris(return_X_y=True)
        estimators = [
            ("rf", RandomForestClassifier(n_estimators=10, random_state=42)),
            ("svr", make_pipeline(StandardScaler(), LogisticRegression(random_state=42))),
        ]
        clf = StackingClassifier(estimators=estimators, final_estimator=LogisticRegression())
        X_train, X_test, y_train, y_test = train_test_split(X, y, stratify=y, random_state=42)
        clf.fit(X_train, y_train)
        hb_model = hummingbird.ml.convert(clf, "torch")
        np.testing.assert_allclose(
            clf.predict(X_test), hb_model.predict(X_test), rtol=1e-06, atol=1e-06,
        )

    @unittest.skipIf(StackingClassifier is None, reason="StackingClassifier not available in scikit-learn < 0.22")
    def test_stacking_classifier_passthrough(self):
        """StackingClassifier with passthrough=True (raw features fed to the final estimator)."""
        X, y = load_iris(return_X_y=True)
        estimators = [
            ("rf", RandomForestClassifier(n_estimators=10, random_state=42)),
            ("svr", make_pipeline(StandardScaler(), LogisticRegression(random_state=42))),
        ]
        clf = StackingClassifier(estimators=estimators, final_estimator=LogisticRegression(), passthrough=True)
        X_train, X_test, y_train, y_test = train_test_split(X, y, stratify=y, random_state=42)
        clf.fit(X_train, y_train)
        hb_model = hummingbird.ml.convert(clf, "torch")
        np.testing.assert_allclose(
            clf.predict(X_test), hb_model.predict(X_test), rtol=1e-06, atol=1e-06,
        )

    @unittest.skipIf(StackingClassifier is None, reason="StackingClassifier not available in scikit-learn < 0.22")
    def test_stacking_classifier_decision_function(self):
        """Conversion is expected to raise ValueError for this stack.

        The LinearSVC base estimator exposes decision_function rather than
        predict_proba (per the test name) — the converter rejects it.
        """
        X, y = load_iris(return_X_y=True)
        estimators = [
            ("rf", RandomForestClassifier(n_estimators=10, random_state=42)),
            ("svr", make_pipeline(StandardScaler(), LinearSVC(random_state=42))),
        ]
        clf = StackingClassifier(estimators=estimators, final_estimator=LogisticRegression())
        X_train, X_test, y_train, y_test = train_test_split(X, y, stratify=y, random_state=42)
        clf.fit(X_train, y_train)
        self.assertRaises(ValueError, hummingbird.ml.convert, clf, "torch")

    @unittest.skipIf(StackingClassifier is None, reason="StackingRegressor not available in scikit-learn < 0.22")
    def test_stacking_regressor(self):
        """StackingRegressor (RidgeCV + LinearSVR, RF final estimator) converts to torch."""
        X, y = load_diabetes(return_X_y=True)
        estimators = [("lr", RidgeCV()), ("svr", LinearSVR(random_state=42))]
        reg = StackingRegressor(estimators=estimators, final_estimator=RandomForestRegressor(n_estimators=10, random_state=42))
        X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42)
        reg.fit(X_train, y_train)
        hb_model = hummingbird.ml.convert(reg, "torch")
        np.testing.assert_allclose(
            reg.predict(X_test), hb_model.predict(X_test), rtol=1e-06, atol=1e-06,
        )
if __name__ == "__main__":
    # Allow running this test module directly (python test_sklearn_pipeline.py).
    unittest.main()
| 40.307278 | 127 | 0.630032 | import unittest
import numpy as np
from sklearn import datasets
from sklearn.compose import ColumnTransformer
from sklearn.datasets import load_iris, load_diabetes
from sklearn.svm import LinearSVC, LinearSVR
from sklearn.datasets import make_regression
from sklearn.decomposition import PCA
from sklearn.linear_model import LogisticRegression, RidgeCV
from sklearn.ensemble import RandomForestRegressor, RandomForestClassifier
from sklearn.model_selection import train_test_split
from sklearn.pipeline import Pipeline, FeatureUnion, make_pipeline
from sklearn.preprocessing import OneHotEncoder, StandardScaler, MinMaxScaler
import hummingbird.ml
from hummingbird.ml._utils import pandas_installed, onnx_runtime_installed
from hummingbird.ml import constants
from onnxconverter_common.data_types import (
FloatTensorType,
Int64TensorType,
StringTensorType,
)
try:
from sklearn.impute import SimpleImputer
except ImportError:
from sklearn.preprocessing import Imputer as SimpleImputer
try:
from sklearn.ensemble import StackingClassifier, StackingRegressor
except ImportError:
StackingClassifier = None
if pandas_installed():
import pandas
class TestSklearnPipeline(unittest.TestCase):
def test_pipeline(self):
data = np.array([[0, 0], [0, 0], [1, 1], [1, 1]], dtype=np.float32)
scaler = StandardScaler()
scaler.fit(data)
model = Pipeline([("scaler1", scaler), ("scaler2", scaler)])
torch_model = hummingbird.ml.convert(model, "torch")
self.assertTrue(torch_model is not None)
np.testing.assert_allclose(
model.transform(data), torch_model.transform(data), rtol=1e-06, atol=1e-06,
)
def test_pipeline2(self):
data = np.array([[0.0, 0.0], [0.0, 0.0], [1.0, 1.0], [1.0, 1.0]], dtype=np.float32)
scaler = StandardScaler()
scaler.fit(data)
model = Pipeline([("scaler1", scaler), ("scaler2", scaler)])
torch_model = hummingbird.ml.convert(model, "torch")
self.assertTrue(torch_model is not None)
np.testing.assert_allclose(
model.transform(data), torch_model.transform(data), rtol=1e-06, atol=1e-06,
)
def test_combine_inputs_union_in_pipeline(self):
from sklearn.preprocessing import StandardScaler
from sklearn.pipeline import Pipeline
data = np.array([[0.0, 0.0], [0.0, 0.0], [1.0, 1.0], [1.0, 1.0]], dtype=np.float32)
model = Pipeline(
[
("scaler1", StandardScaler()),
("union", FeatureUnion([("scaler2", StandardScaler()), ("scaler3", MinMaxScaler())])),
]
)
model.fit(data)
torch_model = hummingbird.ml.convert(model, "torch")
self.assertTrue(torch_model is not None)
np.testing.assert_allclose(
model.transform(data), torch_model.transform(data), rtol=1e-06, atol=1e-06,
)
def test_combine_inputs_floats_ints(self):
data = [[0, 0.0], [0, 0.0], [1, 1.0], [1, 1.0]]
scaler = StandardScaler()
scaler.fit(data)
model = Pipeline([("scaler1", scaler), ("scaler2", scaler)])
torch_model = hummingbird.ml.convert(model, "torch")
self.assertTrue(torch_model is not None)
np.testing.assert_allclose(
model.transform(data), torch_model.transform(data), rtol=1e-06, atol=1e-06,
)
@unittest.skipIf(not pandas_installed(), reason="Test requires pandas installed")
def test_pipeline_column_transformer_1(self):
iris = datasets.load_iris()
X = iris.data[:, :3]
y = iris.target
X_train = pandas.DataFrame(X, columns=["vA", "vB", "vC"])
X_train["vcat"] = X_train["vA"].apply(lambda x: 1 if x > 0.5 else 2)
X_train["vcat2"] = X_train["vB"].apply(lambda x: 3 if x > 0.5 else 4)
y_train = y % 2
numeric_features = [0, 1, 2]
classifier = LogisticRegression(
C=0.01, class_weight=dict(zip([False, True], [0.2, 0.8])), n_jobs=1, max_iter=10, solver="liblinear", tol=1e-3,
)
numeric_transformer = Pipeline(steps=[("scaler", StandardScaler())])
preprocessor = ColumnTransformer(transformers=[("num", numeric_transformer, numeric_features)])
model = Pipeline(steps=[("precprocessor", preprocessor), ("classifier", classifier)])
model.fit(X_train, y_train)
X_test = X_train[:11]
torch_model = hummingbird.ml.convert(model, "torch")
self.assertTrue(torch_model is not None)
np.testing.assert_allclose(
model.predict_proba(X_test), torch_model.predict_proba(X_test.values), rtol=1e-06, atol=1e-06,
)
    @unittest.skipIf(not pandas_installed(), reason="Test requires pandas installed")
    def test_pipeline_column_transformer_string(self):
        """ColumnTransformer with imputation + one-hot over the Titanic dataset converts to torch.

        NOTE(review): downloads the CSV over the network, so this test requires connectivity.
        """
        titanic_url = "https://raw.githubusercontent.com/amueller/scipy-2017-sklearn/091d371/notebooks/datasets/titanic3.csv"
        data = pandas.read_csv(titanic_url)
        X = data.drop("survived", axis=1)
        y = data["survived"]
        X["pclass"].fillna("missing", inplace=True)
        X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
        numeric_features = ["age", "fare"]
        numeric_transformer = Pipeline(steps=[("imputer", SimpleImputer(strategy="median")), ("scaler", StandardScaler())])
        categorical_features = ["pclass"]
        categorical_transformer = Pipeline(steps=[("onehot", OneHotEncoder(handle_unknown="ignore"))])
        preprocessor = ColumnTransformer(
            transformers=[
                ("num", numeric_transformer, numeric_features),
                ("cat", categorical_transformer, categorical_features),
            ]
        )
        clf = Pipeline(steps=[("preprocessor", preprocessor), ("classifier", LogisticRegression(solver="liblinear"))])
        # Keep only the columns the pipeline actually uses.
        to_drop = {"parch", "sibsp", "cabin", "ticket", "name", "body", "home.dest", "boat", "sex", "embarked"}
        X_train = X_train.copy()
        X_test = X_test.copy()
        X_train["pclass"] = X_train["pclass"].astype(np.int64)
        X_test["pclass"] = X_test["pclass"].astype(np.int64)
        X_train = X_train.drop(to_drop, axis=1)
        X_test = X_test.drop(to_drop, axis=1)
        clf.fit(X_train, y_train)
        torch_model = hummingbird.ml.convert(clf, "torch", X_test)
        self.assertTrue(torch_model is not None)
        np.testing.assert_allclose(
            clf.predict(X_test), torch_model.predict(X_test), rtol=1e-06, atol=1e-06,
        )
    @unittest.skipIf(not pandas_installed(), reason="Test requires pandas installed")
    def test_pipeline_column_transformer(self):
        """ColumnTransformer with numeric scaling plus one-hot categorical columns; torch conversion matches sklearn."""
        iris = datasets.load_iris()
        X = iris.data[:, :3]
        y = iris.target
        X_train = pandas.DataFrame(X, columns=["vA", "vB", "vC"])
        X_train["vcat"] = X_train["vA"].apply(lambda x: 1 if x > 0.5 else 2)
        X_train["vcat2"] = X_train["vB"].apply(lambda x: 3 if x > 0.5 else 4)
        y_train = y % 2
        numeric_features = [0, 1, 2]
        categorical_features = [3, 4]
        classifier = LogisticRegression(
            C=0.01, class_weight=dict(zip([False, True], [0.2, 0.8])), n_jobs=1, max_iter=10, solver="liblinear", tol=1e-3,
        )
        numeric_transformer = Pipeline(steps=[("scaler", StandardScaler())])
        categorical_transformer = Pipeline(steps=[("onehot", OneHotEncoder(sparse=True, handle_unknown="ignore"))])
        preprocessor = ColumnTransformer(
            transformers=[
                ("num", numeric_transformer, numeric_features),
                ("cat", categorical_transformer, categorical_features),
            ]
        )
        model = Pipeline(steps=[("precprocessor", preprocessor), ("classifier", classifier)])
        model.fit(X_train, y_train)
        X_test = X_train[:11]
        torch_model = hummingbird.ml.convert(model, "torch")
        self.assertTrue(torch_model is not None)
        np.testing.assert_allclose(
            model.predict_proba(X_test), torch_model.predict_proba(X_test.values), rtol=1e-06, atol=1e-06,
        )
    @unittest.skipIf(not pandas_installed(), reason="Test requires pandas installed")
    def test_pipeline_column_transformer_pandas(self):
        """Same as test_pipeline_column_transformer, but conversion and prediction take a pandas DataFrame directly."""
        iris = datasets.load_iris()
        X = iris.data[:, :3]
        y = iris.target
        X_train = pandas.DataFrame(X, columns=["vA", "vB", "vC"])
        X_train["vcat"] = X_train["vA"].apply(lambda x: 1 if x > 0.5 else 2)
        X_train["vcat2"] = X_train["vB"].apply(lambda x: 3 if x > 0.5 else 4)
        y_train = y % 2
        numeric_features = [0, 1, 2]
        categorical_features = [3, 4]
        classifier = LogisticRegression(
            C=0.01, class_weight=dict(zip([False, True], [0.2, 0.8])), n_jobs=1, max_iter=10, solver="liblinear", tol=1e-3,
        )
        numeric_transformer = Pipeline(steps=[("scaler", StandardScaler())])
        categorical_transformer = Pipeline(steps=[("onehot", OneHotEncoder(sparse=True, handle_unknown="ignore"))])
        preprocessor = ColumnTransformer(
            transformers=[
                ("num", numeric_transformer, numeric_features),
                ("cat", categorical_transformer, categorical_features),
            ]
        )
        model = Pipeline(steps=[("precprocessor", preprocessor), ("classifier", classifier)])
        model.fit(X_train, y_train)
        X_test = X_train[:11]
        # Passing X_test at convert time lets hummingbird infer the DataFrame input schema.
        torch_model = hummingbird.ml.convert(model, "torch", X_test)
        self.assertTrue(torch_model is not None)
        np.testing.assert_allclose(
            model.predict_proba(X_test), torch_model.predict_proba(X_test), rtol=1e-06, atol=1e-06,
        )
    @unittest.skipIf(not pandas_installed(), reason="Test requires pandas installed")
    def test_pipeline_column_transformer_pandas_ts(self):
        """ColumnTransformer pipeline converted to TorchScript ("torch.jit") from a pandas DataFrame."""
        iris = datasets.load_iris()
        X = np.array(iris.data[:, :3], np.float32)
        y = iris.target
        X_train = pandas.DataFrame(X, columns=["vA", "vB", "vC"])
        X_train["vcat"] = X_train["vA"].apply(lambda x: 1 if x > 0.5 else 2)
        X_train["vcat2"] = X_train["vB"].apply(lambda x: 3 if x > 0.5 else 4)
        y_train = y % 2
        numeric_features = [0, 1, 2]  # ["vA", "vB", "vC"]
        categorical_features = [3, 4]  # ["vcat", "vcat2"]
        classifier = LogisticRegression(
            C=0.01, class_weight=dict(zip([False, True], [0.2, 0.8])), n_jobs=1, max_iter=10, solver="liblinear", tol=1e-3,
        )
        numeric_transformer = Pipeline(steps=[("scaler", StandardScaler())])
        categorical_transformer = Pipeline(steps=[("onehot", OneHotEncoder(sparse=True, handle_unknown="ignore"))])
        preprocessor = ColumnTransformer(
            transformers=[
                ("num", numeric_transformer, numeric_features),
                ("cat", categorical_transformer, categorical_features),
            ]
        )
        model = Pipeline(steps=[("preprocessor", preprocessor), ("classifier", classifier)])
        model.fit(X_train, y_train)
        X_test = X_train[:11]
        torch_model = hummingbird.ml.convert(model, "torch.jit", X_test)
        self.assertTrue(torch_model is not None)
        np.testing.assert_allclose(
            model.predict_proba(X_test), torch_model.predict_proba(X_test), rtol=1e-06, atol=1e-06,
        )
    @unittest.skipIf(not pandas_installed(), reason="Test requires pandas installed")
    def test_pipeline_column_transformer_weights(self):
        """ColumnTransformer with transformer_weights: converted torch model matches sklearn probabilities."""
        iris = datasets.load_iris()
        X = iris.data[:, :3]
        y = iris.target
        X_train = pandas.DataFrame(X, columns=["vA", "vB", "vC"])
        X_train["vcat"] = X_train["vA"].apply(lambda x: 1 if x > 0.5 else 2)
        X_train["vcat2"] = X_train["vB"].apply(lambda x: 3 if x > 0.5 else 4)
        y_train = y % 2
        numeric_features = [0, 1, 2]  # ["vA", "vB", "vC"]
        categorical_features = [3, 4]  # ["vcat", "vcat2"]
        classifier = LogisticRegression(
            C=0.01, class_weight=dict(zip([False, True], [0.2, 0.8])), n_jobs=1, max_iter=10, solver="liblinear", tol=1e-3,
        )
        numeric_transformer = Pipeline(steps=[("scaler", StandardScaler())])
        categorical_transformer = Pipeline(steps=[("onehot", OneHotEncoder(sparse=True, handle_unknown="ignore"))])
        preprocessor = ColumnTransformer(
            transformers=[
                ("num", numeric_transformer, numeric_features),
                ("cat", categorical_transformer, categorical_features),
            ],
            transformer_weights={"num": 2, "cat": 3},
        )
        model = Pipeline(steps=[("preprocessor", preprocessor), ("classifier", classifier)])
        model.fit(X_train, y_train)
        X_test = X_train[:11]
        torch_model = hummingbird.ml.convert(model, "torch")
        self.assertTrue(torch_model is not None)
        np.testing.assert_allclose(
            model.predict_proba(X_test), torch_model.predict_proba(X_test.values), rtol=1e-06, atol=1e-06,
        )
    @unittest.skipIf(not pandas_installed(), reason="Test requires pandas installed")
    def test_pipeline_column_transformer_weights_pandas(self):
        """transformer_weights variant with a pandas DataFrame passed at convert time and prediction time."""
        iris = datasets.load_iris()
        X = iris.data[:, :3]
        y = iris.target
        X_train = pandas.DataFrame(X, columns=["vA", "vB", "vC"])
        X_train["vcat"] = X_train["vA"].apply(lambda x: 1 if x > 0.5 else 2)
        X_train["vcat2"] = X_train["vB"].apply(lambda x: 3 if x > 0.5 else 4)
        y_train = y % 2
        numeric_features = [0, 1, 2]  # ["vA", "vB", "vC"]
        categorical_features = [3, 4]  # ["vcat", "vcat2"]
        classifier = LogisticRegression(
            C=0.01, class_weight=dict(zip([False, True], [0.2, 0.8])), n_jobs=1, max_iter=10, solver="liblinear", tol=1e-3,
        )
        numeric_transformer = Pipeline(steps=[("scaler", StandardScaler())])
        categorical_transformer = Pipeline(steps=[("onehot", OneHotEncoder(sparse=True, handle_unknown="ignore"))])
        preprocessor = ColumnTransformer(
            transformers=[
                ("num", numeric_transformer, numeric_features),
                ("cat", categorical_transformer, categorical_features),
            ],
            transformer_weights={"num": 2, "cat": 3},
        )
        model = Pipeline(steps=[("precprocessor", preprocessor), ("classifier", classifier)])
        model.fit(X_train, y_train)
        X_test = X_train[:11]
        torch_model = hummingbird.ml.convert(model, "torch", X_test)
        self.assertTrue(torch_model is not None)
        np.testing.assert_allclose(
            model.predict_proba(X_test), torch_model.predict_proba(X_test), rtol=1e-06, atol=1e-06,
        )
    @unittest.skipIf(not pandas_installed(), reason="Test requires pandas installed")
    def test_pipeline_column_transformer_drop(self):
        """ColumnTransformer with remainder="drop": column "vC" (index 2) is excluded from the model."""
        iris = datasets.load_iris()
        X = iris.data[:, :3]
        y = iris.target
        X_train = pandas.DataFrame(X, columns=["vA", "vB", "vC"])
        X_train["vcat"] = X_train["vA"].apply(lambda x: 1 if x > 0.5 else 2)
        X_train["vcat2"] = X_train["vB"].apply(lambda x: 3 if x > 0.5 else 4)
        y_train = y % 2
        numeric_features = [0, 1]  # ["vA", "vB"]
        categorical_features = [3, 4]  # ["vcat", "vcat2"]
        classifier = LogisticRegression(
            C=0.01, class_weight=dict(zip([False, True], [0.2, 0.8])), n_jobs=1, max_iter=10, solver="liblinear", tol=1e-3,
        )
        numeric_transformer = Pipeline(steps=[("scaler", StandardScaler())])
        categorical_transformer = Pipeline(steps=[("onehot", OneHotEncoder(sparse=True, handle_unknown="ignore"))])
        preprocessor = ColumnTransformer(
            transformers=[
                ("num", numeric_transformer, numeric_features),
                ("cat", categorical_transformer, categorical_features),
            ],
            transformer_weights={"num": 2, "cat": 3},
            remainder="drop",
        )
        model = Pipeline(steps=[("precprocessor", preprocessor), ("classifier", classifier)])
        model.fit(X_train, y_train)
        X_test = X_train[:11]
        torch_model = hummingbird.ml.convert(model, "torch")
        self.assertTrue(torch_model is not None)
        np.testing.assert_allclose(
            model.predict_proba(X_test), torch_model.predict_proba(X_test.values), rtol=1e-06, atol=1e-06,
        )
    @unittest.skipIf(not pandas_installed(), reason="Test requires pandas installed")
    def test_pipeline_column_transformer_drop_noweights(self):
        """Same remainder="drop" scenario as above but without transformer_weights."""
        iris = datasets.load_iris()
        X = iris.data[:, :3]
        y = iris.target
        X_train = pandas.DataFrame(X, columns=["vA", "vB", "vC"])
        X_train["vcat"] = X_train["vA"].apply(lambda x: 1 if x > 0.5 else 2)
        X_train["vcat2"] = X_train["vB"].apply(lambda x: 3 if x > 0.5 else 4)
        y_train = y % 2
        numeric_features = [0, 1]  # ["vA", "vB"]
        categorical_features = [3, 4]  # ["vcat", "vcat2"]
        classifier = LogisticRegression(
            C=0.01, class_weight=dict(zip([False, True], [0.2, 0.8])), n_jobs=1, max_iter=10, solver="liblinear", tol=1e-3,
        )
        numeric_transformer = Pipeline(steps=[("scaler", StandardScaler())])
        categorical_transformer = Pipeline(steps=[("onehot", OneHotEncoder(sparse=True, handle_unknown="ignore"))])
        preprocessor = ColumnTransformer(
            transformers=[
                ("num", numeric_transformer, numeric_features),
                ("cat", categorical_transformer, categorical_features),
            ],
            remainder="drop",
        )
        model = Pipeline(steps=[("precprocessor", preprocessor), ("classifier", classifier)])
        model.fit(X_train, y_train)
        X_test = X_train[:11]
        torch_model = hummingbird.ml.convert(model, "torch")
        self.assertTrue(torch_model is not None)
        np.testing.assert_allclose(
            model.predict_proba(X_test), torch_model.predict_proba(X_test.values), rtol=1e-06, atol=1e-06,
        )
    @unittest.skipIf(not pandas_installed(), reason="Test requires pandas installed")
    def test_pipeline_column_transformer_passthrough(self):
        """ColumnTransformer with remainder="passthrough": untouched column "vC" is forwarded to the classifier."""
        iris = datasets.load_iris()
        X = iris.data[:, :3]
        y = iris.target
        X_train = pandas.DataFrame(X, columns=["vA", "vB", "vC"])
        X_train["vcat"] = X_train["vA"].apply(lambda x: 1 if x > 0.5 else 2)
        X_train["vcat2"] = X_train["vB"].apply(lambda x: 3 if x > 0.5 else 4)
        y_train = y % 2
        numeric_features = [0, 1]  # ["vA", "vB"]
        categorical_features = [3, 4]  # ["vcat", "vcat2"]
        classifier = LogisticRegression(
            C=0.01, class_weight=dict(zip([False, True], [0.2, 0.8])), n_jobs=1, max_iter=10, solver="liblinear", tol=1e-3,
        )
        numeric_transformer = Pipeline(steps=[("scaler", StandardScaler())])
        categorical_transformer = Pipeline(steps=[("onehot", OneHotEncoder(sparse=True, handle_unknown="ignore"))])
        preprocessor = ColumnTransformer(
            transformers=[
                ("num", numeric_transformer, numeric_features),
                ("cat", categorical_transformer, categorical_features),
            ],
            transformer_weights={"num": 2, "cat": 3},
            remainder="passthrough",
        )
        model = Pipeline(steps=[("precprocessor", preprocessor), ("classifier", classifier)])
        model.fit(X_train, y_train)
        X_test = X_train[:11]
        torch_model = hummingbird.ml.convert(model, "torch")
        self.assertTrue(torch_model is not None)
        np.testing.assert_allclose(
            model.predict_proba(X_test), torch_model.predict_proba(X_test.values), rtol=1e-06, atol=1e-06,
        )
    @unittest.skipIf(not pandas_installed(), reason="Test requires pandas installed")
    def test_pipeline_column_transformer_passthrough_noweights(self):
        """Same remainder="passthrough" scenario as above but without transformer_weights."""
        iris = datasets.load_iris()
        X = iris.data[:, :3]
        y = iris.target
        X_train = pandas.DataFrame(X, columns=["vA", "vB", "vC"])
        X_train["vcat"] = X_train["vA"].apply(lambda x: 1 if x > 0.5 else 2)
        X_train["vcat2"] = X_train["vB"].apply(lambda x: 3 if x > 0.5 else 4)
        y_train = y % 2
        numeric_features = [0, 1]  # ["vA", "vB"]
        categorical_features = [3, 4]  # ["vcat", "vcat2"]
        classifier = LogisticRegression(
            C=0.01, class_weight=dict(zip([False, True], [0.2, 0.8])), n_jobs=1, max_iter=10, solver="liblinear", tol=1e-3,
        )
        numeric_transformer = Pipeline(steps=[("scaler", StandardScaler())])
        categorical_transformer = Pipeline(steps=[("onehot", OneHotEncoder(sparse=True, handle_unknown="ignore"))])
        preprocessor = ColumnTransformer(
            transformers=[
                ("num", numeric_transformer, numeric_features),
                ("cat", categorical_transformer, categorical_features),
            ],
            remainder="passthrough",
        )
        model = Pipeline(steps=[("precprocessor", preprocessor), ("classifier", classifier)])
        model.fit(X_train, y_train)
        X_test = X_train[:11]
        torch_model = hummingbird.ml.convert(model, "torch")
        self.assertTrue(torch_model is not None)
        np.testing.assert_allclose(
            model.predict_proba(X_test), torch_model.predict_proba(X_test.values), rtol=1e-06, atol=1e-06,
        )
    @unittest.skipIf(not pandas_installed(), reason="Test requires pandas installed")
    def test_pipeline_column_transformer_passthrough_slice(self):
        """ColumnTransformer with slice column selectors plus remainder="passthrough"."""
        iris = datasets.load_iris()
        X = iris.data[:, :3]
        y = iris.target
        X_train = pandas.DataFrame(X, columns=["vA", "vB", "vC"])
        X_train["vcat"] = X_train["vA"].apply(lambda x: 1 if x > 0.5 else 2)
        X_train["vcat2"] = X_train["vB"].apply(lambda x: 3 if x > 0.5 else 4)
        y_train = y % 2
        numeric_features = slice(0, 1)  # selects only column 0 ("vA"); the end index is exclusive
        categorical_features = slice(3, 4)  # selects only column 3 ("vcat")
        classifier = LogisticRegression(
            C=0.01, class_weight=dict(zip([False, True], [0.2, 0.8])), n_jobs=1, max_iter=10, solver="liblinear", tol=1e-3,
        )
        numeric_transformer = Pipeline(steps=[("scaler", StandardScaler())])
        categorical_transformer = Pipeline(steps=[("onehot", OneHotEncoder(sparse=True, handle_unknown="ignore"))])
        preprocessor = ColumnTransformer(
            transformers=[
                ("num", numeric_transformer, numeric_features),
                ("cat", categorical_transformer, categorical_features),
            ],
            transformer_weights={"num": 2, "cat": 3},
            remainder="passthrough",
        )
        model = Pipeline(steps=[("precprocessor", preprocessor), ("classifier", classifier)])
        model.fit(X_train, y_train)
        X_test = X_train[:11]
        torch_model = hummingbird.ml.convert(model, "torch")
        self.assertTrue(torch_model is not None)
        np.testing.assert_allclose(
            model.predict_proba(X_test), torch_model.predict_proba(X_test.values), rtol=1e-06, atol=1e-06,
        )
# Taken from https://github.com/microsoft/hummingbird/issues/388https://github.com/microsoft/hummingbird/issues/388
def test_pipeline_pca_rf(self):
X, y = make_regression(n_samples=1000, n_features=8, n_informative=5, n_targets=1, random_state=0, shuffle=True)
pca = PCA(n_components=8, svd_solver="randomized", whiten=True)
clf = make_pipeline(StandardScaler(), pca, RandomForestRegressor(n_estimators=10, max_depth=30, random_state=0))
clf.fit(X, y)
model = hummingbird.ml.convert(clf, "pytorch")
prediction_sk = clf.predict([[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]])
prediction_hb = model.predict([[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]])
np.testing.assert_allclose(prediction_sk, prediction_hb, rtol=1e-06, atol=1e-06)
    @unittest.skipIf(not onnx_runtime_installed(), reason="Test requires ORT installed")
    def test_pipeline_many_inputs(self):
        """A pipeline fed as 18 separate single-column inputs yields one ONNX graph input per column."""
        n_features = 18
        X = np.random.rand(100, n_features)
        y = np.random.randint(1000, size=100)
        scaler_transformer = Pipeline(steps=[("scaler", StandardScaler())])
        preprocessor = ColumnTransformer(transformers=[("scaling", scaler_transformer, list(range(n_features)))])
        model = RandomForestRegressor(n_estimators=10, max_depth=9)
        pipeline = Pipeline(steps=[("preprocessor", preprocessor), ("model", model)])
        pipeline.fit(X, y)
        # Split the matrix column-wise so each feature arrives as its own input tensor.
        X_test = tuple(np.split(X, n_features, axis=1))
        hb_model = hummingbird.ml.convert(pipeline, "onnx", X_test)
        assert len(hb_model.model.graph.input) == n_features
        np.testing.assert_allclose(
            pipeline.predict(X), np.array(hb_model.predict(X_test)).flatten(), rtol=1e-06, atol=1e-06,
        )
@unittest.skipIf(not onnx_runtime_installed(), reason="Test requires ORT installed")
def test_pipeline_many_inputs_with_schema(self):
n_features = 5
X = np.random.rand(100, n_features)
y = np.random.randint(1000, size=100)
input_column_names = ["A", "B", "C", "D", "E"]
output_column_names = ["score"]
scaler_transformer = Pipeline(steps=[("scaler", StandardScaler())])
preprocessor = ColumnTransformer(transformers=[("scaling", scaler_transformer, list(range(n_features)))])
model = RandomForestRegressor(n_estimators=10, max_depth=9)
pipeline = Pipeline(steps=[("preprocessor", preprocessor), ("model", model)])
pipeline.fit(X, y)
X_test = tuple(np.split(X, n_features, axis=1))
extra_config = {constants.INPUT_NAMES: input_column_names, constants.OUTPUT_NAMES: output_column_names}
hb_model = hummingbird.ml.convert(pipeline, "onnx", X_test, extra_config=extra_config)
graph_inputs = [input.name for input in hb_model.model.graph.input]
graph_outputs = [output.name for output in hb_model.model.graph.output]
assert len(hb_model.model.graph.input) == n_features
assert graph_inputs == input_column_names
assert graph_outputs == output_column_names
@unittest.skipIf(StackingClassifier is None, reason="StackingClassifier not available in scikit-learn < 0.22")
def test_stacking_classifier(self):
X, y = load_iris(return_X_y=True)
estimators = [
("rf", RandomForestClassifier(n_estimators=10, random_state=42)),
("svr", make_pipeline(StandardScaler(), LogisticRegression(random_state=42))),
]
clf = StackingClassifier(estimators=estimators, final_estimator=LogisticRegression())
X_train, X_test, y_train, y_test = train_test_split(X, y, stratify=y, random_state=42)
clf.fit(X_train, y_train)
hb_model = hummingbird.ml.convert(clf, "torch")
np.testing.assert_allclose(
clf.predict(X_test), hb_model.predict(X_test), rtol=1e-06, atol=1e-06,
)
    @unittest.skipIf(StackingClassifier is None, reason="StackingClassifier not available in scikit-learn < 0.22")
    def test_stacking_classifier_passthrough(self):
        """StackingClassifier with passthrough=True (raw features appended for the final estimator) converts to torch."""
        X, y = load_iris(return_X_y=True)
        estimators = [
            ("rf", RandomForestClassifier(n_estimators=10, random_state=42)),
            ("svr", make_pipeline(StandardScaler(), LogisticRegression(random_state=42))),
        ]
        clf = StackingClassifier(estimators=estimators, final_estimator=LogisticRegression(), passthrough=True)
        X_train, X_test, y_train, y_test = train_test_split(X, y, stratify=y, random_state=42)
        clf.fit(X_train, y_train)
        hb_model = hummingbird.ml.convert(clf, "torch")
        np.testing.assert_allclose(
            clf.predict(X_test), hb_model.predict(X_test), rtol=1e-06, atol=1e-06,
        )
    @unittest.skipIf(StackingClassifier is None, reason="StackingClassifier not available in scikit-learn < 0.22")
    def test_stacking_classifier_decision_function(self):
        """Conversion of a stack whose base estimator (LinearSVC) exposes only decision_function raises ValueError."""
        X, y = load_iris(return_X_y=True)
        estimators = [
            ("rf", RandomForestClassifier(n_estimators=10, random_state=42)),
            ("svr", make_pipeline(StandardScaler(), LinearSVC(random_state=42))),
        ]
        clf = StackingClassifier(estimators=estimators, final_estimator=LogisticRegression())
        X_train, X_test, y_train, y_test = train_test_split(X, y, stratify=y, random_state=42)
        clf.fit(X_train, y_train)
        # Expected failure: the converter does not support decision_function-based stacking.
        self.assertRaises(ValueError, hummingbird.ml.convert, clf, "torch")
@unittest.skipIf(StackingClassifier is None, reason="StackingRegressor not available in scikit-learn < 0.22")
def test_stacking_regressor(self):
X, y = load_diabetes(return_X_y=True)
estimators = [("lr", RidgeCV()), ("svr", LinearSVR(random_state=42))]
reg = StackingRegressor(estimators=estimators, final_estimator=RandomForestRegressor(n_estimators=10, random_state=42))
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42)
reg.fit(X_train, y_train)
hb_model = hummingbird.ml.convert(reg, "torch")
np.testing.assert_allclose(
reg.predict(X_test), hb_model.predict(X_test), rtol=1e-06, atol=1e-06,
)
# Standard unittest entry point: run the tests in this module when executed directly.
if __name__ == "__main__":
    unittest.main()
| true | true |
f7173f7ed9d0c8bc4d136449f83ae47f59a3b4aa | 161 | py | Python | shiyanlou_cs596-1805f3c438/design3.py | tongxindao/shiyanlou | 1d002ea342deb69066c287db9935f77f49f0a09e | [
"Apache-2.0"
] | null | null | null | shiyanlou_cs596-1805f3c438/design3.py | tongxindao/shiyanlou | 1d002ea342deb69066c287db9935f77f49f0a09e | [
"Apache-2.0"
] | null | null | null | shiyanlou_cs596-1805f3c438/design3.py | tongxindao/shiyanlou | 1d002ea342deb69066c287db9935f77f49f0a09e | [
"Apache-2.0"
] | null | null | null | #! /usr/bin/env python3
# Print a right-aligned triangle: `rows` stars on the first line, one fewer on
# each following line, down to an all-spaces line.
rows = int(input("Enter the number of rows: "))
for remaining in range(rows, -1, -1):
    padding = " " * (rows - remaining)
    print(padding + "*" * remaining)
| 17.888889 | 46 | 0.459627 |
# Print a right-aligned triangle of '*': `row` stars on the first line,
# one fewer on each following line, down to an all-spaces line.
row = int(input("Enter the number of rows: "))
n = row
while n >= 0:
    x = "*" * n          # stars for this line
    y = " " * (row - n)  # left padding that grows as stars shrink
    print(y + x)
    n -= 1
| true | true |
f7173fb688f43ba2ac42d7b1dfdd0e7fc7e3dcf5 | 3,784 | py | Python | Lib/site-packages/django_extensions/validators.py | Nibraz15/FullTextSearch | 79d03a9b5c0fc94219ad9a70fe57818496844660 | [
"bzip2-1.0.6"
] | 1 | 2019-12-22T23:37:28.000Z | 2019-12-22T23:37:28.000Z | Lib/site-packages/django_extensions/validators.py | Nibraz15/FullTextSearch | 79d03a9b5c0fc94219ad9a70fe57818496844660 | [
"bzip2-1.0.6"
] | 10 | 2020-06-05T21:41:01.000Z | 2022-02-10T07:33:38.000Z | Lib/site-packages/django_extensions/validators.py | Nibraz15/FullTextSearch | 79d03a9b5c0fc94219ad9a70fe57818496844660 | [
"bzip2-1.0.6"
] | 3 | 2020-08-07T16:16:54.000Z | 2020-10-12T18:06:35.000Z | # -*- coding: utf-8 -*-
import unicodedata
import binascii
from django.core.exceptions import ValidationError
from django.utils.deconstruct import deconstructible
from django.utils.encoding import force_text
from django.utils.translation import ugettext_lazy as _
@deconstructible
class NoControlCharactersValidator(object):
    """Reject values containing Unicode control characters (category "C"), e.g. newlines or tabs.

    Characters listed in ``whitelist`` are exempted from the check.
    """

    message = _("Control Characters like new lines or tabs are not allowed.")
    code = "no_control_characters"
    whitelist = None

    def __init__(self, message=None, code=None, whitelist=None):
        # Only override the class-level defaults when a value is supplied.
        if message:
            self.message = message
        if code:
            self.code = code
        if whitelist:
            self.whitelist = whitelist

    def __call__(self, value):
        value = force_text(value)
        whitelist = self.whitelist
        # Bind the lookup once; it is called for every character below.
        category = unicodedata.category
        for character in value:
            if whitelist and character in whitelist:
                continue
            # Unicode general category "C*" covers control/format/unassigned chars.
            if category(character)[0] == "C":
                params = {'value': value, 'whitelist': whitelist}
                raise ValidationError(self.message, code=self.code, params=params)

    def __eq__(self, other):
        return (
            isinstance(other, NoControlCharactersValidator) and
            (self.whitelist == other.whitelist) and
            (self.message == other.message) and
            (self.code == other.code)
        )
@deconstructible
class NoWhitespaceValidator(object):
    """Reject values that carry leading or trailing whitespace."""

    message = _("Leading and Trailing whitespaces are not allowed.")
    code = "no_whitespace"

    def __init__(self, message=None, code=None, whitelist=None):
        # Fall back to the class-level defaults unless overrides are supplied.
        if message:
            self.message = message
        if code:
            self.code = code

    def __call__(self, value):
        text = force_text(value)
        if text.strip() != text:
            raise ValidationError(self.message, code=self.code, params={'value': text})

    def __eq__(self, other):
        if not isinstance(other, NoWhitespaceValidator):
            return False
        return self.message == other.message and self.code == other.code
@deconstructible
class HexValidator(object):
    """Validate that a value is a hexadecimal string, optionally with exact or bounded length."""

    messages = {
        'invalid': _("Only a hex string is allowed."),
        'length': _("Invalid length. Must be %(length)d characters."),
        'min_length': _("Ensure that there are more than %(min)s characters."),
        'max_length': _("Ensure that there are no more than %(max)s characters."),
    }
    code = "hex_only"

    def __init__(self, length=None, min_length=None, max_length=None, message=None, code=None):
        self.length = length
        self.min_length = min_length
        self.max_length = max_length
        if message:
            self.message = message
        if code:
            self.code = code

    def __call__(self, value):
        value = force_text(value)
        if self.length and len(value) != self.length:
            raise ValidationError(self.messages['length'], code='hex_only_length', params={'length': self.length})
        if self.min_length and len(value) < self.min_length:
            raise ValidationError(self.messages['min_length'], code='hex_only_min_length', params={'min': self.min_length})
        # Bug fix: reject values LONGER than max_length. The original compared with
        # "<", which rejected values shorter than the maximum and accepted over-long ones.
        if self.max_length and len(value) > self.max_length:
            raise ValidationError(self.messages['max_length'], code='hex_only_max_length', params={'max': self.max_length})
        try:
            binascii.unhexlify(value)
        except (TypeError, binascii.Error):
            raise ValidationError(self.messages['invalid'], code='hex_only')

    def __eq__(self, other):
        # ``message`` is only set when passed to __init__, so direct attribute
        # access raised AttributeError for default-constructed validators.
        return (
            isinstance(other, HexValidator) and
            (getattr(self, 'message', None) == getattr(other, 'message', None)) and
            (self.code == other.code)
        )
| 34.4 | 123 | 0.624736 |
import unicodedata
import binascii
from django.core.exceptions import ValidationError
from django.utils.deconstruct import deconstructible
from django.utils.encoding import force_text
from django.utils.translation import ugettext_lazy as _
@deconstructible
class NoControlCharactersValidator(object):
    """Reject values containing Unicode control characters (category "C"), e.g. newlines or tabs.

    Characters listed in ``whitelist`` are exempted from the check.
    """

    message = _("Control Characters like new lines or tabs are not allowed.")
    code = "no_control_characters"
    whitelist = None

    def __init__(self, message=None, code=None, whitelist=None):
        # Only override the class-level defaults when a value is supplied.
        if message:
            self.message = message
        if code:
            self.code = code
        if whitelist:
            self.whitelist = whitelist

    def __call__(self, value):
        value = force_text(value)
        whitelist = self.whitelist
        # Bind the lookup once; it is called for every character below.
        category = unicodedata.category
        for character in value:
            if whitelist and character in whitelist:
                continue
            # Unicode general category "C*" covers control/format/unassigned chars.
            if category(character)[0] == "C":
                params = {'value': value, 'whitelist': whitelist}
                raise ValidationError(self.message, code=self.code, params=params)

    def __eq__(self, other):
        return (
            isinstance(other, NoControlCharactersValidator) and
            (self.whitelist == other.whitelist) and
            (self.message == other.message) and
            (self.code == other.code)
        )
@deconstructible
class NoWhitespaceValidator(object):
    """Reject values that carry leading or trailing whitespace."""

    message = _("Leading and Trailing whitespaces are not allowed.")
    code = "no_whitespace"

    def __init__(self, message=None, code=None, whitelist=None):
        # Only override the class-level defaults when a value is supplied.
        if message:
            self.message = message
        if code:
            self.code = code

    def __call__(self, value):
        value = force_text(value)
        # A value that changes under strip() has leading/trailing whitespace.
        if value != value.strip():
            params = {'value': value}
            raise ValidationError(self.message, code=self.code, params=params)

    def __eq__(self, other):
        return (
            isinstance(other, NoWhitespaceValidator) and
            (self.message == other.message) and
            (self.code == other.code)
        )
@deconstructible
class HexValidator(object):
    """Validate that a value is a hexadecimal string, optionally with exact or bounded length."""

    messages = {
        'invalid': _("Only a hex string is allowed."),
        'length': _("Invalid length. Must be %(length)d characters."),
        'min_length': _("Ensure that there are more than %(min)s characters."),
        'max_length': _("Ensure that there are no more than %(max)s characters."),
    }
    code = "hex_only"

    def __init__(self, length=None, min_length=None, max_length=None, message=None, code=None):
        self.length = length
        self.min_length = min_length
        self.max_length = max_length
        if message:
            self.message = message
        if code:
            self.code = code

    def __call__(self, value):
        value = force_text(value)
        if self.length and len(value) != self.length:
            raise ValidationError(self.messages['length'], code='hex_only_length', params={'length': self.length})
        if self.min_length and len(value) < self.min_length:
            raise ValidationError(self.messages['min_length'], code='hex_only_min_length', params={'min': self.min_length})
        # Bug fix: reject values LONGER than max_length. The original compared with
        # "<", which rejected values shorter than the maximum and accepted over-long ones.
        if self.max_length and len(value) > self.max_length:
            raise ValidationError(self.messages['max_length'], code='hex_only_max_length', params={'max': self.max_length})
        try:
            binascii.unhexlify(value)
        except (TypeError, binascii.Error):
            raise ValidationError(self.messages['invalid'], code='hex_only')

    def __eq__(self, other):
        # ``message`` is only set when passed to __init__, so direct attribute
        # access raised AttributeError for default-constructed validators.
        return (
            isinstance(other, HexValidator) and
            (getattr(self, 'message', None) == getattr(other, 'message', None)) and
            (self.code == other.code)
        )
| true | true |
f71740b8d42b1368ce90e20d97da178845afeb85 | 2,344 | py | Python | nodes/lcm_to_ros/xtion/rgbd_t.py | mrfmap/mrfmap_ros | 8c1e108860ff297f39591d97f8f8ce2937b29a51 | [
"BSD-3-Clause"
] | 6 | 2020-07-15T21:00:49.000Z | 2021-05-12T07:16:38.000Z | nodes/lcm_to_ros/xtion/rgbd_t.py | mrfmap/mrfmap_ros | 8c1e108860ff297f39591d97f8f8ce2937b29a51 | [
"BSD-3-Clause"
] | 2 | 2020-08-14T16:16:21.000Z | 2020-11-12T07:43:22.000Z | nodes/lcm_to_ros/xtion/rgbd_t.py | mrfmap/mrfmap_ros | 8c1e108860ff297f39591d97f8f8ce2937b29a51 | [
"BSD-3-Clause"
] | 2 | 2020-08-16T15:53:14.000Z | 2021-05-12T07:16:41.000Z | """LCM type definitions
This file automatically generated by lcm.
DO NOT MODIFY BY HAND!!!!
"""
try:
import cStringIO.StringIO as BytesIO
except ImportError:
from io import BytesIO
import struct
class rgbd_t(object):
    """LCM message carrying a timestamped RGB + depth byte payload.

    NOTE(review): lcm-gen generated code ("DO NOT MODIFY BY HAND"); only
    comments were added here.
    """
    __slots__ = ["utime", "width", "height", "rgblen", "depthlen", "rgb", "depth"]
    __typenames__ = ["int64_t", "int32_t", "int32_t", "int32_t", "int32_t", "byte", "byte"]
    __dimensions__ = [None, None, None, None, None, ["rgblen"], ["depthlen"]]
    def __init__(self):
        self.utime = 0
        self.width = 0
        self.height = 0
        self.rgblen = 0
        self.depthlen = 0
        self.rgb = ""
        self.depth = ""
    def encode(self):
        # Wire format: 8-byte fingerprint followed by the packed fields.
        buf = BytesIO()
        buf.write(rgbd_t._get_packed_fingerprint())
        self._encode_one(buf)
        return buf.getvalue()
    def _encode_one(self, buf):
        # Big-endian header (int64 + 4x int32), then the raw byte payloads
        # truncated to their declared lengths.
        buf.write(struct.pack(">qiiii", self.utime, self.width, self.height, self.rgblen, self.depthlen))
        buf.write(bytearray(self.rgb[:self.rgblen]))
        buf.write(bytearray(self.depth[:self.depthlen]))
    def decode(data):
        # Accepts either a bytes object or a file-like object with read().
        if hasattr(data, 'read'):
            buf = data
        else:
            buf = BytesIO(data)
        if buf.read(8) != rgbd_t._get_packed_fingerprint():
            raise ValueError("Decode error")
        return rgbd_t._decode_one(buf)
    decode = staticmethod(decode)
    def _decode_one(buf):
        self = rgbd_t()
        self.utime, self.width, self.height, self.rgblen, self.depthlen = struct.unpack(">qiiii", buf.read(24))
        self.rgb = buf.read(self.rgblen)
        self.depth = buf.read(self.depthlen)
        return self
    _decode_one = staticmethod(_decode_one)
    _hash = None
    def _get_hash_recursive(parents):
        # Type hash: base constant rotated left by one bit (64-bit wraparound).
        if rgbd_t in parents: return 0
        tmphash = (0x9765ad14343d07fc) & 0xffffffffffffffff
        tmphash = (((tmphash<<1)&0xffffffffffffffff) + (tmphash>>63)) & 0xffffffffffffffff
        return tmphash
    _get_hash_recursive = staticmethod(_get_hash_recursive)
    _packed_fingerprint = None
    def _get_packed_fingerprint():
        # Cached 8-byte big-endian fingerprint used to tag encoded messages.
        if rgbd_t._packed_fingerprint is None:
            rgbd_t._packed_fingerprint = struct.pack(">Q", rgbd_t._get_hash_recursive([]))
        return rgbd_t._packed_fingerprint
    _get_packed_fingerprint = staticmethod(_get_packed_fingerprint)
| 32.555556 | 111 | 0.638652 |
try:
import cStringIO.StringIO as BytesIO
except ImportError:
from io import BytesIO
import struct
class rgbd_t(object):
    """LCM message carrying a timestamped RGB + depth byte payload.

    Wire format: an 8-byte type fingerprint, a big-endian header
    (int64 utime + 4x int32), then the raw rgb and depth byte blocks.
    """

    __slots__ = ["utime", "width", "height", "rgblen", "depthlen", "rgb", "depth"]

    __typenames__ = ["int64_t", "int32_t", "int32_t", "int32_t", "int32_t", "byte", "byte"]

    __dimensions__ = [None, None, None, None, None, ["rgblen"], ["depthlen"]]

    def __init__(self):
        self.utime = 0
        self.width = 0
        self.height = 0
        self.rgblen = 0
        self.depthlen = 0
        self.rgb = ""
        self.depth = ""

    def encode(self):
        """Serialize the message: fingerprint first, then the packed fields."""
        stream = BytesIO()
        stream.write(rgbd_t._get_packed_fingerprint())
        self._encode_one(stream)
        return stream.getvalue()

    def _encode_one(self, stream):
        # Header, then payloads truncated to their declared lengths.
        header = struct.pack(">qiiii", self.utime, self.width, self.height, self.rgblen, self.depthlen)
        stream.write(header)
        stream.write(bytearray(self.rgb[:self.rgblen]))
        stream.write(bytearray(self.depth[:self.depthlen]))

    def decode(data):
        """Deserialize from bytes or a file-like object; raises ValueError on fingerprint mismatch."""
        stream = data if hasattr(data, 'read') else BytesIO(data)
        if stream.read(8) != rgbd_t._get_packed_fingerprint():
            raise ValueError("Decode error")
        return rgbd_t._decode_one(stream)
    decode = staticmethod(decode)

    def _decode_one(stream):
        msg = rgbd_t()
        (msg.utime, msg.width, msg.height, msg.rgblen, msg.depthlen) = struct.unpack(">qiiii", stream.read(24))
        msg.rgb = stream.read(msg.rgblen)
        msg.depth = stream.read(msg.depthlen)
        return msg
    _decode_one = staticmethod(_decode_one)

    _hash = None

    def _get_hash_recursive(parents):
        # Base constant rotated left by one bit with 64-bit wraparound.
        if rgbd_t in parents:
            return 0
        tmphash = (0x9765ad14343d07fc) & 0xffffffffffffffff
        tmphash = (((tmphash << 1) & 0xffffffffffffffff) + (tmphash >> 63)) & 0xffffffffffffffff
        return tmphash
    _get_hash_recursive = staticmethod(_get_hash_recursive)

    _packed_fingerprint = None

    def _get_packed_fingerprint():
        # Cache the 8-byte big-endian fingerprint on first use.
        if rgbd_t._packed_fingerprint is None:
            rgbd_t._packed_fingerprint = struct.pack(">Q", rgbd_t._get_hash_recursive([]))
        return rgbd_t._packed_fingerprint
    _get_packed_fingerprint = staticmethod(_get_packed_fingerprint)
| true | true |
f717410eba8874ad5c80c1bd9eb7064162476ab8 | 967 | py | Python | Trakttv.bundle/Contents/Tests/plex_mock/models.py | disrupted/Trakttv.bundle | 24712216c71f3b22fd58cb5dd89dad5bb798ed60 | [
"RSA-MD"
] | 1,346 | 2015-01-01T14:52:24.000Z | 2022-03-28T12:50:48.000Z | Trakttv.bundle/Contents/Tests/plex_mock/models.py | alcroito/Plex-Trakt-Scrobbler | 4f83fb0860dcb91f860d7c11bc7df568913c82a6 | [
"RSA-MD"
] | 474 | 2015-01-01T10:27:46.000Z | 2022-03-21T12:26:16.000Z | Trakttv.bundle/Contents/Tests/plex_mock/models.py | alcroito/Plex-Trakt-Scrobbler | 4f83fb0860dcb91f860d7c11bc7df568913c82a6 | [
"RSA-MD"
] | 191 | 2015-01-02T18:27:22.000Z | 2022-03-29T10:49:48.000Z | class LibraryMetadata(object):
    def __init__(self, section=None):
        # `section`: the LibrarySection this metadata item belongs to (may be None).
        self.section = section
class LibrarySection(object):
    """Minimal mock of a Plex library section; only carries a title."""
    def __init__(self, title=None):
        self.title = title
class Session(object):
    """Mock of a playback session: tracks play state and position for tests."""
    def __init__(self, **kwargs):
        # Known attributes default to None; `kwargs` may override any of them.
        self.rating_key = None
        self.state = None
        self.duration = None
        self.view_offset = None
        self.part = None
        self.update(**kwargs)
    @property
    def payload(self):
        """Dict snapshot of the fields reported by this session."""
        return {
            'rating_key': self.rating_key,
            'view_offset': self.view_offset,
            'part': self.part
        }
    def save(self):
        """No-op; the mock keeps everything in memory."""
        pass
    def update(self, **kwargs):
        """Set known attributes from `kwargs`; raise KeyError for unknown ones."""
        for key, value in kwargs.items():
            if not hasattr(self, key):
                # Bug fix: the format string was never applied (the old code
                # passed `key` as a second exception argument instead).
                raise KeyError('Unknown attribute with key %r' % (key,))
            setattr(self, key, value)
    def __repr__(self):
        return '<Session state: %r>' % (
            self.state
        )
| 21.977273 | 68 | 0.553257 | class LibraryMetadata(object):
def __init__(self, section=None):
self.section = section
class LibrarySection(object):
def __init__(self, title=None):
self.title = title
class Session(object):
def __init__(self, **kwargs):
self.rating_key = None
self.state = None
self.duration = None
self.view_offset = None
self.part = None
self.update(**kwargs)
@property
def payload(self):
return {
'rating_key': self.rating_key,
'view_offset': self.view_offset,
'part': self.part
}
def save(self):
pass
def update(self, **kwargs):
for key, value in kwargs.items():
if not hasattr(self, key):
raise KeyError('Unknown attribute with key %r', key)
setattr(self, key, value)
def __repr__(self):
return '<Session state: %r>' % (
self.state
)
| true | true |
f717418e6536d845980ad96232de54991a5746ec | 12,804 | py | Python | app/controller.py | Effenberg0x0/ci_edit | ea78621164152b1f489cae8e53994fad52c01c16 | [
"Apache-2.0"
] | 1 | 2019-01-21T07:35:14.000Z | 2019-01-21T07:35:14.000Z | app/controller.py | Effenberg0x0/ci_edit | ea78621164152b1f489cae8e53994fad52c01c16 | [
"Apache-2.0"
] | null | null | null | app/controller.py | Effenberg0x0/ci_edit | ea78621164152b1f489cae8e53994fad52c01c16 | [
"Apache-2.0"
] | null | null | null | # Copyright 2016 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Manager for key bindings."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import curses
import curses.ascii
import app.config
import app.curses_util
import app.log
#import app.window
class Controller:
    """A Controller is a keyboard mapping from keyboard/mouse events to editor
    commands."""
    def __init__(self, view, name):
        if app.config.strict_debug:
            assert issubclass(self.__class__, Controller)
            assert issubclass(view.__class__, app.window.Window)
        self.view = view
        self.commandDefault = None  # handler for keys absent from commandSet
        self.commandSet = None  # key -> command-callable mapping
        self.textBuffer = None
        self.name = name
    def parentController(self):
        """Return the controller of the nearest ancestor view that has one,
        or None if no ancestor does."""
        view = self.view.parent
        while view is not None:
            if view.controller is not None:
                return view.controller
            view = view.parent
    def changeToConfirmClose(self):
        """Focus the 'confirm close' prompt."""
        self.findAndChangeTo('confirmClose')
    def changeToConfirmOverwrite(self):
        """Focus the 'confirm overwrite' prompt."""
        self.findAndChangeTo('confirmOverwrite')
    def changeToFileManagerWindow(self, *args):
        """Focus the file manager window."""
        self.findAndChangeTo('fileManagerWindow')
    def changeToConfirmQuit(self):
        """Focus the interactive quit confirmation."""
        self.findAndChangeTo('interactiveQuit')
    def changeToHostWindow(self, *args):
        """Return focus to the main input (editing) window."""
        host = self.getNamedWindow('inputWindow')
        if app.config.strict_debug:
            assert issubclass(self.view.__class__, app.window.Window), self.view
            assert issubclass(host.__class__, app.window.Window), host
        self.view.changeFocusTo(host)
    def changeToInputWindow(self, *args):
        """Focus the main input window."""
        self.findAndChangeTo('inputWindow')
    def changeToFind(self):
        """Focus the interactive find prompt."""
        self.findAndChangeTo('interactiveFind')
    def changeToFindPrior(self):
        """Re-queue the triggering key and focus the find prompt so the key
        is handled there."""
        curses.ungetch(self.savedCh)
        self.findAndChangeTo('interactiveFind')
    def changeToGoto(self):
        """Focus the interactive goto-line prompt."""
        self.findAndChangeTo('interactiveGoto')
    def changeToPaletteWindow(self):
        """Focus the palette window."""
        self.findAndChangeTo('paletteWindow')
    def changeToPopup(self):
        """Focus the popup window."""
        self.findAndChangeTo('popupWindow')
    def changeToPrediction(self):
        """Focus the prediction window."""
        self.findAndChangeTo('predictionWindow')
        #self.findAndChangeTo('interactivePrediction')
    def changeToPrompt(self):
        """Focus the interactive command prompt."""
        self.findAndChangeTo('interactivePrompt')
    def changeToQuit(self):
        """Focus the interactive quit confirmation."""
        self.findAndChangeTo('interactiveQuit')
    def changeToSaveAs(self):
        """Put the file manager into 'saveAs' mode and focus it."""
        view = self.getNamedWindow('fileManagerWindow')
        view.setMode('saveAs')
        view.bringToFront()
        view.changeFocusTo(view)
    def createNewTextBuffer(self):
        """Attach a brand-new (empty) text buffer to this view."""
        bufferManager = self.view.program.bufferManager
        self.view.setTextBuffer(bufferManager.newTextBuffer())
    def doCommand(self, ch, meta):
        """Dispatch one input event to its mapped command (or the default
        handler) and group any resulting edits into a single undo step."""
        # Check the commandSet for the input with both its string and integer
        # representation.
        self.savedCh = ch
        cmd = (self.commandSet.get(ch) or
               self.commandSet.get(app.curses_util.cursesKeyName(ch)))
        if cmd:
            cmd()
        else:
            self.commandDefault(ch, meta)
        self.textBuffer.compoundChangePush()
    def getNamedWindow(self, windowName):
        """Walk up the view hierarchy and return attribute `windowName` from
        the first ancestor that has it; fatal-log and return None if absent."""
        view = self.view
        while view is not None:
            if hasattr(view, windowName):
                return getattr(view, windowName)
            view = view.parent
        app.log.fatal(windowName + ' not found')
        return None
    def currentInputWindow(self):
        """Return the main input window for this view."""
        return self.getNamedWindow('inputWindow')
    def findAndChangeTo(self, windowName):
        """Look up `windowName` in the view hierarchy and focus it."""
        window = self.getNamedWindow(windowName)
        window.bringToFront()
        self.view.changeFocusTo(window)
    def changeTo(self, window):
        """Bring `window` to the front and focus it."""
        window.bringToFront()
        self.view.changeFocusTo(window)
    def focus(self):
        """Base focus handler; subclasses override (this one only logs)."""
        app.log.info('base controller focus()')
    def confirmationPromptFinish(self, *args):
        """Dismiss a confirmation prompt and return to editing."""
        window = self.getNamedWindow('inputWindow')
        window.userIntent = 'edit'
        window.bringToFront()
        self.view.changeFocusTo(window)
    def __closeHostFile(self, host):
        """Close the current file and switch to another or create an empty
        file."""
        bufferManager = host.program.bufferManager
        bufferManager.closeTextBuffer(host.textBuffer)
        host.userIntent = 'edit'
        # Prefer an unsaved buffer, then the next buffer, else a new one.
        tb = bufferManager.getUnsavedBuffer()
        if not tb:
            tb = bufferManager.nextBuffer()
        if not tb:
            tb = bufferManager.newTextBuffer()
        host.setTextBuffer(tb)
    def closeFile(self):
        """Close the current file unconditionally and return to editing."""
        app.log.info()
        host = self.getNamedWindow('inputWindow')
        self.__closeHostFile(host)
        self.confirmationPromptFinish()
    def closeOrConfirmClose(self):
        """If the file is clean, close it. If it is dirty, prompt the user
        about whether to lose unsaved changes."""
        host = self.getNamedWindow('inputWindow')
        tb = host.textBuffer
        if not tb.isDirty():
            self.__closeHostFile(host)
            return
        if host.userIntent == 'edit':
            host.userIntent = 'close'
        self.changeToConfirmClose()
    def initiateClose(self):
        """Called from input window controller."""
        self.view.userIntent = 'close'
        tb = self.view.textBuffer
        if not tb.isDirty():
            self.__closeHostFile(self.view)
            return
        self.view.changeFocusTo(self.view.confirmClose)
    def initiateQuit(self):
        """Called from input window controller."""
        self.view.userIntent = 'quit'
        tb = self.view.textBuffer
        if tb.isDirty():
            self.view.changeFocusTo(self.view.interactiveQuit)
            return
        # Visit any remaining unsaved buffer before quitting for real.
        bufferManager = self.view.program.bufferManager
        tb = bufferManager.getUnsavedBuffer()
        if tb:
            self.view.setTextBuffer(tb)
            self.view.changeFocusTo(self.view.interactiveQuit)
            return
        bufferManager.debugLog()
        self.view.quitNow()
    def initiateSave(self):
        """Called from input window controller."""
        self.view.userIntent = 'edit'
        tb = self.view.textBuffer
        if tb.fullPath:
            if not tb.isSafeToWrite():
                self.view.changeFocusTo(self.view.confirmOverwrite)
                return
            tb.fileWrite()
            return
        # No path yet: ask the user where to save.
        self.changeToSaveAs()
    def overwriteHostFile(self):
        """Write the buffer over the on-disk file, then continue with the
        user's pending intent (quit, close, or keep editing)."""
        host = self.getNamedWindow('inputWindow')
        host.textBuffer.fileWrite()
        if host.userIntent == 'quit':
            self.quitOrSwitchToConfirmQuit()
            return
        if host.userIntent == 'close':
            self.__closeHostFile(host)
        self.changeToHostWindow()
    def nextFocusableWindow(self):
        """Focus the next focusable sibling window; return True on success."""
        window = self.view.parent.nextFocusableWindow(self.view)
        if window is not None:
            self.view.changeFocusTo(window)
        return window is not None
    def priorFocusableWindow(self):
        """Focus the prior focusable sibling window; return True on success."""
        window = self.view.parent.priorFocusableWindow(self.view)
        if window is not None:
            self.view.changeFocusTo(window)
        return window is not None
    def writeOrConfirmOverwrite(self):
        """Ask whether the file should be overwritten."""
        app.log.debug()
        host = self.getNamedWindow('inputWindow')
        tb = host.textBuffer
        if not tb.isSafeToWrite():
            self.changeToConfirmOverwrite()
            return
        tb.fileWrite()
        # TODO(dschuyler): Is there a deeper issue here that necessitates saving
        # the message? Does this only need to wrap the changeToHostWindow()?
        # Store the save message so it is not overwritten.
        saveMessage = tb.message
        if host.userIntent == 'quit':
            self.quitOrSwitchToConfirmQuit()
            return
        if host.userIntent == 'close':
            self.__closeHostFile(host)
        self.changeToHostWindow()
        tb.message = saveMessage  # Restore the save message.
    def quitOrSwitchToConfirmQuit(self):
        """Quit immediately if everything is saved; otherwise show the quit
        confirmation (visiting an unsaved buffer first if needed)."""
        app.log.debug(self, self.view)
        host = self.getNamedWindow('inputWindow')
        tb = host.textBuffer
        host.userIntent = 'quit'
        if tb.isDirty():
            self.changeToConfirmQuit()
            return
        bufferManager = self.view.program.bufferManager
        tb = bufferManager.getUnsavedBuffer()
        if tb:
            host.setTextBuffer(tb)
            self.changeToConfirmQuit()
            return
        bufferManager.debugLog()
        host.quitNow()
    def saveOrChangeToSaveAs(self):
        """Write the host buffer if it already has a path; otherwise prompt
        the user for one via the file manager."""
        app.log.debug()
        host = self.getNamedWindow('inputWindow')
        if app.config.strict_debug:
            assert issubclass(self.__class__, Controller), self
            assert issubclass(self.view.__class__, app.window.Window), self
            assert issubclass(host.__class__, app.window.Window), self
            assert self.view.textBuffer is self.textBuffer
            assert self.view.textBuffer is not host.textBuffer
        if host.textBuffer.fullPath:
            self.writeOrConfirmOverwrite()
            return
        self.changeToSaveAs()
    def onChange(self):
        # No-op in the base class; subclasses may override.
        pass
    def saveEventChangeToHostWindow(self, *args):
        """Re-queue the triggering key for the host window, then focus it."""
        curses.ungetch(self.savedCh)
        host = self.getNamedWindow('inputWindow')
        host.bringToFront()
        self.view.changeFocusTo(host)
    def setTextBuffer(self, textBuffer):
        """Bind this controller to the view's text buffer."""
        if app.config.strict_debug:
            assert issubclass(textBuffer.__class__,
                              app.text_buffer.TextBuffer), textBuffer
            assert self.view.textBuffer is textBuffer
        self.textBuffer = textBuffer
    def unfocus(self):
        # No-op in the base class; subclasses may override.
        pass
class MainController:
    """The different keyboard mappings are different controllers. This class
    manages a collection of keyboard mappings and allows the user to switch
    between them."""
    def __init__(self, view):
        if app.config.strict_debug:
            assert issubclass(view.__class__, app.window.Window)
        self.view = view
        self.commandDefault = None
        self.commandSet = None
        self.controllers = {}  # name -> Controller
        self.controller = None  # the currently active controller
    def add(self, controller):
        """Register `controller` by name and make it the active one."""
        self.controllers[controller.name] = controller
        self.controller = controller
    def doCommand(self, ch, meta):
        """Forward one input event to the active controller."""
        self.controller.doCommand(ch, meta)
    def focus(self):
        """Forward focus to the active controller."""
        app.log.info('MainController.focus')
        self.controller.focus()
        if 0:  # Disabled: would bind F2 to switch keyboard mappings.
            self.commandDefault = self.controller.commandDefault
            commandSet = self.controller.commandSet.copy()
            commandSet.update({
                app.curses_util.KEY_F2: self.nextController,
            })
            self.controller.commandSet = commandSet
    def onChange(self):
        self.controller.onChange()
    def nextController(self):
        """Switch to the next keyboard mapping. The mapping rotation itself
        is currently disabled; only re-binds the buffer and refocuses."""
        app.log.info('nextController')
        if 0:  # Disabled: cua/emacs/vi mapping rotation.
            if self.controller is self.controllers['cuaPlus']:
                app.log.info('MainController.nextController cua')
                self.controller = self.controllers['cua']
            elif self.controller is self.controllers['cua']:
                app.log.info('MainController.nextController emacs')
                self.controller = self.controllers['emacs']
            elif self.controller is self.controllers['emacs']:
                app.log.info('MainController.nextController vi')
                self.controller = self.controllers['vi']
            else:
                app.log.info('MainController.nextController cua')
                self.controller = self.controllers['cua']
        self.controller.setTextBuffer(self.textBuffer)
        self.focus()
    def setTextBuffer(self, textBuffer):
        """Remember `textBuffer` and pass it to the active controller."""
        app.log.info('MainController.setTextBuffer', self.controller)
        if app.config.strict_debug:
            assert issubclass(textBuffer.__class__, app.text_buffer.TextBuffer)
        self.textBuffer = textBuffer
        self.controller.setTextBuffer(textBuffer)
    def unfocus(self):
        self.controller.unfocus()
| 33.873016 | 80 | 0.637848 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import curses
import curses.ascii
import app.config
import app.curses_util
import app.log
class Controller:
def __init__(self, view, name):
if app.config.strict_debug:
assert issubclass(self.__class__, Controller)
assert issubclass(view.__class__, app.window.Window)
self.view = view
self.commandDefault = None
self.commandSet = None
self.textBuffer = None
self.name = name
def parentController(self):
view = self.view.parent
while view is not None:
if view.controller is not None:
return view.controller
view = view.parent
def changeToConfirmClose(self):
self.findAndChangeTo('confirmClose')
def changeToConfirmOverwrite(self):
self.findAndChangeTo('confirmOverwrite')
def changeToFileManagerWindow(self, *args):
self.findAndChangeTo('fileManagerWindow')
def changeToConfirmQuit(self):
self.findAndChangeTo('interactiveQuit')
def changeToHostWindow(self, *args):
host = self.getNamedWindow('inputWindow')
if app.config.strict_debug:
assert issubclass(self.view.__class__, app.window.Window), self.view
assert issubclass(host.__class__, app.window.Window), host
self.view.changeFocusTo(host)
def changeToInputWindow(self, *args):
self.findAndChangeTo('inputWindow')
def changeToFind(self):
self.findAndChangeTo('interactiveFind')
def changeToFindPrior(self):
curses.ungetch(self.savedCh)
self.findAndChangeTo('interactiveFind')
def changeToGoto(self):
self.findAndChangeTo('interactiveGoto')
def changeToPaletteWindow(self):
self.findAndChangeTo('paletteWindow')
def changeToPopup(self):
self.findAndChangeTo('popupWindow')
def changeToPrediction(self):
self.findAndChangeTo('predictionWindow')
def changeToPrompt(self):
self.findAndChangeTo('interactivePrompt')
def changeToQuit(self):
self.findAndChangeTo('interactiveQuit')
def changeToSaveAs(self):
view = self.getNamedWindow('fileManagerWindow')
view.setMode('saveAs')
view.bringToFront()
view.changeFocusTo(view)
def createNewTextBuffer(self):
bufferManager = self.view.program.bufferManager
self.view.setTextBuffer(bufferManager.newTextBuffer())
def doCommand(self, ch, meta):
self.savedCh = ch
cmd = (self.commandSet.get(ch) or
self.commandSet.get(app.curses_util.cursesKeyName(ch)))
if cmd:
cmd()
else:
self.commandDefault(ch, meta)
self.textBuffer.compoundChangePush()
def getNamedWindow(self, windowName):
view = self.view
while view is not None:
if hasattr(view, windowName):
return getattr(view, windowName)
view = view.parent
app.log.fatal(windowName + ' not found')
return None
def currentInputWindow(self):
return self.getNamedWindow('inputWindow')
def findAndChangeTo(self, windowName):
window = self.getNamedWindow(windowName)
window.bringToFront()
self.view.changeFocusTo(window)
def changeTo(self, window):
window.bringToFront()
self.view.changeFocusTo(window)
def focus(self):
app.log.info('base controller focus()')
def confirmationPromptFinish(self, *args):
window = self.getNamedWindow('inputWindow')
window.userIntent = 'edit'
window.bringToFront()
self.view.changeFocusTo(window)
def __closeHostFile(self, host):
bufferManager = host.program.bufferManager
bufferManager.closeTextBuffer(host.textBuffer)
host.userIntent = 'edit'
tb = bufferManager.getUnsavedBuffer()
if not tb:
tb = bufferManager.nextBuffer()
if not tb:
tb = bufferManager.newTextBuffer()
host.setTextBuffer(tb)
def closeFile(self):
app.log.info()
host = self.getNamedWindow('inputWindow')
self.__closeHostFile(host)
self.confirmationPromptFinish()
def closeOrConfirmClose(self):
host = self.getNamedWindow('inputWindow')
tb = host.textBuffer
if not tb.isDirty():
self.__closeHostFile(host)
return
if host.userIntent == 'edit':
host.userIntent = 'close'
self.changeToConfirmClose()
def initiateClose(self):
self.view.userIntent = 'close'
tb = self.view.textBuffer
if not tb.isDirty():
self.__closeHostFile(self.view)
return
self.view.changeFocusTo(self.view.confirmClose)
def initiateQuit(self):
self.view.userIntent = 'quit'
tb = self.view.textBuffer
if tb.isDirty():
self.view.changeFocusTo(self.view.interactiveQuit)
return
bufferManager = self.view.program.bufferManager
tb = bufferManager.getUnsavedBuffer()
if tb:
self.view.setTextBuffer(tb)
self.view.changeFocusTo(self.view.interactiveQuit)
return
bufferManager.debugLog()
self.view.quitNow()
def initiateSave(self):
self.view.userIntent = 'edit'
tb = self.view.textBuffer
if tb.fullPath:
if not tb.isSafeToWrite():
self.view.changeFocusTo(self.view.confirmOverwrite)
return
tb.fileWrite()
return
self.changeToSaveAs()
def overwriteHostFile(self):
host = self.getNamedWindow('inputWindow')
host.textBuffer.fileWrite()
if host.userIntent == 'quit':
self.quitOrSwitchToConfirmQuit()
return
if host.userIntent == 'close':
self.__closeHostFile(host)
self.changeToHostWindow()
def nextFocusableWindow(self):
window = self.view.parent.nextFocusableWindow(self.view)
if window is not None:
self.view.changeFocusTo(window)
return window is not None
def priorFocusableWindow(self):
window = self.view.parent.priorFocusableWindow(self.view)
if window is not None:
self.view.changeFocusTo(window)
return window is not None
def writeOrConfirmOverwrite(self):
app.log.debug()
host = self.getNamedWindow('inputWindow')
tb = host.textBuffer
if not tb.isSafeToWrite():
self.changeToConfirmOverwrite()
return
tb.fileWrite()
saveMessage = tb.message
if host.userIntent == 'quit':
self.quitOrSwitchToConfirmQuit()
return
if host.userIntent == 'close':
self.__closeHostFile(host)
self.changeToHostWindow()
tb.message = saveMessage
def quitOrSwitchToConfirmQuit(self):
app.log.debug(self, self.view)
host = self.getNamedWindow('inputWindow')
tb = host.textBuffer
host.userIntent = 'quit'
if tb.isDirty():
self.changeToConfirmQuit()
return
bufferManager = self.view.program.bufferManager
tb = bufferManager.getUnsavedBuffer()
if tb:
host.setTextBuffer(tb)
self.changeToConfirmQuit()
return
bufferManager.debugLog()
host.quitNow()
def saveOrChangeToSaveAs(self):
app.log.debug()
host = self.getNamedWindow('inputWindow')
if app.config.strict_debug:
assert issubclass(self.__class__, Controller), self
assert issubclass(self.view.__class__, app.window.Window), self
assert issubclass(host.__class__, app.window.Window), self
assert self.view.textBuffer is self.textBuffer
assert self.view.textBuffer is not host.textBuffer
if host.textBuffer.fullPath:
self.writeOrConfirmOverwrite()
return
self.changeToSaveAs()
def onChange(self):
pass
def saveEventChangeToHostWindow(self, *args):
curses.ungetch(self.savedCh)
host = self.getNamedWindow('inputWindow')
host.bringToFront()
self.view.changeFocusTo(host)
def setTextBuffer(self, textBuffer):
if app.config.strict_debug:
assert issubclass(textBuffer.__class__,
app.text_buffer.TextBuffer), textBuffer
assert self.view.textBuffer is textBuffer
self.textBuffer = textBuffer
def unfocus(self):
pass
class MainController:
def __init__(self, view):
if app.config.strict_debug:
assert issubclass(view.__class__, app.window.Window)
self.view = view
self.commandDefault = None
self.commandSet = None
self.controllers = {}
self.controller = None
def add(self, controller):
self.controllers[controller.name] = controller
self.controller = controller
def doCommand(self, ch, meta):
self.controller.doCommand(ch, meta)
def focus(self):
app.log.info('MainController.focus')
self.controller.focus()
if 0:
self.commandDefault = self.controller.commandDefault
commandSet = self.controller.commandSet.copy()
commandSet.update({
app.curses_util.KEY_F2: self.nextController,
})
self.controller.commandSet = commandSet
def onChange(self):
self.controller.onChange()
def nextController(self):
app.log.info('nextController')
if 0:
if self.controller is self.controllers['cuaPlus']:
app.log.info('MainController.nextController cua')
self.controller = self.controllers['cua']
elif self.controller is self.controllers['cua']:
app.log.info('MainController.nextController emacs')
self.controller = self.controllers['emacs']
elif self.controller is self.controllers['emacs']:
app.log.info('MainController.nextController vi')
self.controller = self.controllers['vi']
else:
app.log.info('MainController.nextController cua')
self.controller = self.controllers['cua']
self.controller.setTextBuffer(self.textBuffer)
self.focus()
def setTextBuffer(self, textBuffer):
app.log.info('MainController.setTextBuffer', self.controller)
if app.config.strict_debug:
assert issubclass(textBuffer.__class__, app.text_buffer.TextBuffer)
self.textBuffer = textBuffer
self.controller.setTextBuffer(textBuffer)
def unfocus(self):
self.controller.unfocus()
| true | true |
f717419b82396fef65c67279f9002be3e2b4df00 | 4,704 | py | Python | scratchpad/basic_async_server.py | cnb0/katcp-python | 35c860bc17ee6404cc59a14f7d1b8ac1fae4b73c | [
"BSD-3-Clause"
] | 8 | 2015-02-25T20:13:54.000Z | 2019-09-12T06:12:07.000Z | scratchpad/basic_async_server.py | cnb0/katcp-python | 35c860bc17ee6404cc59a14f7d1b8ac1fae4b73c | [
"BSD-3-Clause"
] | 67 | 2015-01-12T09:58:36.000Z | 2021-05-12T14:23:26.000Z | scratchpad/basic_async_server.py | cnb0/katcp-python | 35c860bc17ee6404cc59a14f7d1b8ac1fae4b73c | [
"BSD-3-Clause"
] | 15 | 2015-04-28T13:18:28.000Z | 2021-01-19T16:16:33.000Z | # Copyright 2016 National Research Foundation (South African Radio Astronomy Observatory)
# BSD license - see LICENSE for details
from __future__ import absolute_import, division, print_function
import random
import signal
import time
import tornado
from katcp import AsyncReply, DeviceServer, ProtocolFlags, Sensor
from katcp.kattypes import (Discrete, Float, Str, Timestamp, request,
return_reply)
server_host = ""
server_port = 5000
class MyServer(DeviceServer):
    # NOTE: request handler docstrings below are left byte-identical on
    # purpose -- katcp exposes handler docstrings as client-visible help text.
    VERSION_INFO = ("example-api", 1, 0)
    BUILD_INFO = ("example-implementation", 0, 1, "")
    # Optionally set the KATCP protocol version and features. Defaults to
    # the latest implemented version of KATCP, with all supported optional
    # features
    PROTOCOL_INFO = ProtocolFlags(5, 0, set([
        ProtocolFlags.MULTI_CLIENT,
        ProtocolFlags.MESSAGE_IDS,
    ]))
    # Valid values for ?pick-fruit and the fruit.result discrete sensor.
    FRUIT = [
        "apple", "banana", "pear", "kiwi",
    ]
    def setup_sensors(self):
        """Setup some server sensors."""
        # NOTE(review): each sensor is seeded with a different non-nominal
        # status (UNREACHABLE/INACTIVE/UNKNOWN/ERROR) -- presumably to
        # demonstrate status handling; confirm before relying on it.
        self._add_result = Sensor.float("add.result",
            "Last ?add result.", "", [-10000, 10000])
        self._add_result.set_value(0, Sensor.UNREACHABLE)
        self._time_result = Sensor.timestamp("time.result",
            "Last ?time result.", "")
        self._time_result.set_value(0, Sensor.INACTIVE)
        self._eval_result = Sensor.string("eval.result",
            "Last ?eval result.", "")
        self._eval_result.set_value('', Sensor.UNKNOWN)
        self._fruit_result = Sensor.discrete("fruit.result",
            "Last ?pick-fruit result.", "", self.FRUIT)
        self._fruit_result.set_value('apple', Sensor.ERROR)
        self.add_sensor(self._add_result)
        self.add_sensor(self._time_result)
        self.add_sensor(self._eval_result)
        self.add_sensor(self._fruit_result)
    @request(Float(), Float())
    @return_reply(Float())
    def request_add(self, req, x, y):
        """Add two numbers"""
        r = x + y
        self._add_result.set_value(r)
        return ("ok", r)
    @request()
    @return_reply(Timestamp())
    def request_time(self, req):
        """Return the current time in seconds since the Unix Epoch."""
        r = time.time()
        self._time_result.set_value(r)
        return ("ok", r)
    # SECURITY: eval() of a client-supplied string executes arbitrary code.
    # Acceptable only in this example server; never expose on a real network.
    @request(Str())
    @return_reply(Str())
    def request_eval(self, req, expression):
        """Evaluate a Python expression."""
        r = str(eval(expression))
        self._eval_result.set_value(r)
        return ("ok", r)
    @request()
    @return_reply(Discrete(FRUIT))
    def request_pick_fruit(self, req):
        """Pick a random fruit."""
        r = random.choice(self.FRUIT + [None])
        if r is None:
            return ("fail", "No fruit.")
        delay = random.randrange(1,5)
        req.inform("Picking will take %d seconds" % delay)
        # Reply asynchronously after `delay` seconds; raising AsyncReply
        # tells katcp not to send an immediate reply for this request.
        def pick_handler():
            self._fruit_result.set_value(r)
            req.reply("ok", r)
        self.ioloop.add_callback(
            self.ioloop.call_later, delay, pick_handler)
        raise AsyncReply
    @request(Str())
    @return_reply()
    def request_set_sensor_inactive(self, req, sensor_name):
        """Set sensor status to inactive"""
        sensor = self.get_sensor(sensor_name)
        ts, status, value = sensor.read()
        sensor.set_value(value, sensor.INACTIVE, ts)
        return('ok',)
    @request(Str())
    @return_reply()
    def request_set_sensor_unreachable(self, req, sensor_name):
        """Set sensor status to unreachable"""
        sensor = self.get_sensor(sensor_name)
        ts, status, value = sensor.read()
        sensor.set_value(value, sensor.UNREACHABLE, ts)
        return('ok',)
    def request_raw_reverse(self, req, msg):
        """
        A raw request handler to demonstrate the calling convention if
        @request decoraters are not used. Reverses the message arguments.
        """
        # msg is a katcp.Message.request object
        reversed_args = msg.arguments[::-1]
        # req.make_reply() makes a katcp.Message.reply using the correct request
        # name and message ID
        return req.make_reply(*reversed_args)
@tornado.gen.coroutine
def on_shutdown(ioloop, server):
    """Stop the device server cleanly, then stop the tornado IOLoop."""
    print('Shutting down')
    yield server.stop()
    ioloop.stop()
if __name__ == "__main__":
    ioloop = tornado.ioloop.IOLoop.current()
    server = MyServer(server_host, server_port)
    # Run the server entirely on the ioloop thread (no handler thread).
    server.set_concurrency_options(thread_safe=False, handler_thread=False)
    server.set_ioloop(ioloop)
    # Ctrl-C triggers an orderly shutdown via the ioloop (signal-safe callback).
    signal.signal(signal.SIGINT, lambda sig, frame: ioloop.add_callback_from_signal(
        on_shutdown, ioloop, server))
    ioloop.add_callback(server.start)
    ioloop.start()
| 32 | 89 | 0.639881 |
from __future__ import absolute_import, division, print_function
import random
import signal
import time
import tornado
from katcp import AsyncReply, DeviceServer, ProtocolFlags, Sensor
from katcp.kattypes import (Discrete, Float, Str, Timestamp, request,
return_reply)
server_host = ""
server_port = 5000
class MyServer(DeviceServer):
VERSION_INFO = ("example-api", 1, 0)
BUILD_INFO = ("example-implementation", 0, 1, "")
PROTOCOL_INFO = ProtocolFlags(5, 0, set([
ProtocolFlags.MULTI_CLIENT,
ProtocolFlags.MESSAGE_IDS,
]))
FRUIT = [
"apple", "banana", "pear", "kiwi",
]
def setup_sensors(self):
self._add_result = Sensor.float("add.result",
"Last ?add result.", "", [-10000, 10000])
self._add_result.set_value(0, Sensor.UNREACHABLE)
self._time_result = Sensor.timestamp("time.result",
"Last ?time result.", "")
self._time_result.set_value(0, Sensor.INACTIVE)
self._eval_result = Sensor.string("eval.result",
"Last ?eval result.", "")
self._eval_result.set_value('', Sensor.UNKNOWN)
self._fruit_result = Sensor.discrete("fruit.result",
"Last ?pick-fruit result.", "", self.FRUIT)
self._fruit_result.set_value('apple', Sensor.ERROR)
self.add_sensor(self._add_result)
self.add_sensor(self._time_result)
self.add_sensor(self._eval_result)
self.add_sensor(self._fruit_result)
@request(Float(), Float())
@return_reply(Float())
def request_add(self, req, x, y):
r = x + y
self._add_result.set_value(r)
return ("ok", r)
@request()
@return_reply(Timestamp())
def request_time(self, req):
r = time.time()
self._time_result.set_value(r)
return ("ok", r)
@request(Str())
@return_reply(Str())
def request_eval(self, req, expression):
r = str(eval(expression))
self._eval_result.set_value(r)
return ("ok", r)
@request()
@return_reply(Discrete(FRUIT))
def request_pick_fruit(self, req):
r = random.choice(self.FRUIT + [None])
if r is None:
return ("fail", "No fruit.")
delay = random.randrange(1,5)
req.inform("Picking will take %d seconds" % delay)
def pick_handler():
self._fruit_result.set_value(r)
req.reply("ok", r)
self.ioloop.add_callback(
self.ioloop.call_later, delay, pick_handler)
raise AsyncReply
@request(Str())
@return_reply()
def request_set_sensor_inactive(self, req, sensor_name):
sensor = self.get_sensor(sensor_name)
ts, status, value = sensor.read()
sensor.set_value(value, sensor.INACTIVE, ts)
return('ok',)
@request(Str())
@return_reply()
def request_set_sensor_unreachable(self, req, sensor_name):
sensor = self.get_sensor(sensor_name)
ts, status, value = sensor.read()
sensor.set_value(value, sensor.UNREACHABLE, ts)
return('ok',)
def request_raw_reverse(self, req, msg):
reversed_args = msg.arguments[::-1]
return req.make_reply(*reversed_args)
@tornado.gen.coroutine
def on_shutdown(ioloop, server):
print('Shutting down')
yield server.stop()
ioloop.stop()
if __name__ == "__main__":
ioloop = tornado.ioloop.IOLoop.current()
server = MyServer(server_host, server_port)
server.set_concurrency_options(thread_safe=False, handler_thread=False)
server.set_ioloop(ioloop)
signal.signal(signal.SIGINT, lambda sig, frame: ioloop.add_callback_from_signal(
on_shutdown, ioloop, server))
ioloop.add_callback(server.start)
ioloop.start()
| true | true |
f71741bac27bc1f5ba082d55bf8dd41a6deacf3b | 2,588 | py | Python | Estructura de datos y su procesamiento/Actividad8-TuplaNominada3.py | fernandomireles/University | 5ebf3cf3e3093a8853cc4903e6f617dda7df4336 | [
"MIT"
] | null | null | null | Estructura de datos y su procesamiento/Actividad8-TuplaNominada3.py | fernandomireles/University | 5ebf3cf3e3093a8853cc4903e6f617dda7df4336 | [
"MIT"
] | null | null | null | Estructura de datos y su procesamiento/Actividad8-TuplaNominada3.py | fernandomireles/University | 5ebf3cf3e3093a8853cc4903e6f617dda7df4336 | [
"MIT"
] | 1 | 2021-08-18T15:21:23.000Z | 2021-08-18T15:21:23.000Z | """
Codificar un algoritmo en Python que permita registrar la clave (Por el momento,
no esn ecesario validar si la clave es unica), el nombre y correo electrónico
de múltiples personas, hasta que el usuario indique que ha concluído con la captura
correspondiente (proponga usted el mecanismo para esto).
Una vez concluída la captura, se deberá desplegar el listado completo de las personas registradas.
NOTA: Puede elegir utilizar tupla nominadas (recomendable) o bien, listas anidadadas"""
SEPARADOR = ("*" * 20)
from collections import namedtuple # Librería para tuplas nominadas
Personas = namedtuple("Personas",["clave","nombre","correoElectronico"]) # Declaración de estructura de tupla
ListaPersonas=[] # Lista vacía para meter datos (tuplas)
while True: #Menú de opciones polivalentes
print("\n-- Bienvenido(a) al Menu")
print("1) Agregar una persona")
print("2) Búsqueda específica")
print("3) Ver listado completo")
print("4) Salir")
opcionElegida = input("> ")
if opcionElegida == "4": # Salida
print("Gracias por usar el programa, buen día")
break
if opcionElegida == "1": # Agregar persona
clave = input("Porfavor ingrese su clave: ")
nombre = input("Porfavor ingrese su nombre: ")
correoElectronico = input("Porfavor introduzca su correo electrónico: ")
TuplaPersona = Personas(clave,nombre,correoElectronico) # Se organiza la tupla temporal
print(SEPARADOR)
ListaPersonas.append(TuplaPersona) # Se almacena en lista la tupla temporal
if opcionElegida == "2": # Búsqueda específica
if ListaPersonas:
claveBuscado = input("Ingrese la clave a buscar: ")
for busqueda in ListaPersonas:
if(busqueda.clave) == claveBuscado:
print("\nHemos encontrado la clave:", claveBuscado)
print("El nombre es:", busqueda.nombre, "y su correo:", busqueda.correoElectronico)
else:
print("No se encuentra ningun registro")
if opcionElegida == "3": # Impresión de listado completo
if ListaPersonas:
print("\nListado completo de personas:")
print("|{:<10}|{:<15}|{:<25}|".format("Clave","Nombre","Correo electrónico"))
for entrada in ListaPersonas: # Ciclo "for" para impresión vertical
print("|{:<10}|{:<15}|{:<25}|".format(entrada.clave,entrada.nombre,entrada.correoElectronico))
else:
print("No se encuentra ningún registro")
| 47.925926 | 111 | 0.651468 |
SEPARADOR = ("*" * 20)  # visual separator printed after each insertion
from collections import namedtuple  # library for named tuples
Personas = namedtuple("Personas",["clave","nombre","correoElectronico"])  # record layout: key, name, e-mail
ListaPersonas=[]  # empty list that will hold the Personas tuples
while True:  # main menu loop; runs until the user picks option 4
    print("\n-- Bienvenido(a) al Menu")
    print("1) Agregar una persona")
    print("2) Búsqueda específica")
    print("3) Ver listado completo")
    print("4) Salir")
    opcionElegida = input("> ")
    if opcionElegida == "4":  # exit the program
        print("Gracias por usar el programa, buen día")
        break
    if opcionElegida == "1":  # capture and store one person
        clave = input("Porfavor ingrese su clave: ")
        nombre = input("Porfavor ingrese su nombre: ")
        correoElectronico = input("Porfavor introduzca su correo electrónico: ")
        TuplaPersona = Personas(clave,nombre,correoElectronico)  # assemble the record
        print(SEPARADOR)
        ListaPersonas.append(TuplaPersona)  # store the record in the list
    if opcionElegida == "2":  # search for a person by key
        if ListaPersonas:
            claveBuscado = input("Ingrese la clave a buscar: ")
            for busqueda in ListaPersonas:
                if(busqueda.clave) == claveBuscado:
                    print("\nHemos encontrado la clave:", claveBuscado)
                    print("El nombre es:", busqueda.nombre, "y su correo:", busqueda.correoElectronico)
        else:
            print("No se encuentra ningun registro")
    if opcionElegida == "3":  # print the full listing as a table
        if ListaPersonas:
            print("\nListado completo de personas:")
            print("|{:<10}|{:<15}|{:<25}|".format("Clave","Nombre","Correo electrónico"))
            for entrada in ListaPersonas:  # one table row per stored person
                print("|{:<10}|{:<15}|{:<25}|".format(entrada.clave,entrada.nombre,entrada.correoElectronico))
        else:
            print("No se encuentra ningún registro")
f717423cc3d548d9864cbc9e2e9fcc26f024bacd | 1,833 | py | Python | backend/mlarchive/bin/check_spam_legacy.py | dkg/mailarch | 562757c09e212c202c35231d7e7c588cd4d3fb65 | [
"BSD-3-Clause"
] | 6 | 2022-03-09T23:10:28.000Z | 2022-03-21T05:32:40.000Z | backend/mlarchive/bin/check_spam_legacy.py | dkg/mailarch | 562757c09e212c202c35231d7e7c588cd4d3fb65 | [
"BSD-3-Clause"
] | 5 | 2022-03-11T09:39:47.000Z | 2022-03-30T16:48:09.000Z | backend/mlarchive/bin/check_spam_legacy.py | dkg/mailarch | 562757c09e212c202c35231d7e7c588cd4d3fb65 | [
"BSD-3-Clause"
] | 4 | 2022-03-04T15:36:19.000Z | 2022-03-28T23:45:44.000Z | #!../../../env/bin/python
"""
Script to scan through archive of mbox files and produce a spam report.
"""
# Standalone broilerplate -------------------------------------------------------------
from django_setup import do_setup
do_setup()
# -------------------------------------------------------------------------------------
import argparse
import email
import logging
import os
import shutil
import subprocess
import sys
from django.conf import settings
from mlarchive.bin.scan_utils import get_messages
progname = sys.argv[0]
from django.utils.log import getLogger
import logging.config
logging.config.dictConfig(settings.LOGGING)
logger = getLogger('mlarchive.custom')
def ensure_dir(path):
    """Create *path* (and any missing parents) and open it up to mode 2777.

    A no-op when the directory already exists.
    """
    if os.path.exists(path):
        return
    os.makedirs(path)
    os.chmod(path, 0o2777)
def main():
    """Scan an archive directory tree for spam and print per-list stats.

    Each subdirectory of the given path is treated as a mailing list; every
    message in it is piped to ``spamc -c`` (SpamAssassin client), whose
    non-zero exit status marks the message as spam.  Prints one
    ``<list>, <total>:<spam>`` line per list.
    """
    parser = argparse.ArgumentParser(description='Scan archive for spam.')
    parser.add_argument('path')
    parser.add_argument('-v', '--verbose', help='verbose output', action='store_true')
    args = parser.parse_args()

    if not os.path.isdir(args.path):
        parser.error('{} must be a directory'.format(args.path))

    fullnames = [os.path.join(args.path, n) for n in os.listdir(args.path)]
    elists = list(filter(os.path.isdir, fullnames))
    for elist in elists:
        total = 0
        spam = 0
        for msg in get_messages(elist):
            total += 1
            # scan: spamc exits non-zero when the message scores as spam
            p = subprocess.Popen(['spamc', '-c'], stdin=subprocess.PIPE)
            # BUGFIX: under Python 3 a default Popen pipe expects bytes;
            # passing the str from as_string() raises TypeError. Encode it.
            p.communicate(input=msg.as_string().encode('utf-8', errors='replace'))
            if p.returncode != 0:
                # the message is spam
                spam += 1
                if args.verbose:
                    print("%s: spam" % elist)
        # per-list stats
        print("{}, {}:{}".format(os.path.basename(elist), total, spam))


if __name__ == "__main__":
    main()
| 27.772727 | 87 | 0.585379 |
from django_setup import do_setup
do_setup()
import argparse
import email
import logging
import os
import shutil
import subprocess
import sys
from django.conf import settings
from mlarchive.bin.scan_utils import get_messages
progname = sys.argv[0]
from django.utils.log import getLogger
import logging.config
logging.config.dictConfig(settings.LOGGING)
logger = getLogger('mlarchive.custom')
def ensure_dir(path):
    """Create *path* (and missing parents) if absent, then chmod it to 2777."""
    if not os.path.exists(path):
        os.makedirs(path)
        os.chmod(path,0o2777)
def main():
    """Scan an archive directory tree for spam and print per-list stats.

    Each subdirectory of the given path is treated as a mailing list; every
    message in it is piped to ``spamc -c`` (SpamAssassin client), whose
    non-zero exit status marks the message as spam.  Prints one
    ``<list>, <total>:<spam>`` line per list.
    """
    parser = argparse.ArgumentParser(description='Scan archive for spam.')
    parser.add_argument('path')
    parser.add_argument('-v', '--verbose', help='verbose output', action='store_true')
    args = parser.parse_args()

    if not os.path.isdir(args.path):
        parser.error('{} must be a directory'.format(args.path))

    fullnames = [os.path.join(args.path, n) for n in os.listdir(args.path)]
    elists = list(filter(os.path.isdir, fullnames))
    for elist in elists:
        total = 0
        spam = 0
        for msg in get_messages(elist):
            total += 1
            # spamc exits non-zero when the message scores as spam
            p = subprocess.Popen(['spamc', '-c'], stdin=subprocess.PIPE)
            # BUGFIX: under Python 3 a default Popen pipe expects bytes;
            # passing the str from as_string() raises TypeError. Encode it.
            p.communicate(input=msg.as_string().encode('utf-8', errors='replace'))
            if p.returncode != 0:
                spam += 1
                if args.verbose:
                    print("%s: spam" % elist)
        print("{}, {}:{}".format(os.path.basename(elist), total, spam))


if __name__ == "__main__":
    main()
| true | true |
f717427fe7f805450f84f4ef7ba5df39674ef6cb | 348 | py | Python | ja/code_snippets/api-embeds-enable.py | quotecenter/documentation-1 | f365703264761aa2b19d5d1d8ec55a3a6082ef4d | [
"BSD-3-Clause"
] | null | null | null | ja/code_snippets/api-embeds-enable.py | quotecenter/documentation-1 | f365703264761aa2b19d5d1d8ec55a3a6082ef4d | [
"BSD-3-Clause"
] | null | null | null | ja/code_snippets/api-embeds-enable.py | quotecenter/documentation-1 | f365703264761aa2b19d5d1d8ec55a3a6082ef4d | [
"BSD-3-Clause"
] | null | null | null | from datadog import initialize, api
# Intialize request parameters including API/APP key
options = {
'api_key': '<YOUR_API_KEY>',
'app_key': '<YOUR_APP_KEY>'
}
initialize(**options)
# Set Embed ID (token)
embed_id = "5f585b01c81b12ecdf5f40df0382738d0919170639985d3df5e2fc4232865b0c"
# Call Embed API function
api.Embed.enable(embed_id)
| 21.75 | 77 | 0.761494 | from datadog import initialize, api
# Request parameters, including the API/APP keys
options = {
    'api_key': '<YOUR_API_KEY>',
    'app_key': '<YOUR_APP_KEY>'
}
# Authenticate the datadog client with the keys above
initialize(**options)
# Token identifying the embeddable graph to enable
embed_id = "5f585b01c81b12ecdf5f40df0382738d0919170639985d3df5e2fc4232865b0c"
# Re-enable the shared embed via the Embed API
api.Embed.enable(embed_id)
| true | true |
f71742b2238dd40bc0984373a331e975450b3324 | 2,827 | py | Python | tests/TestAptChefProvisionerPlugin.py | dhellmann/aminator | 96efa7d5690bfae2c20b21f0b417b2784f6cb085 | [
"Apache-2.0"
] | null | null | null | tests/TestAptChefProvisionerPlugin.py | dhellmann/aminator | 96efa7d5690bfae2c20b21f0b417b2784f6cb085 | [
"Apache-2.0"
] | null | null | null | tests/TestAptChefProvisionerPlugin.py | dhellmann/aminator | 96efa7d5690bfae2c20b21f0b417b2784f6cb085 | [
"Apache-2.0"
] | 1 | 2020-01-06T16:18:22.000Z | 2020-01-06T16:18:22.000Z | # -*- coding: utf-8 -*-
#
#
# Copyright 2013 Netflix, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
import logging
import json
from aminator.config import Config
from aminator.plugins.provisioner.apt_chef import AptChefProvisionerPlugin
log = logging.getLogger(__name__)
console = logging.StreamHandler()
# add the handler to the root logger
logging.getLogger('').addHandler(console)
class TestAptChefProvisionerPlugin(object):
    """Unit tests for AptChefProvisionerPlugin chef-node JSON handling."""

    def setup_method(self, method):
        # Wire a fresh plugin to the fixture at ./tests/test_chef_node.json.
        plugin = AptChefProvisionerPlugin()
        plugin._config = Config()
        plugin._config.context = Config()
        plugin._config.context.chef = Config()
        plugin._config.context.package = Config()
        plugin._config.pkg_attributes = ['name', 'version', 'release', 'build_job', 'build_number']
        plugin._config.context.chef.dir = "./tests"
        plugin._config.context.chef.json = "test_chef_node.json"
        self.chef_provisioner = plugin

    def test_parse_json(self):
        # Direct check of the fields exposed by the chef node JSON fixture.
        json_path = self.chef_provisioner._get_chef_json_full_path()
        with open(json_path) as handle:
            node = json.load(handle)
        assert "helloworld" == node['name']
        assert "APP-helloworld" == node['build_job']
        assert "1.0" == node['version']
        assert "277" == node['release']
        assert "33a9d1cac7686c8a46c1f330add2e8d36850fd15" == node['change']
        assert isinstance(node['run_list'], list)
        assert "recipe[helloworld]" == node['run_list'][0]

    def test_metadata(self):
        # _store_package_metadata should copy fixture values into the config.
        self.chef_provisioner._store_package_metadata()
        attributes = self.chef_provisioner._config.context.package.attributes
        assert "helloworld" == attributes['name']
        assert "1.0" == attributes['version']
        assert "277" == attributes['release']
        assert "APP-helloworld" == attributes['build_job']
        assert "277" == attributes['build_number']
| 41.573529 | 114 | 0.710294 |
import logging
import json
from aminator.config import Config
from aminator.plugins.provisioner.apt_chef import AptChefProvisionerPlugin
log = logging.getLogger(__name__)
console = logging.StreamHandler()
logging.getLogger('').addHandler(console)
class TestAptChefProvisionerPlugin(object):
    """Unit tests for AptChefProvisionerPlugin chef-node JSON handling."""
    def setup_method(self, method):
        # Wire a fresh plugin to the fixture at ./tests/test_chef_node.json.
        self.chef_provisioner = AptChefProvisionerPlugin()
        self.chef_provisioner._config = Config()
        self.chef_provisioner._config.context = Config()
        self.chef_provisioner._config.context.chef = Config()
        self.chef_provisioner._config.context.package = Config()
        self.chef_provisioner._config.pkg_attributes = ['name', 'version', 'release', 'build_job', 'build_number']
        self.chef_provisioner._config.context.chef.dir = "./tests"
        self.chef_provisioner._config.context.chef.json = "test_chef_node.json"
    def test_parse_json(self):
        # this is more a direct test of the ChefJSON mapping
        with open(self.chef_provisioner._get_chef_json_full_path()) as chef_json_file:
            my_json = json.load(chef_json_file)
        assert "helloworld" == my_json['name']
        assert "APP-helloworld" == my_json['build_job']
        assert "1.0" == my_json['version']
        assert "277" == my_json['release']
        assert "33a9d1cac7686c8a46c1f330add2e8d36850fd15" == my_json['change']
        assert isinstance(my_json['run_list'], list)
        assert "recipe[helloworld]" == my_json['run_list'][0]
    def test_metadata(self):
        # _store_package_metadata should copy fixture values into the config.
        self.chef_provisioner._store_package_metadata()
        assert "helloworld" == self.chef_provisioner._config.context.package.attributes['name']
        assert "1.0" == self.chef_provisioner._config.context.package.attributes['version']
        assert "277" == self.chef_provisioner._config.context.package.attributes['release']
        assert "APP-helloworld" == self.chef_provisioner._config.context.package.attributes['build_job']
        assert "277" == self.chef_provisioner._config.context.package.attributes['build_number']
| true | true |
f71742c671520b22e56777c333e9e3fd20648561 | 2,331 | py | Python | tests/models/validators/v1_3_0/jsd_d9bdb9034df99dba.py | oboehmer/dnacentersdk | 25c4e99900640deee91a56aa886874d9cb0ca960 | [
"MIT"
] | 32 | 2019-09-05T05:16:56.000Z | 2022-03-22T09:50:38.000Z | tests/models/validators/v1_3_0/jsd_d9bdb9034df99dba.py | oboehmer/dnacentersdk | 25c4e99900640deee91a56aa886874d9cb0ca960 | [
"MIT"
] | 35 | 2019-09-07T18:58:54.000Z | 2022-03-24T19:29:36.000Z | tests/models/validators/v1_3_0/jsd_d9bdb9034df99dba.py | oboehmer/dnacentersdk | 25c4e99900640deee91a56aa886874d9cb0ca960 | [
"MIT"
] | 18 | 2019-09-09T11:07:21.000Z | 2022-03-25T08:49:59.000Z | # -*- coding: utf-8 -*-
"""Cisco DNA Center Get Site Count data model.
Copyright (c) 2019-2021 Cisco Systems.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from __future__ import (
absolute_import,
division,
print_function,
unicode_literals,
)
import fastjsonschema
import json
from dnacentersdk.exceptions import MalformedRequest
from builtins import *
class JSONSchemaValidatorD9BdB9034Df99Dba(object):
    """Get Site Count request schema definition."""
    def __init__(self):
        """Compile the fastjsonschema validator for this endpoint's payload."""
        super(JSONSchemaValidatorD9BdB9034Df99Dba, self).__init__()
        # The schema is an indented string literal; the .replace() strips the
        # 16-space source indentation before json.loads parses it.
        self._validator = fastjsonschema.compile(json.loads(
            '''{
                "properties": {
                "response": {
                "type": [
                "string",
                "null"
                ]
                },
                "version": {
                "type": [
                "string",
                "null"
                ]
                }
                },
                "type": "object"
                }'''.replace("\n" + ' ' * 16, '')
        ))
    def validate(self, request):
        """Validate ``request`` against the schema; raise MalformedRequest on failure."""
        try:
            self._validator(request)
        except fastjsonschema.exceptions.JsonSchemaException as e:
            raise MalformedRequest(
                '{} is invalid. Reason: {}'.format(request, e.message)
            )
| 32.830986 | 78 | 0.639211 |
from __future__ import (
absolute_import,
division,
print_function,
unicode_literals,
)
import fastjsonschema
import json
from dnacentersdk.exceptions import MalformedRequest
from builtins import *
class JSONSchemaValidatorD9BdB9034Df99Dba(object):
    """Get Site Count request schema definition (auto-generated)."""
    def __init__(self):
        """Compile the fastjsonschema validator for this endpoint's payload."""
        super(JSONSchemaValidatorD9BdB9034Df99Dba, self).__init__()
        # The schema is an indented string literal; the .replace() strips the
        # 16-space source indentation before json.loads parses it.
        self._validator = fastjsonschema.compile(json.loads(
            '''{
                "properties": {
                "response": {
                "type": [
                "string",
                "null"
                ]
                },
                "version": {
                "type": [
                "string",
                "null"
                ]
                }
                },
                "type": "object"
                }'''.replace("\n" + ' ' * 16, '')
        ))
    def validate(self, request):
        """Validate ``request`` against the schema; raise MalformedRequest on failure."""
        try:
            self._validator(request)
        except fastjsonschema.exceptions.JsonSchemaException as e:
            raise MalformedRequest(
                '{} is invalid. Reason: {}'.format(request, e.message)
            )
| true | true |
f71744883794a583ccf9436508cd8c98a384800a | 1,579 | py | Python | artascope/test/lib/test_user_status_manager.py | magus0219/icloud-photo-downloader | 6334530d971cf61089d031de99a38f204c201837 | [
"MIT"
] | 3 | 2020-09-24T16:19:28.000Z | 2022-02-09T21:10:11.000Z | artascope/test/lib/test_user_status_manager.py | magus0219/icloud-photo-downloader | 6334530d971cf61089d031de99a38f204c201837 | [
"MIT"
] | null | null | null | artascope/test/lib/test_user_status_manager.py | magus0219/icloud-photo-downloader | 6334530d971cf61089d031de99a38f204c201837 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Created by magus0219[magus0219@gmail.com] on 2020/3/30
import datetime
from artascope.src.lib.user_status_manager import usm
class TestUserStatusManager:
    """Tests for the user status manager singleton ``usm``."""

    def test_add_user(self):
        # A newly added user carries its name and an integer creation timestamp.
        usm.add_user(username="username")
        status = usm.get_user(username="username")
        now_ts = datetime.datetime.now().timestamp()
        assert (
            status.username == "username"
            and type(status.created_ts) == int
            and status.created_ts < now_ts
        )

    def test_add_existed_user(self):
        # Adding the same username twice must not create a duplicate entry.
        for _ in range(2):
            usm.add_user(username="username")
        assert len(usm.get_all_user()) == 1

    def test_get_user(self):
        assert usm.get_user("not_exist") is None
        usm.add_user(username="username")
        assert usm.get_user("username").username == "username"

    def test_exist_user(self):
        assert usm.exist_user("not_exist") is False
        usm.add_user(username="username")
        assert usm.exist_user("username") is True

    def test_get_all_user(self):
        # Empty manager reports None; otherwise users come back in insertion order.
        assert usm.get_all_user() is None
        usm.add_user(username="username1")
        usm.add_user(username="username2")
        users = usm.get_all_user()
        assert users[0].username == "username1"
        assert users[1].username == "username2"
        assert len(users) == 2

    def test_add_dup_user(self):
        # Duplicate adds collapse to a single stored user.
        for _ in range(2):
            usm.add_user(username="username")
        assert len(usm.get_all_user()) == 1
| 28.709091 | 67 | 0.636479 |
import datetime
from artascope.src.lib.user_status_manager import usm
class TestUserStatusManager:
    """Tests for the user status manager singleton ``usm``."""
    def test_add_user(self):
        # A newly added user carries its name and an integer creation timestamp.
        usm.add_user(username="username")
        us = usm.get_user(username="username")
        assert (
            us.username == "username"
            and type(us.created_ts) == int
            and us.created_ts < datetime.datetime.now().timestamp()
        )
    def test_add_existed_user(self):
        # Adding the same username twice must not create a duplicate entry.
        usm.add_user(username="username")
        usm.add_user(username="username")
        us_list = usm.get_all_user()
        assert len(us_list) == 1
    def test_get_user(self):
        assert usm.get_user("not_exist") is None
        usm.add_user(username="username")
        assert usm.get_user("username").username == "username"
    def test_exist_user(self):
        assert usm.exist_user("not_exist") is False
        usm.add_user(username="username")
        assert usm.exist_user("username") is True
    def test_get_all_user(self):
        # Empty manager reports None; otherwise users come back in insertion order.
        assert usm.get_all_user() is None
        usm.add_user(username="username1")
        usm.add_user(username="username2")
        us_list = usm.get_all_user()
        assert us_list[0].username == "username1"
        assert us_list[1].username == "username2"
        assert len(us_list) == 2
    def test_add_dup_user(self):
        # Duplicate adds collapse to a single stored user.
        usm.add_user(username="username")
        usm.add_user(username="username")
        us_list = usm.get_all_user()
        assert len(us_list) == 1
| true | true |
f7174560bad50e0fdbb28b5776553cba721ab30f | 2,919 | py | Python | swagger_client/models/all_of_permission_set_administration_rights.py | ike709/tgs4-api-pyclient | 97918cfe614cc4ef06ef2485efff163417a8cd44 | [
"MIT"
] | null | null | null | swagger_client/models/all_of_permission_set_administration_rights.py | ike709/tgs4-api-pyclient | 97918cfe614cc4ef06ef2485efff163417a8cd44 | [
"MIT"
] | null | null | null | swagger_client/models/all_of_permission_set_administration_rights.py | ike709/tgs4-api-pyclient | 97918cfe614cc4ef06ef2485efff163417a8cd44 | [
"MIT"
] | null | null | null | # coding: utf-8
"""
TGS API
A production scale tool for BYOND server management # noqa: E501
OpenAPI spec version: 9.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from swagger_client.models.administration_rights import AdministrationRights # noqa: F401,E501
class AllOfPermissionSetAdministrationRights(AdministrationRights):
    """NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """
    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # This allOf wrapper declares no fields of its own; both maps are merged
    # entirely from the parent AdministrationRights model.
    swagger_types = {
    }
    if hasattr(AdministrationRights, "swagger_types"):
        swagger_types.update(AdministrationRights.swagger_types)
    attribute_map = {
    }
    if hasattr(AdministrationRights, "attribute_map"):
        attribute_map.update(AdministrationRights.attribute_map)
    def __init__(self, *args, **kwargs):  # noqa: E501
        """AllOfPermissionSetAdministrationRights - a model defined in Swagger"""  # noqa: E501
        self.discriminator = None
        AdministrationRights.__init__(self, *args, **kwargs)
    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        # Walk swagger_types and recursively to_dict() nested models,
        # lists of models, and dict values that are models.
        for attr, _ in six.iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        if issubclass(AllOfPermissionSetAdministrationRights, dict):
            for key, value in self.items():
                result[key] = value
        return result
    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())
    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()
    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, AllOfPermissionSetAdministrationRights):
            return False
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| 32.076923 | 95 | 0.59815 |
import pprint
import re
import six
from swagger_client.models.administration_rights import AdministrationRights
class AllOfPermissionSetAdministrationRights(AdministrationRights):
    """Auto-generated Swagger allOf model; all fields come from AdministrationRights."""
    # No fields of its own; both maps are merged from the parent model.
    swagger_types = {
    }
    if hasattr(AdministrationRights, "swagger_types"):
        swagger_types.update(AdministrationRights.swagger_types)
    attribute_map = {
    }
    if hasattr(AdministrationRights, "attribute_map"):
        attribute_map.update(AdministrationRights.attribute_map)
    def __init__(self, *args, **kwargs):
        """Delegate construction entirely to AdministrationRights."""
        self.discriminator = None
        AdministrationRights.__init__(self, *args, **kwargs)
    def to_dict(self):
        """Return the model properties as a dict (recursing into nested models)."""
        result = {}
        for attr, _ in six.iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        if issubclass(AllOfPermissionSetAdministrationRights, dict):
            for key, value in self.items():
                result[key] = value
        return result
    def to_str(self):
        """Return the string representation of the model."""
        return pprint.pformat(self.to_dict())
    def __repr__(self):
        """For `print` and `pprint`."""
        return self.to_str()
    def __eq__(self, other):
        """Return True if both objects are equal."""
        if not isinstance(other, AllOfPermissionSetAdministrationRights):
            return False
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """Return True if both objects are not equal."""
        return not self == other
| true | true |
f71747aaed07f08c55da22df67f2dc5bc6ae9a92 | 742 | py | Python | ocr/paint.py | BumagniyPacket/ocr | f2651f3a23cf835a689b35a658ef3443086fd72a | [
"Apache-2.0"
] | null | null | null | ocr/paint.py | BumagniyPacket/ocr | f2651f3a23cf835a689b35a658ef3443086fd72a | [
"Apache-2.0"
] | null | null | null | ocr/paint.py | BumagniyPacket/ocr | f2651f3a23cf835a689b35a658ef3443086fd72a | [
"Apache-2.0"
] | 1 | 2019-02-07T19:56:33.000Z | 2019-02-07T19:56:33.000Z | # -*- coding: utf-8 -*-
import matplotlib.pyplot as plt
def show_image(image):
    """Display a single image, negated, using the 'Greys' colormap."""
    plt.imshow(-image, cmap='Greys')
    plt.show()
def show_two(image1, image2):
    """Display two negated greyscale images side by side."""
    plt.subplot(121)
    plt.imshow(-image1, cmap='Greys')
    plt.subplot(122)
    plt.imshow(-image2, cmap='Greys')
    plt.show()
def plot_hist(img):
    """Plot a 256-bin histogram of pixel intensities over the range [0, 1]."""
    plt.hist(img.ravel(), 256, range=(0., 1.), color='red')
    plt.show()
def plot_2img_2hist(image1, image2):
    """Show two images (top row) above their intensity histograms (bottom row)."""
    plt.subplot(221)
    plt.imshow(-image1, cmap='Greys')
    plt.subplot(223)
    plt.hist(image1.ravel(), 256, range=(0., 1.), color='red')
    plt.subplot(222)
    plt.imshow(-image2, cmap='Greys')
    plt.subplot(224)
    plt.hist(image2.ravel(), 256, range=(0., 1.), color='red')
    plt.show()
| 18.55 | 62 | 0.610512 |
import matplotlib.pyplot as plt
def show_image(image):
    """Display a single image, negated, using the 'Greys' colormap."""
    plt.imshow(-image, cmap='Greys')
    plt.show()
def show_two(image1, image2):
    """Display two negated greyscale images side by side."""
    plt.subplot(121)
    plt.imshow(-image1, cmap='Greys')
    plt.subplot(122)
    plt.imshow(-image2, cmap='Greys')
    plt.show()
def plot_hist(img):
    """Plot a 256-bin histogram of pixel intensities over the range [0, 1]."""
    plt.hist(img.ravel(), 256, range=(0., 1.), color='red')
    plt.show()
def plot_2img_2hist(image1, image2):
    """Show two images (top row) above their intensity histograms (bottom row)."""
    plt.subplot(221)
    plt.imshow(-image1, cmap='Greys')
    plt.subplot(223)
    plt.hist(image1.ravel(), 256, range=(0., 1.), color='red')
    plt.subplot(222)
    plt.imshow(-image2, cmap='Greys')
    plt.subplot(224)
    plt.hist(image2.ravel(), 256, range=(0., 1.), color='red')
    plt.show()
| true | true |
f7174853e5691cb8f3c8388d4ff3c6a48d541046 | 21,576 | py | Python | gpMgmt/bin/gppylib/commands/base.py | abhisheknishant138/gpdb | 1805743d505837026aa137cabb8a7072d745a129 | [
"PostgreSQL",
"Apache-2.0"
] | 4 | 2017-11-28T08:12:58.000Z | 2020-10-28T04:15:52.000Z | gpMgmt/bin/gppylib/commands/base.py | abhisheknishant138/gpdb | 1805743d505837026aa137cabb8a7072d745a129 | [
"PostgreSQL",
"Apache-2.0"
] | null | null | null | gpMgmt/bin/gppylib/commands/base.py | abhisheknishant138/gpdb | 1805743d505837026aa137cabb8a7072d745a129 | [
"PostgreSQL",
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
#
# Copyright (c) Greenplum Inc 2008. All Rights Reserved.
#
"""
base.py
common base for the commands execution framework. Units of work are defined as Operations
as found in other modules like unix.py. These units of work are then packaged up and executed
within a GpCommand. A GpCommand is just a common infrastructure for executing an Operation.
The general idea is that the application developer breaks the problem down into a set of
GpCommands that need to be executed. This class also provides a queue and set of workers
for executing this set of commands.
"""
from queue import Queue, Empty
from threading import Thread
import os
import signal
import subprocess
import sys
import time
from gppylib import gplog
from gppylib import gpsubprocess
from pg import DB
logger = gplog.get_default_logger()
GPHOME = os.environ.get('GPHOME')
# Maximum retries if sshd rejects the connection due to too many
# unauthenticated connections.
SSH_MAX_RETRY = 10
# Delay before retrying ssh connection, in seconds
SSH_RETRY_DELAY = .5
class WorkerPool(object):
    """Pool of Worker threads that execute queued Command objects.

    Work is queued with addCommand() (or the ``items`` constructor argument)
    and executed by ``numWorkers`` background Worker threads; finished items
    accumulate in ``completed_queue`` until drained via getCompletedItems(),
    check_results(), or empty_completed_items().
    """
    # Sentinel pushed onto the work queue to wake a worker so it can exit.
    halt_command = 'halt command'
    def __init__(self, numWorkers=16, items=None, daemonize=False, logger=gplog.get_default_logger()):
        # NOTE(review): the default ``logger`` is evaluated once at import
        # time, so all pools share that logger unless one is passed in.
        if numWorkers <= 0:
            raise Exception("WorkerPool(): numWorkers should be greater than 0.")
        self.workers = []
        self.should_stop = False
        self.work_queue = Queue()
        self.completed_queue = Queue()
        self._assigned = 0
        self.daemonize = daemonize
        self.logger = logger
        if items is not None:
            for item in items:
                self.addCommand(item)
        # Start the worker threads; they begin pulling work immediately.
        for i in range(0, numWorkers):
            w = Worker("worker%d" % i, self)
            self.workers.append(w)
            w.start()
        self.numWorkers = numWorkers
    ###
    def getNumWorkers(self):
        """Return the number of worker threads in this pool."""
        return self.numWorkers
    def getNextWorkItem(self):
        """Block until a work item is available and return it (used by Worker)."""
        return self.work_queue.get(block=True)
    def addFinishedWorkItem(self, command):
        """Record ``command`` as finished and mark its work-queue slot done."""
        self.completed_queue.put(command)
        self.work_queue.task_done()
    def markTaskDone(self):
        """Mark one work-queue item done without recording a result."""
        self.work_queue.task_done()
    def addCommand(self, cmd):
        """Queue ``cmd`` for execution and increment the assigned count."""
        self.logger.debug("Adding cmd to work_queue: %s" % cmd.cmdStr)
        self.work_queue.put(cmd)
        self._assigned += 1
    def _join_work_queue_with_timeout(self, timeout):
        """
        Queue.join() unfortunately doesn't take a timeout (see
        https://bugs.python.org/issue9634). Fake it here, with a solution
        inspired by notes on that bug report.
        XXX This solution uses undocumented Queue internals (though they are not
        underscore-prefixed...).
        """
        done_condition = self.work_queue.all_tasks_done
        done_condition.acquire()
        try:
            while self.work_queue.unfinished_tasks:
                if (timeout <= 0):
                    # Timed out.
                    return False
                start_time = time.time()
                done_condition.wait(timeout)
                timeout -= (time.time() - start_time)
        finally:
            done_condition.release()
        return True
    def join(self, timeout=None):
        """
        Waits (up to an optional timeout) for the worker queue to be fully
        completed, and returns True if the pool is now done with its work.
        A None timeout indicates that join() should wait forever; the return
        value is always True in this case. Zero and negative timeouts indicate
        that join() will query the queue status and return immediately, whether
        the queue is done or not.
        """
        if timeout is None:
            self.work_queue.join()
            return True
        return self._join_work_queue_with_timeout(timeout)
    def joinWorkers(self):
        """Block until every worker thread has exited (pair with haltWork())."""
        for w in self.workers:
            w.join()
    def _pop_completed(self):
        """
        Pops an item off the completed queue and decrements the assigned count.
        If the queue is empty, throws Queue.Empty.
        """
        item = self.completed_queue.get(False)
        self._assigned -= 1
        return item
    def getCompletedItems(self):
        """Drain and return every item currently in the completed queue."""
        completed_list = []
        try:
            while True:
                item = self._pop_completed() # will throw Empty
                if item is not None:
                    completed_list.append(item)
        except Empty:
            return completed_list
    def check_results(self):
        """ goes through all items in the completed_queue and throws an exception at the
            first one that didn't execute successfully
            throws ExecutionError
        """
        try:
            while True:
                item = self._pop_completed() # will throw Empty
                if not item.get_results().wasSuccessful():
                    raise ExecutionError("Error Executing Command: ", item)
        except Empty:
            return
    def empty_completed_items(self):
        """Discard everything in the completed queue (decrements assigned)."""
        while not self.completed_queue.empty():
            self._pop_completed()
    def isDone(self):
        # TODO: not sure that qsize() is safe
        return (self.assigned == self.completed_queue.qsize())
    @property
    def assigned(self):
        """
        A read-only count of the number of commands that have been added to the
        pool. This count is only decremented when items are removed from the
        completed queue via getCompletedItems(), empty_completed_items(), or
        check_results().
        """
        return self._assigned
    @property
    def completed(self):
        """
        A read-only count of the items in the completed queue. Will be reset to
        zero after a call to empty_completed_items() or getCompletedItems().
        """
        return self.completed_queue.qsize()
    def haltWork(self):
        """Signal all workers to stop and wake each one with a halt sentinel."""
        self.logger.debug("WorkerPool haltWork()")
        self.should_stop = True
        for w in self.workers:
            w.haltWork()
            self.work_queue.put(self.halt_command)
def join_and_indicate_progress(pool, outfile=sys.stdout, interval=1):
    """
    Wait for a WorkerPool to finish its work, flushing a dot to ``outfile``
    each time the join times out. If at least one dot was printed (i.e. the
    work outlived one printing interval), a trailing newline is also printed.

    ``outfile`` and the polling ``interval`` (seconds) can be overridden.
    """
    dots_written = False
    while True:
        if pool.join(interval):
            break
        outfile.write('.')
        outfile.flush()
        dots_written = True
    if dots_written:
        outfile.write('\n')
class OperationWorkerPool(WorkerPool):
    """ TODO: This is a hack! In reality, the WorkerPool should work with Operations, and
    Command should be a subclass of Operation. Till then, we'll spoof the necessary Command
    functionality within Operation. """
    def __init__(self, numWorkers=16, operations=None):
        # Give each Operation a cmdStr attribute before the base class queues it,
        # since WorkerPool.addCommand logs cmd.cmdStr.
        if operations is not None:
            for operation in operations:
                self._spoof_operation(operation)
        super(OperationWorkerPool, self).__init__(numWorkers, operations)
    def check_results(self):
        """Unsupported: Operations expose no get_results() for the pool to inspect."""
        raise NotImplementedError("OperationWorkerPool has no means of verifying success.")
    def _spoof_operation(self, operation):
        """Masquerade ``operation`` as a Command by giving it a cmdStr."""
        operation.cmdStr = str(operation)
class Worker(Thread):
    """Thread that repeatedly pulls items from its WorkerPool and runs them."""
    pool = None    # owning WorkerPool
    cmd = None     # command currently being executed; None when idle
    name = None    # thread name, e.g. "worker0"
    logger = None
    def __init__(self, name, pool):
        self.name = name
        self.pool = pool
        # Binds the module-level logger (not pool.logger).
        self.logger = logger
        Thread.__init__(self)
        self.daemon = pool.daemonize
    def run(self):
        """Main loop: execute queued commands until a halt sentinel arrives."""
        while True:
            try:
                try:
                    self.cmd = self.pool.getNextWorkItem()
                except TypeError:
                    # misleading exception raised during interpreter shutdown
                    return
                # we must have got a command to run here
                if self.cmd is None:
                    self.logger.debug("[%s] got a None cmd" % self.name)
                    self.pool.markTaskDone()
                elif self.cmd is self.pool.halt_command:
                    # Halt sentinel: acknowledge the queue slot and exit.
                    self.logger.debug("[%s] got a halt cmd" % self.name)
                    self.pool.markTaskDone()
                    self.cmd = None
                    return
                elif self.pool.should_stop:
                    # Pool is stopping: drain the item without running it.
                    self.logger.debug("[%s] got cmd and pool is stopped: %s" % (self.name, self.cmd))
                    self.pool.markTaskDone()
                    self.cmd = None
                else:
                    self.logger.debug("[%s] got cmd: %s" % (self.name, self.cmd.cmdStr))
                    self.cmd.run()
                    self.logger.debug("[%s] finished cmd: %s" % (self.name, self.cmd))
                    self.pool.addFinishedWorkItem(self.cmd)
                    self.cmd = None
            except Exception as e:
                # Never let an exception kill the worker; log it and report the
                # command as finished so the pool's accounting stays consistent.
                self.logger.exception(e)
                if self.cmd:
                    self.logger.debug("[%s] finished cmd with exception: %s" % (self.name, self.cmd))
                    self.pool.addFinishedWorkItem(self.cmd)
                    self.cmd = None
    def haltWork(self):
        self.logger.debug("[%s] haltWork" % self.name)
        # this was originally coded as
        #
        #    if self.cmd is not None:
        #        self.cmd.interrupt()
        #        self.cmd.cancel()
        #
        # but as observed in MPP-13808, the worker thread's run() loop may set self.cmd to None
        # past the point where the calling thread checks self.cmd for None, leading to a curious
        # "'NoneType' object has no attribute 'cancel' exception" which may prevent the worker pool's
        # haltWorkers() from actually halting all the workers.
        #
        c = self.cmd
        if c is not None and isinstance(c, Command):
            c.interrupt()
            c.cancel()
"""
TODO: consider just having a single interface that needs to be implemented for
describing work, to allow the Workers to use it. This would let the user
supply the necessary logic: even though the user wants to
execute a unix command, how the results are interpreted is highly
application specific. So we should have a separate level of abstraction
for executing UnixCommands and DatabaseCommands from this one.
other things to think about:
-- how to support cancel
-- how to support progress
-- undo?
-- blocking vs. non-blocking
"""
# --------------------------------NEW WORLD-----------------------------------
class CommandResult():
    """Packages up the outcome of a GpCommand execution.

    Holds the return code, captured stdout/stderr, and the completed/halt
    flags recorded by the execution context.
    """
    def __init__(self, rc, stdout, stderr, completed, halt, pickled=False):
        self.rc = rc
        if pickled:
            # Pickled output arrives already decoded; note that stderr is
            # not assigned in this branch (preserved behavior) — TODO confirm
            # callers never read .stderr for pickled results.
            self.stdout = stdout
        else:
            self.stdout = stdout.decode()
            self.stderr = stderr.decode()
        self.completed = completed
        self.halt = halt
    def printResult(self):
        """Render the result as a multi-line human-readable summary."""
        return ("cmd had rc=%s completed=%s halted=%s\n stdout='%s'\n "
                "stderr='%s'" % (str(self.rc), str(self.completed),
                                 str(self.halt), self.stdout, self.stderr))
    def wasSuccessful(self):
        """Success means: not halted, ran to completion, and exited with rc 0."""
        return (not self.halt) and self.completed and (self.rc == 0)
    def __str__(self):
        return self.printResult()
    def split_stdout(self, how=':'):
        """
        TODO: AK: This doesn't belong here if it pertains only to pg_controldata.

        MPP-16318: Skip over discrepancies in the pg_controldata stdout, as it's
        not this code's responsibility to judge the pg_controldata stdout. This is
        especially true for 'immediate' shutdown, in which case, we won't even
        care for WARNINGs or other pg_controldata discrepancies.
        """
        # Yield only the lines that split into exactly two fields on `how`.
        for raw_line in self.stdout.split('\n'):
            fields = raw_line.split(how, 1)
            if len(fields) == 2:
                yield fields
class ExecutionError(Exception):
    """Raised when a Command finished unsuccessfully.

    Carries a short human-readable summary plus the offending Command so
    callers can inspect its cmdStr and results.
    """
    def __init__(self, summary, cmd):
        self.summary = summary  # short description of what went wrong
        self.cmd = cmd          # the Command object that failed
    def __str__(self):
        # TODO: improve dumping of self.cmd
        details = self.cmd.get_results().printResult()
        return "ExecutionError: '%s' occurred. Details: '%s' %s" % (
            self.summary, self.cmd.cmdStr, details)
# Execution-context type identifiers: run on this host (LOCAL) or over
# ssh on another host (REMOTE).
LOCAL = 1
REMOTE = 2
# Optional factory hook; when set, createExecutionContext() delegates to it.
gExecutionContextFactory = None
#
# @param factory needs to have a createExecutionContext(self, execution_context_id, remoteHost, stdin) function
#
def setExecutionContextFactory(factory):
    # Install (or clear, by passing None) the global execution-context factory.
    global gExecutionContextFactory
    gExecutionContextFactory = factory
def createExecutionContext(execution_context_id, remoteHost, stdin, gphome=None):
    """Build the ExecutionContext matching *execution_context_id*.

    A registered factory (see setExecutionContextFactory) takes precedence;
    otherwise LOCAL and REMOTE map to their context classes.  Unknown ids
    fall through and return None (preserved behavior).
    """
    if gExecutionContextFactory is not None:
        return gExecutionContextFactory.createExecutionContext(execution_context_id, remoteHost, stdin)
    if execution_context_id == LOCAL:
        return LocalExecutionContext(stdin)
    if execution_context_id == REMOTE:
        # A remote context is meaningless without a target host.
        if remoteHost is None:
            raise Exception("Programmer Error. Specified REMOTE execution context but didn't provide a remoteHost")
        return RemoteExecutionContext(remoteHost, stdin, gphome)
class ExecutionContext():
    """Abstract base describing where/how a Command runs and how its
    results are gathered.  Concrete contexts override the hooks below."""
    def __init__(self):
        pass
    def execute(self, cmd):
        """Run *cmd*; concrete contexts attach results to it."""
        pass
    def interrupt(self):
        """Ask a running command to halt."""
        pass
    def cancel(self):
        """Forcefully terminate a running command."""
        pass
class LocalExecutionContext(ExecutionContext):
    """Runs a command on the local host under a /bin/bash subprocess."""
    proc = None        # gpsubprocess.Popen handle once execute() has run
    halt = False       # set by interrupt(); recorded into the CommandResult
    completed = False  # True once the child process has been waited on
    def __init__(self, stdin):
        # stdin: optional data piped to the child process on execution
        ExecutionContext.__init__(self)
        self.stdin = stdin
        pass
    def execute(self, cmd, wait=True, pickled=False):
        """Spawn cmd.cmdStr; when wait is True, block and attach a
        CommandResult to cmd (pickled controls stdout/stderr decoding)."""
        # prepend env. variables from ExcecutionContext.propagate_env_map
        # e.g. Given {'FOO': 1, 'BAR': 2}, we'll produce "FOO=1 BAR=2 ..."
        # also propagate env from command instance specific map
        keys = sorted(list(cmd.propagate_env_map.keys()), reverse=True)
        for k in keys:
            cmd.cmdStr = "%s=%s && %s" % (k, cmd.propagate_env_map[k], cmd.cmdStr)
        # executable='/bin/bash' is to ensure the shell is bash. bash isn't the
        # actual command executed, but the shell that command string runs under.
        self.proc = gpsubprocess.Popen(cmd.cmdStr, env=None, shell=True,
                                       executable='/bin/bash',
                                       stdin=subprocess.PIPE,
                                       stderr=subprocess.PIPE,
                                       stdout=subprocess.PIPE, close_fds=True)
        cmd.pid = self.proc.pid
        if wait:
            (rc, stdout_value, stderr_value) = self.proc.communicate2(input=self.stdin)
            self.completed = True
            cmd.set_results(CommandResult(
                rc, stdout_value, stderr_value, self.completed, self.halt, pickled=pickled))
    def cancel(self):
        # Send SIGTERM to the child; ignore the race where it already exited.
        if self.proc:
            try:
                os.kill(self.proc.pid, signal.SIGTERM)
            except OSError:
                pass
    def interrupt(self):
        # Flag the halt (surfaces as CommandResult.halt) and cancel the child.
        self.halt = True
        if self.proc:
            self.proc.cancel()
class RemoteExecutionContext(LocalExecutionContext):
    """Executes a command on another host by wrapping it in an ssh
    invocation that first sources greenplum_path.sh on the far side."""
    # Leaves a trail of hosts to which we've ssh'ed, during the life of a
    # particular interpreter (shared across instances; debugging aid).
    trail = set()

    def __init__(self, targetHost, stdin, gphome=None):
        LocalExecutionContext.__init__(self, stdin)
        self.targetHost = targetHost
        # Fall back to this process's $GPHOME when none is supplied.
        if gphome:
            self.gphome = gphome
        else:
            self.gphome = GPHOME

    def execute(self, cmd, pickled=False):
        """Rewrite cmd.cmdStr into an ssh command line and run it locally,
        transparently retrying transient ssh connection failures."""
        # prepend env. variables from ExcecutionContext.propagate_env_map
        # e.g. Given {'FOO': 1, 'BAR': 2}, we'll produce "FOO=1 BAR=2 ..."
        self.__class__.trail.add(self.targetHost)

        # also propagate env from command instance specific map
        keys = sorted(list(cmd.propagate_env_map.keys()), reverse=True)
        for k in keys:
            cmd.cmdStr = "%s=%s && %s" % (k, cmd.propagate_env_map[k], cmd.cmdStr)

        # Escape \ and " so the command survives the remote shell's quoting.
        cmd.cmdStr = cmd.cmdStr.replace('\\', '\\\\').replace('"', '\\"')
        cmd.cmdStr = "ssh -o StrictHostKeyChecking=no -o ServerAliveInterval=60 " \
                     "{targethost} \"{gphome} {cmdstr}\"".format(targethost=self.targetHost,
                                                                 gphome=". %s/greenplum_path.sh;" % self.gphome,
                                                                 cmdstr=cmd.cmdStr)
        LocalExecutionContext.execute(self, cmd, pickled=pickled)
        if (cmd.get_results().stderr.startswith('ssh_exchange_identification: Connection closed by remote host')):
            self.__retry(cmd, pickled=pickled)

    def __retry(self, cmd, count=0, pickled=False):
        """Re-run *cmd* after a short delay, up to SSH_MAX_RETRY attempts.

        BUGFIX: 'pickled' was previously referenced here without being
        defined in this scope, so any retry raised NameError; it is now
        threaded through from execute().
        """
        if count == SSH_MAX_RETRY:
            return
        time.sleep(SSH_RETRY_DELAY)
        LocalExecutionContext.execute(self, cmd, pickled=pickled)
        if (cmd.get_results().stderr.startswith('ssh_exchange_identification: Connection closed by remote host')):
            self.__retry(cmd, count + 1, pickled=pickled)
class Command(object):
    """A shell command plus the execution context (local or ssh) it runs in.

    Results are captured as a CommandResult, retrievable via get_results()
    and the convenience accessors below.
    """
    name = None          # human-readable label used in logs and errors
    cmdStr = None        # the shell command line to execute
    results = None       # CommandResult, populated after run()
    exec_context = None  # Local/RemoteExecutionContext doing the work
    propagate_env_map = {} # specific environment variables for this command instance
    # NOTE(review): propagate_env_map is a class-level dict, so mutations are
    # shared across all Command instances unless an instance shadows it.
    def __init__(self, name, cmdStr, ctxt=LOCAL, remoteHost=None, stdin=None, gphome=None, pickled=False):
        self.name = name
        self.cmdStr = cmdStr
        self.exec_context = createExecutionContext(ctxt, remoteHost, stdin=stdin,
                                                   gphome=gphome)
        self.remoteHost = remoteHost
        self.logger = gplog.get_default_logger()
        self.pickled = pickled
    def __str__(self):
        if self.results:
            return "%s cmdStr='%s' had result: %s" % (self.name, self.cmdStr, self.results)
        else:
            return "%s cmdStr='%s'" % (self.name, self.cmdStr)
    # Start a process that will execute the command but don't wait for
    # it to complete. Return the Popen object instead.
    def runNoWait(self):
        self.exec_context.execute(self, wait=False, pickled=self.pickled)
        return self.exec_context.proc
    def run(self, validateAfter=False):
        """Execute the command, blocking until it finishes; when
        validateAfter is True, raise ExecutionError on a non-zero rc."""
        self.logger.debug("Running Command: %s" % self.cmdStr)
        self.exec_context.execute(self, pickled=self.pickled)
        if validateAfter:
            self.validate()
        pass
    def set_results(self, results):
        # Called by the execution context once the child has finished.
        self.results = results
    def get_results(self):
        return self.results
    def get_stdout(self, strip=True):
        if self.results is None:
            raise Exception("command not run yet")
        return self.results.stdout if not strip else self.results.stdout.strip()
    def get_stdout_lines(self):
        return self.results.stdout.splitlines()
    def get_stderr_lines(self):
        return self.results.stderr.splitlines()
    def get_return_code(self):
        if self.results is None:
            raise Exception("command not run yet")
        return self.results.rc
    def get_stderr(self):
        if self.results is None:
            raise Exception("command not run yet")
        return self.results.stderr
    def cancel(self):
        # Forward to the execution context, which owns the child process.
        if self.exec_context and isinstance(self.exec_context, ExecutionContext):
            self.exec_context.cancel()
    def interrupt(self):
        if self.exec_context and isinstance(self.exec_context, ExecutionContext):
            self.exec_context.interrupt()
    def was_successful(self):
        # False (rather than an exception) when the command hasn't run yet.
        if self.results is None:
            return False
        else:
            return self.results.wasSuccessful()
    def validate(self, expected_rc=0):
        """Plain vanilla validation which expects a 0 return code."""
        if self.results.rc != expected_rc:
            self.logger.debug(self.results)
            raise ExecutionError("non-zero rc: %d" % self.results.rc, self)
class SQLCommand(Command):
    """Base class for commands that execute SQL statements. Classes
    that inherit from SQLCommand should set cancel_conn to the pygresql
    connection they wish to cancel and check self.cancel_flag."""
    def __init__(self, name):
        Command.__init__(self, name, cmdStr=None)
        self.cancel_flag = False   # set by cancel(); subclasses should poll it
        self.cancel_conn = None    # pygresql connection to cancel, if any
    def run(self, validateAfter=False):
        # Abstract: subclasses must provide the actual SQL execution.
        raise ExecutionError("programmer error. implementors of SQLCommand must implement run()", self)
    def interrupt(self):
        # No execution context for SQLCommands
        pass
    def cancel(self):
        # assignment is an atomic operation in python
        self.cancel_flag = True
        # if self.conn is not set we cannot cancel.
        if self.cancel_conn:
            DB(self.cancel_conn).cancel()
def run_remote_commands(name, commands):
    """Run *commands* ({host: command string}) concurrently over ssh.

    Blocks until every command has finished, raises ExecutionError if any
    failed, and returns {host: Command} for result inspection.
    """
    pool = WorkerPool()
    cmds = {}
    for host, cmd_text in commands.items():
        command = Command(name=name, cmdStr=cmd_text, ctxt=REMOTE, remoteHost=host)
        cmds[host] = command
        pool.addCommand(command)
    pool.join()
    pool.check_results()
    return cmds
| 33.451163 | 116 | 0.612023 |
from queue import Queue, Empty
from threading import Thread
import os
import signal
import subprocess
import sys
import time
from gppylib import gplog
from gppylib import gpsubprocess
from pg import DB
logger = gplog.get_default_logger()
GPHOME = os.environ.get('GPHOME')
SSH_MAX_RETRY = 10
SSH_RETRY_DELAY = .5
class WorkerPool(object):
halt_command = 'halt command'
def __init__(self, numWorkers=16, items=None, daemonize=False, logger=gplog.get_default_logger()):
if numWorkers <= 0:
raise Exception("WorkerPool(): numWorkers should be greater than 0.")
self.workers = []
self.should_stop = False
self.work_queue = Queue()
self.completed_queue = Queue()
self._assigned = 0
self.daemonize = daemonize
self.logger = logger
if items is not None:
for item in items:
self.addCommand(item)
for i in range(0, numWorkers):
w = Worker("worker%d" % i, self)
self.workers.append(w)
w.start()
self.numWorkers = numWorkers
def getNumWorkers(self):
return self.numWorkers
def getNextWorkItem(self):
return self.work_queue.get(block=True)
def addFinishedWorkItem(self, command):
self.completed_queue.put(command)
self.work_queue.task_done()
def markTaskDone(self):
self.work_queue.task_done()
def addCommand(self, cmd):
self.logger.debug("Adding cmd to work_queue: %s" % cmd.cmdStr)
self.work_queue.put(cmd)
self._assigned += 1
def _join_work_queue_with_timeout(self, timeout):
done_condition = self.work_queue.all_tasks_done
done_condition.acquire()
try:
while self.work_queue.unfinished_tasks:
if (timeout <= 0):
return False
start_time = time.time()
done_condition.wait(timeout)
timeout -= (time.time() - start_time)
finally:
done_condition.release()
return True
def join(self, timeout=None):
if timeout is None:
self.work_queue.join()
return True
return self._join_work_queue_with_timeout(timeout)
def joinWorkers(self):
for w in self.workers:
w.join()
def _pop_completed(self):
item = self.completed_queue.get(False)
self._assigned -= 1
return item
def getCompletedItems(self):
completed_list = []
try:
while True:
item = self._pop_completed()
if item is not None:
completed_list.append(item)
except Empty:
return completed_list
def check_results(self):
try:
while True:
item = self._pop_completed()
if not item.get_results().wasSuccessful():
raise ExecutionError("Error Executing Command: ", item)
except Empty:
return
def empty_completed_items(self):
while not self.completed_queue.empty():
self._pop_completed()
def isDone(self):
return (self.assigned == self.completed_queue.qsize())
@property
def assigned(self):
return self._assigned
@property
def completed(self):
return self.completed_queue.qsize()
def haltWork(self):
self.logger.debug("WorkerPool haltWork()")
self.should_stop = True
for w in self.workers:
w.haltWork()
self.work_queue.put(self.halt_command)
def join_and_indicate_progress(pool, outfile=sys.stdout, interval=1):
printed = False
while not pool.join(interval):
outfile.write('.')
outfile.flush()
printed = True
if printed:
outfile.write('\n')
class OperationWorkerPool(WorkerPool):
def __init__(self, numWorkers=16, operations=None):
if operations is not None:
for operation in operations:
self._spoof_operation(operation)
super(OperationWorkerPool, self).__init__(numWorkers, operations)
def check_results(self):
raise NotImplementedError("OperationWorkerPool has no means of verifying success.")
def _spoof_operation(self, operation):
operation.cmdStr = str(operation)
class Worker(Thread):
pool = None
cmd = None
name = None
logger = None
def __init__(self, name, pool):
self.name = name
self.pool = pool
self.logger = logger
Thread.__init__(self)
self.daemon = pool.daemonize
def run(self):
while True:
try:
try:
self.cmd = self.pool.getNextWorkItem()
except TypeError:
return
if self.cmd is None:
self.logger.debug("[%s] got a None cmd" % self.name)
self.pool.markTaskDone()
elif self.cmd is self.pool.halt_command:
self.logger.debug("[%s] got a halt cmd" % self.name)
self.pool.markTaskDone()
self.cmd = None
return
elif self.pool.should_stop:
self.logger.debug("[%s] got cmd and pool is stopped: %s" % (self.name, self.cmd))
self.pool.markTaskDone()
self.cmd = None
else:
self.logger.debug("[%s] got cmd: %s" % (self.name, self.cmd.cmdStr))
self.cmd.run()
self.logger.debug("[%s] finished cmd: %s" % (self.name, self.cmd))
self.pool.addFinishedWorkItem(self.cmd)
self.cmd = None
except Exception as e:
self.logger.exception(e)
if self.cmd:
self.logger.debug("[%s] finished cmd with exception: %s" % (self.name, self.cmd))
self.pool.addFinishedWorkItem(self.cmd)
self.cmd = None
def haltWork(self):
self.logger.debug("[%s] haltWork" % self.name)
# past the point where the calling thread checks self.cmd for None, leading to a curious
# "'NoneType' object has no attribute 'cancel' exception" which may prevent the worker pool's
c = self.cmd
if c is not None and isinstance(c, Command):
c.interrupt()
c.cancel()
class CommandResult():
def __init__(self, rc, stdout, stderr, completed, halt, pickled=False):
self.rc = rc
if pickled:
self.stdout = stdout
else:
self.stdout = stdout.decode()
self.stderr = stderr.decode()
self.completed = completed
self.halt = halt
def printResult(self):
res = "cmd had rc=%s completed=%s halted=%s\n stdout='%s'\n " \
"stderr='%s'" % (str(self.rc), str(self.completed), str(self.halt), self.stdout, self.stderr)
return res
def wasSuccessful(self):
if self.halt:
return False
if not self.completed:
return False
if self.rc != 0:
return False
return True
def __str__(self):
return self.printResult()
def split_stdout(self, how=':'):
for line in self.stdout.split('\n'):
ret = line.split(how, 1)
if len(ret) == 2:
yield ret
class ExecutionError(Exception):
def __init__(self, summary, cmd):
self.summary = summary
self.cmd = cmd
def __str__(self):
return "ExecutionError: '%s' occurred. Details: '%s' %s" % \
(self.summary, self.cmd.cmdStr, self.cmd.get_results().printResult())
LOCAL = 1
REMOTE = 2
gExecutionContextFactory = None
def setExecutionContextFactory(factory):
global gExecutionContextFactory
gExecutionContextFactory = factory
def createExecutionContext(execution_context_id, remoteHost, stdin, gphome=None):
if gExecutionContextFactory is not None:
return gExecutionContextFactory.createExecutionContext(execution_context_id, remoteHost, stdin)
elif execution_context_id == LOCAL:
return LocalExecutionContext(stdin)
elif execution_context_id == REMOTE:
if remoteHost is None:
raise Exception("Programmer Error. Specified REMOTE execution context but didn't provide a remoteHost")
return RemoteExecutionContext(remoteHost, stdin, gphome)
class ExecutionContext():
def __init__(self):
pass
def execute(self, cmd):
pass
def interrupt(self):
pass
def cancel(self):
pass
class LocalExecutionContext(ExecutionContext):
proc = None
halt = False
completed = False
def __init__(self, stdin):
ExecutionContext.__init__(self)
self.stdin = stdin
pass
def execute(self, cmd, wait=True, pickled=False):
# prepend env. variables from ExcecutionContext.propagate_env_map
# e.g. Given {'FOO': 1, 'BAR': 2}, we'll produce "FOO=1 BAR=2 ..."
keys = sorted(list(cmd.propagate_env_map.keys()), reverse=True)
for k in keys:
cmd.cmdStr = "%s=%s && %s" % (k, cmd.propagate_env_map[k], cmd.cmdStr)
# actual command executed, but the shell that command string runs under.
self.proc = gpsubprocess.Popen(cmd.cmdStr, env=None, shell=True,
executable='/bin/bash',
stdin=subprocess.PIPE,
stderr=subprocess.PIPE,
stdout=subprocess.PIPE, close_fds=True)
cmd.pid = self.proc.pid
if wait:
(rc, stdout_value, stderr_value) = self.proc.communicate2(input=self.stdin)
self.completed = True
cmd.set_results(CommandResult(
rc, stdout_value, stderr_value, self.completed, self.halt, pickled=pickled))
def cancel(self):
if self.proc:
try:
os.kill(self.proc.pid, signal.SIGTERM)
except OSError:
pass
def interrupt(self):
self.halt = True
if self.proc:
self.proc.cancel()
class RemoteExecutionContext(LocalExecutionContext):
    """Executes a command on another host by wrapping it in an ssh
    invocation that first sources greenplum_path.sh on the far side."""
    # Hosts we've ssh'ed to during the life of this interpreter (debug aid).
    trail = set()

    def __init__(self, targetHost, stdin, gphome=None):
        LocalExecutionContext.__init__(self, stdin)
        self.targetHost = targetHost
        # Fall back to this process's $GPHOME when none is supplied.
        if gphome:
            self.gphome = gphome
        else:
            self.gphome = GPHOME

    def execute(self, cmd, pickled=False):
        """Rewrite cmd.cmdStr into an ssh command line and run it locally,
        transparently retrying transient ssh connection failures."""
        # prepend env. variables from ExcecutionContext.propagate_env_map
        # e.g. Given {'FOO': 1, 'BAR': 2}, we'll produce "FOO=1 BAR=2 ..."
        self.__class__.trail.add(self.targetHost)
        # also propagate env from command instance specific map
        keys = sorted(list(cmd.propagate_env_map.keys()), reverse=True)
        for k in keys:
            cmd.cmdStr = "%s=%s && %s" % (k, cmd.propagate_env_map[k], cmd.cmdStr)
        # Escape \ and " so the command survives the remote shell's quoting.
        cmd.cmdStr = cmd.cmdStr.replace('\\', '\\\\').replace('"', '\\"')
        cmd.cmdStr = "ssh -o StrictHostKeyChecking=no -o ServerAliveInterval=60 " \
                     "{targethost} \"{gphome} {cmdstr}\"".format(targethost=self.targetHost,
                                                                 gphome=". %s/greenplum_path.sh;" % self.gphome,
                                                                 cmdstr=cmd.cmdStr)
        LocalExecutionContext.execute(self, cmd, pickled=pickled)
        if (cmd.get_results().stderr.startswith('ssh_exchange_identification: Connection closed by remote host')):
            self.__retry(cmd, pickled=pickled)

    def __retry(self, cmd, count=0, pickled=False):
        """Re-run *cmd* after a short delay, up to SSH_MAX_RETRY attempts.

        BUGFIX: 'pickled' was previously referenced here without being
        defined in this scope, so any retry raised NameError; it is now
        threaded through from execute().
        """
        if count == SSH_MAX_RETRY:
            return
        time.sleep(SSH_RETRY_DELAY)
        LocalExecutionContext.execute(self, cmd, pickled=pickled)
        if (cmd.get_results().stderr.startswith('ssh_exchange_identification: Connection closed by remote host')):
            self.__retry(cmd, count + 1, pickled=pickled)
class Command(object):
name = None
cmdStr = None
results = None
exec_context = None
propagate_env_map = {} # specific environment variables for this command instance
def __init__(self, name, cmdStr, ctxt=LOCAL, remoteHost=None, stdin=None, gphome=None, pickled=False):
self.name = name
self.cmdStr = cmdStr
self.exec_context = createExecutionContext(ctxt, remoteHost, stdin=stdin,
gphome=gphome)
self.remoteHost = remoteHost
self.logger = gplog.get_default_logger()
self.pickled = pickled
def __str__(self):
if self.results:
return "%s cmdStr='%s' had result: %s" % (self.name, self.cmdStr, self.results)
else:
return "%s cmdStr='%s'" % (self.name, self.cmdStr)
# Start a process that will execute the command but don't wait for
# it to complete. Return the Popen object instead.
def runNoWait(self):
self.exec_context.execute(self, wait=False, pickled=self.pickled)
return self.exec_context.proc
def run(self, validateAfter=False):
self.logger.debug("Running Command: %s" % self.cmdStr)
self.exec_context.execute(self, pickled=self.pickled)
if validateAfter:
self.validate()
pass
def set_results(self, results):
self.results = results
def get_results(self):
return self.results
def get_stdout(self, strip=True):
if self.results is None:
raise Exception("command not run yet")
return self.results.stdout if not strip else self.results.stdout.strip()
def get_stdout_lines(self):
return self.results.stdout.splitlines()
def get_stderr_lines(self):
return self.results.stderr.splitlines()
def get_return_code(self):
if self.results is None:
raise Exception("command not run yet")
return self.results.rc
def get_stderr(self):
if self.results is None:
raise Exception("command not run yet")
return self.results.stderr
def cancel(self):
if self.exec_context and isinstance(self.exec_context, ExecutionContext):
self.exec_context.cancel()
def interrupt(self):
if self.exec_context and isinstance(self.exec_context, ExecutionContext):
self.exec_context.interrupt()
def was_successful(self):
if self.results is None:
return False
else:
return self.results.wasSuccessful()
def validate(self, expected_rc=0):
if self.results.rc != expected_rc:
self.logger.debug(self.results)
raise ExecutionError("non-zero rc: %d" % self.results.rc, self)
class SQLCommand(Command):
def __init__(self, name):
Command.__init__(self, name, cmdStr=None)
self.cancel_flag = False
self.cancel_conn = None
def run(self, validateAfter=False):
raise ExecutionError("programmer error. implementors of SQLCommand must implement run()", self)
def interrupt(self):
# No execution context for SQLCommands
pass
def cancel(self):
# assignment is an atomic operation in python
self.cancel_flag = True
# if self.conn is not set we cannot cancel.
if self.cancel_conn:
DB(self.cancel_conn).cancel()
def run_remote_commands(name, commands):
cmds = {}
pool = WorkerPool()
for host, cmdStr in list(commands.items()):
cmd = Command(name=name, cmdStr=cmdStr, ctxt=REMOTE, remoteHost=host)
pool.addCommand(cmd)
cmds[host] = cmd
pool.join()
pool.check_results()
return cmds
| true | true |
f71748e77f76da11ab56b956f1c6463063b71ea1 | 1,609 | py | Python | main.py | manulaiko/ulauncher-openInBrowser | 2536813431253f4711a950b50669d1bb6f842de4 | [
"MIT"
] | 1 | 2020-06-10T10:34:05.000Z | 2020-06-10T10:34:05.000Z | main.py | manulaiko/ulauncher-openInBrowser | 2536813431253f4711a950b50669d1bb6f842de4 | [
"MIT"
] | 6 | 2018-01-26T10:04:02.000Z | 2020-09-07T17:09:07.000Z | main.py | manulaiko/ulauncher-openInBrowser | 2536813431253f4711a950b50669d1bb6f842de4 | [
"MIT"
] | 5 | 2018-03-24T09:35:38.000Z | 2020-11-02T03:42:09.000Z | from ulauncher.api.client.Extension import Extension
from ulauncher.api.shared.action.ExtensionCustomAction import ExtensionCustomAction
from ulauncher.api.client.EventListener import EventListener
from ulauncher.api.shared.event import KeywordQueryEvent, ItemEnterEvent
from ulauncher.api.shared.item.ExtensionResultItem import ExtensionResultItem
from ulauncher.api.shared.action.RenderResultListAction import RenderResultListAction
from ulauncher.api.shared.action.HideWindowAction import HideWindowAction
import webbrowser
import re
class OpenInBrowser(Extension):
    """Ulauncher extension entry point: wires keyword-query and item-enter
    events to their listener classes."""
    def __init__(self):
        super(OpenInBrowser, self).__init__()
        self.subscribe(KeywordQueryEvent, KeywordQueryEventListener())
        self.subscribe(ItemEnterEvent, ItemEnterEventListener())
class KeywordQueryEventListener(EventListener):
    """Renders a single result item offering to open the typed text."""

    def on_event(self, event, extension):
        """Build the result list for the current query.

        The typed argument is read once and reused for the item title, the
        description, and the custom-action payload (previously it was
        re-fetched with event.get_argument() three separate times).
        """
        data = event.get_argument()
        items = [
            ExtensionResultItem(
                icon='images/icon.png',
                name=data,
                description='Open "%s" in the browser' % data,
                on_enter=ExtensionCustomAction(data, keep_app_open=True)
            )
        ]
        return RenderResultListAction(items)
class ItemEnterEventListener(EventListener):
    """Opens the selected query text in the default web browser."""

    def on_event(self, event, extension):
        url = event.get_data()
        # Prefix a scheme when the user typed a bare host/path; the stored
        # payload is opened as-is otherwise.
        if not re.match(r'^https?://', url):
            url = 'https://' + url
        webbrowser.open_new_tab(url)
        return RenderResultListAction([])
if __name__ == '__main__':
OpenInBrowser().run() | 34.234043 | 85 | 0.712244 | from ulauncher.api.client.Extension import Extension
from ulauncher.api.shared.action.ExtensionCustomAction import ExtensionCustomAction
from ulauncher.api.client.EventListener import EventListener
from ulauncher.api.shared.event import KeywordQueryEvent, ItemEnterEvent
from ulauncher.api.shared.item.ExtensionResultItem import ExtensionResultItem
from ulauncher.api.shared.action.RenderResultListAction import RenderResultListAction
from ulauncher.api.shared.action.HideWindowAction import HideWindowAction
import webbrowser
import re
class OpenInBrowser(Extension):
def __init__(self):
super(OpenInBrowser, self).__init__()
self.subscribe(KeywordQueryEvent, KeywordQueryEventListener())
self.subscribe(ItemEnterEvent, ItemEnterEventListener())
class KeywordQueryEventListener(EventListener):
def on_event(self, event, extension):
data = event.get_argument()
items = [
ExtensionResultItem(
icon='images/icon.png',
name=event.get_argument(),
description='Open "%s" in the browser' % event.get_argument(),
on_enter=ExtensionCustomAction(data, keep_app_open=True)
)
]
return RenderResultListAction(items)
class ItemEnterEventListener(EventListener):
def on_event(self, event, extension):
data = event.get_data()
if not re.match(r'^https?://', data):
data = 'https://'+ data
webbrowser.open_new_tab(data)
return RenderResultListAction([])
if __name__ == '__main__':
OpenInBrowser().run() | true | true |
f7174a07455b8d2615386130e8a5a7f7b941d1ab | 1,222 | py | Python | setup.py | Shravan-1908/pyscreenrec | 286c1a24c95918353388007e3c7fcd23d404ba8f | [
"MIT"
] | 15 | 2021-02-11T16:29:28.000Z | 2021-09-11T14:17:19.000Z | setup.py | Shravan-1908/pyscreenrec | 286c1a24c95918353388007e3c7fcd23d404ba8f | [
"MIT"
] | 2 | 2021-02-26T17:38:32.000Z | 2021-05-19T17:58:34.000Z | setup.py | Shravan-1908/pyscreenrec | 286c1a24c95918353388007e3c7fcd23d404ba8f | [
"MIT"
] | null | null | null | from setuptools import find_packages, setup
# Package version. setuptools expects a PEP 440 version *string*; the
# previous bare float (0.4) is rejected by modern setuptools/packaging.
VERSION = "0.4"

# Use the README as the long description shown on PyPI.
with open("README.md") as f:
    README = f.read()

setup(
    name = "pyscreenrec",
    version = VERSION,
    description = "A small and cross-platform python library for recording screen.",
    long_description_content_type = "text/markdown",
    long_description = README,
    url="https://github.com/Shravan-1908/pyscreenrec",
    author = "Shravan Asati",
    author_email = "dev.shravan@protonmail.com",
    packages = find_packages(),
    install_requires = ["pyscreeze", "opencv-python", "natsort"],
    license = 'MIT',
    keywords = ["python", "screen recording", "screen", "recording", "screenshots"],
    classifiers = [
        "Development Status :: 3 - Alpha",
        "Intended Audience :: Developers",
        "License :: OSI Approved :: MIT License",
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: 3.8",
        "Programming Language :: Python :: 3.9",
        "Operating System :: Unix",
        "Operating System :: MacOS :: MacOS X",
        "Operating System :: Microsoft :: Windows",
        "Topic :: Software Development :: Libraries"
    ]
)
VERSION = 0.4
with open("README.md") as f:
README = f.read()
setup(
name = "pyscreenrec",
version = VERSION,
description = "A small and cross-platform python library for recording screen.",
long_description_content_type = "text/markdown",
long_description = README,
url="https://github.com/Shravan-1908/pyscreenrec",
author = "Shravan Asati",
author_email = "dev.shravan@protonmail.com",
packages = find_packages(),
install_requires = ["pyscreeze", "opencv-python", "natsort"],
license = 'MIT',
keywords = ["python", "screen recording", "screen", "recording", "screenshots"],
classifiers = [
"Development Status :: 3 - Alpha",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Operating System :: Unix",
"Operating System :: MacOS :: MacOS X",
"Operating System :: Microsoft :: Windows",
"Topic :: Software Development :: Libraries"
]
) | true | true |
f7174a5077e5e6d8533019e51bf2c5b67b88f10b | 984 | py | Python | python-dtf/tests/integration/prop/test_prop_dump.py | jakev/dtf | a761ace77cea051bfb88d56df65ae6b83f664480 | [
"Apache-2.0"
] | 58 | 2015-01-13T16:24:31.000Z | 2016-11-21T16:00:58.000Z | python-dtf/tests/integration/prop/test_prop_dump.py | jakev/dtf | a761ace77cea051bfb88d56df65ae6b83f664480 | [
"Apache-2.0"
] | 51 | 2015-03-11T20:42:21.000Z | 2017-01-18T02:49:10.000Z | python-dtf/tests/integration/prop/test_prop_dump.py | jakev/dtf | a761ace77cea051bfb88d56df65ae6b83f664480 | [
"Apache-2.0"
] | 15 | 2016-02-01T00:37:21.000Z | 2016-12-09T07:03:36.000Z | # Android Device Testing Framework ("dtf")
# Copyright 2013-2016 Jake Valletta (@jake_valletta)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Integration tests for the "prop dump" utility"""
from __future__ import absolute_import
import dtf.testutils as testutils
def test_no_args():
    """Running dump with no args"""
    # Deploy a known-good dtf project config for the CLI to operate on.
    testutils.deploy_config(testutils.get_default_config())
    rtn = testutils.dtf("prop dump")
    # Tear the config down before asserting so a failure doesn't leak state.
    testutils.undeploy()
    assert(rtn.return_code == 0)
| 29.818182 | 74 | 0.748984 |
from __future__ import absolute_import
import dtf.testutils as testutils
def test_no_args():
testutils.deploy_config(testutils.get_default_config())
rtn = testutils.dtf("prop dump")
testutils.undeploy()
assert(rtn.return_code == 0)
| true | true |
f7174c3b69586ad7a0dd822a81cded6f137b0e87 | 1,204 | py | Python | tests/integration/test_tmp_policy/test.py | pdv-ru/ClickHouse | 0ff975bcf3008fa6c6373cbdfed16328e3863ec5 | [
"Apache-2.0"
] | 15,577 | 2019-09-23T11:57:53.000Z | 2022-03-31T18:21:48.000Z | tests/integration/test_tmp_policy/test.py | pdv-ru/ClickHouse | 0ff975bcf3008fa6c6373cbdfed16328e3863ec5 | [
"Apache-2.0"
] | 16,476 | 2019-09-23T11:47:00.000Z | 2022-03-31T23:06:01.000Z | tests/integration/test_tmp_policy/test.py | pdv-ru/ClickHouse | 0ff975bcf3008fa6c6373cbdfed16328e3863ec5 | [
"Apache-2.0"
] | 3,633 | 2019-09-23T12:18:28.000Z | 2022-03-31T15:55:48.000Z | # pylint: disable=unused-argument
# pylint: disable=redefined-outer-name
import pytest
from helpers.cluster import ClickHouseCluster
cluster = ClickHouseCluster(__file__)
node = cluster.add_instance('node',
main_configs=["configs/config.d/storage_configuration.xml"],
tmpfs=['/disk1:size=100M', '/disk2:size=100M'])
@pytest.fixture(scope='module')
def start_cluster():
    """Module-scoped fixture: start the cluster, always shut it down after."""
    try:
        cluster.start()
        yield cluster
    finally:
        cluster.shutdown()
def test_different_versions(start_cluster):
    """External GROUP BY spill should use both disks of the tmp policy."""
    query = 'SELECT count(ignore(*)) FROM (SELECT * FROM system.numbers LIMIT 1e7) GROUP BY number'
    # Force tiny in-memory limits (1 MiB) so the aggregation/sort must spill.
    settings = {
        'max_bytes_before_external_group_by': 1 << 20,
        'max_bytes_before_external_sort': 1 << 20,
    }
    # Both tmpfs disks should have been registered for temporary data at startup.
    assert node.contains_in_log('Setting up /disk1/ to store temporary data in it')
    assert node.contains_in_log('Setting up /disk2/ to store temporary data in it')
    node.query(query, settings=settings)
    # The spill files should land on both disks of the storage policy.
    assert node.contains_in_log('Writing part of aggregation data into temporary file /disk1/')
    assert node.contains_in_log('Writing part of aggregation data into temporary file /disk2/')
import pytest
from helpers.cluster import ClickHouseCluster
cluster = ClickHouseCluster(__file__)
node = cluster.add_instance('node',
main_configs=["configs/config.d/storage_configuration.xml"],
tmpfs=['/disk1:size=100M', '/disk2:size=100M'])
@pytest.fixture(scope='module')
def start_cluster():
try:
cluster.start()
yield cluster
finally:
cluster.shutdown()
def test_different_versions(start_cluster):
query = 'SELECT count(ignore(*)) FROM (SELECT * FROM system.numbers LIMIT 1e7) GROUP BY number'
settings = {
'max_bytes_before_external_group_by': 1 << 20,
'max_bytes_before_external_sort': 1 << 20,
}
assert node.contains_in_log('Setting up /disk1/ to store temporary data in it')
assert node.contains_in_log('Setting up /disk2/ to store temporary data in it')
node.query(query, settings=settings)
assert node.contains_in_log('Writing part of aggregation data into temporary file /disk1/')
assert node.contains_in_log('Writing part of aggregation data into temporary file /disk2/')
| true | true |
f7174ca5b3715337bd66e103f422e337ca016408 | 492 | py | Python | Home/migrations/0006_auto_20201005_2114.py | n3trob3/nimrodage | 578eb14e2e8f7dc7ae58913b6131fd60c1596c0b | [
"Apache-2.0"
] | null | null | null | Home/migrations/0006_auto_20201005_2114.py | n3trob3/nimrodage | 578eb14e2e8f7dc7ae58913b6131fd60c1596c0b | [
"Apache-2.0"
] | null | null | null | Home/migrations/0006_auto_20201005_2114.py | n3trob3/nimrodage | 578eb14e2e8f7dc7ae58913b6131fd60c1596c0b | [
"Apache-2.0"
] | null | null | null | # Generated by Django 3.1.1 on 2020-10-05 20:14
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('Home', '0005_auto_20201005_2107'),
]
operations = [
migrations.AlterField(
model_name='contact',
name='meeting',
field=models.CharField(blank=True, choices=[('G', 'Google/Internet search'), ('R', 'Referral'), ('O', 'Other')], max_length=1),
),
]
| 25.894737 | 140 | 0.571138 |
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('Home', '0005_auto_20201005_2107'),
]
operations = [
migrations.AlterField(
model_name='contact',
name='meeting',
field=models.CharField(blank=True, choices=[('G', 'Google/Internet search'), ('R', 'Referral'), ('O', 'Other')], max_length=1),
),
]
| true | true |
f7174cab91de04cda9ea71e938b27612f1038cb9 | 11,857 | py | Python | tests/test_stochatreat_assignment.py | RoyalTS/stochatreat | 6e638e748b8638b64a185229f78967cf864cd45e | [
"MIT"
] | null | null | null | tests/test_stochatreat_assignment.py | RoyalTS/stochatreat | 6e638e748b8638b64a185229f78967cf864cd45e | [
"MIT"
] | 13 | 2019-07-11T13:13:39.000Z | 2019-07-19T14:26:57.000Z | tests/test_stochatreat_assignment.py | RoyalTS/stochatreat | 6e638e748b8638b64a185229f78967cf864cd45e | [
"MIT"
] | null | null | null | import pytest
from math import gcd
import numpy as np
import pandas as pd
from stochatreat import stochatreat
from stochatreat import get_lcm_prob_denominators
################################################################################
# fixtures
################################################################################
@pytest.fixture(params=[10_000, 100_000])
def df(request):
N = request.param
df = pd.DataFrame(
data={
"id": np.arange(N),
"dummy": [1] * N,
"stratum1": np.random.randint(1, 100, size=N),
"stratum2": np.random.randint(0, 2, size=N),
}
)
return df
# a set of treatment assignment probabilities to throw at many tests
standard_probs = [[0.1, 0.9],
[1/3, 2/3],
[0.5, 0.5],
[2/3, 1/3],
[0.9, 0.1]]
# a set of stratum column combinations from the above df fixture to throw at
# many tests
standard_stratum_cols = [
["dummy"],
["stratum1"],
["stratum1", "stratum2"],
]
# a DataFrame and treatment assignment probabilities under which there will be
# no misfits
@pytest.fixture
def df_no_misfits():
N = 1_000
stratum_size = 10
df = pd.DataFrame(
data={
"id": np.arange(N),
"stratum": np.repeat(
np.arange(N / stratum_size),
repeats=stratum_size
)
}
)
return df
probs_no_misfits =[
[0.1, 0.9],
[0.5, 0.5],
[0.9, 0.1],
]
################################################################################
# overall treatment assignment proportions
################################################################################
@pytest.mark.parametrize("n_treats", [2, 3, 4, 5, 10])
@pytest.mark.parametrize("stratum_cols", standard_stratum_cols)
def test_stochatreat_no_probs(n_treats, stratum_cols, df):
"""
Tests that overall treatment assignment proportions across all strata are as
intended with equal treatment assignment probabilities -- relies on the Law
of Large Numbers, not deterministic
"""
treats = stochatreat(
data=df,
stratum_cols=stratum_cols,
treats=n_treats,
idx_col="id",
random_state=42
)
treatment_shares = treats.groupby('treat')['id'].size() / treats.shape[0]
np.testing.assert_almost_equal(
treatment_shares, np.array([1 / n_treats] * n_treats), decimal=2
)
@pytest.mark.parametrize("probs", standard_probs)
@pytest.mark.parametrize("stratum_cols", standard_stratum_cols)
def test_stochatreat_probs(probs, stratum_cols, df):
"""
Tests that overall treatment assignment proportions across all strata are as
intended with unequal treatment assignment probabilities -- relies on the
Law of Large Numbers, not deterministic
"""
treats = stochatreat(
data=df,
stratum_cols=stratum_cols,
treats=len(probs),
idx_col="id",
probs=probs,
random_state=42,
)
treatment_shares = treats.groupby('treat')['id'].size() / treats.shape[0]
np.testing.assert_almost_equal(
treatment_shares, np.array(probs), decimal=2
)
@pytest.mark.parametrize("probs", probs_no_misfits)
def test_stochatreat_no_misfits(probs, df_no_misfits):
"""
Tests that overall treatment assignment proportions across all strata are as
intended when strata are such that there are no misfits
"""
treats = stochatreat(
data=df_no_misfits,
stratum_cols=["stratum"],
treats=len(probs),
idx_col="id",
probs=probs,
random_state=42,
)
treatment_shares = treats.groupby('treat')['id'].size() / treats.shape[0]
np.testing.assert_almost_equal(
treatment_shares, np.array(probs), decimal=2
)
@pytest.mark.parametrize("probs", standard_probs)
def test_stochatreat_only_misfits(probs):
"""
Tests that overall treatment assignment proportions across all strata are as
intended when strata are such that there are only misfits and the number of
units is sufficiently large -- relies on the Law of Large Numbers, not
deterministic
"""
N = 10_000
df = pd.DataFrame(
data={
"id": np.arange(N),
"stratum": np.arange(N),
}
)
treats = stochatreat(
data=df,
stratum_cols=["stratum"],
treats=len(probs),
idx_col="id",
probs=probs,
random_state=42,
)
treatment_shares = treats.groupby('treat')['id'].size() / treats.shape[0]
np.testing.assert_almost_equal(
treatment_shares, np.array(probs), decimal=2
)
################################################################################
# within-stratum treatment assignments
################################################################################
def get_within_strata_counts(treats):
"""Helper function to compute the treatment shares within strata"""
treatment_counts = (treats
.groupby(["stratum_id", "treat"])[["id"]]
.count()
.rename(columns={"id": "treat_count"})
.reset_index()
)
stratum_counts = (treats
.groupby(["stratum_id"])[["id"]]
.count()
.rename(columns={"id": "stratum_count"})
.reset_index()
)
counts = pd.merge(
treatment_counts, stratum_counts, on="stratum_id", how="left"
)
return counts
def compute_count_diff(treats, probs):
"""
Helper function to compute the treatment counts within strata and line them
up with required counts, and returns the different treatment counts
aggregated at the stratum level as well as the dataframe with the different
counts used in some tests
"""
counts = get_within_strata_counts(treats)
required_props = pd.DataFrame(
{"required_prop": probs, "treat": range(len(probs))}
)
comp = pd.merge(
counts, required_props, on="treat", how="left"
)
comp["desired_counts"] = comp["stratum_count"] * comp["required_prop"]
comp["count_diff"] = (comp["treat_count"] - comp["desired_counts"]).abs()
return comp
@pytest.mark.parametrize("n_treats", [2, 3, 4, 5, 10])
@pytest.mark.parametrize("stratum_cols", standard_stratum_cols)
def test_stochatreat_within_strata_no_probs(n_treats, stratum_cols, df):
"""
Tests that within strata treatment assignment counts are only as far from
the required counts as misfit assignment randomization allows with equal
treatment assignment probabilities but a differing number of treatments
"""
probs = n_treats * [1 / n_treats]
lcm_prob_denominators = n_treats
treats = stochatreat(
data=df,
stratum_cols=stratum_cols,
treats=n_treats,
idx_col="id",
random_state=42
)
comp = compute_count_diff(treats, probs)
assert_msg = """The counts differences exceed the bound that misfit
allocation should not exceed"""
assert (comp["count_diff"] < lcm_prob_denominators).all(), assert_msg
@pytest.mark.parametrize("probs", standard_probs)
@pytest.mark.parametrize("stratum_cols", standard_stratum_cols)
def test_stochatreat_within_strata_probs(probs, stratum_cols, df):
"""
Tests that within strata treatment assignment counts are only as far from
the required counts as misfit assignment randomization allows with two
treatments but unequal treatment assignment probabilities
"""
lcm_prob_denominators = get_lcm_prob_denominators(probs)
treats = stochatreat(
data=df,
stratum_cols=stratum_cols,
treats=len(probs),
idx_col="id",
probs=probs,
random_state=42,
)
comp = compute_count_diff(treats, probs)
assert_msg = """The counts differences exceed the bound that misfit
allocation should not exceed"""
assert (comp["count_diff"] < lcm_prob_denominators).all(), assert_msg
@pytest.mark.parametrize("probs", probs_no_misfits)
def test_stochatreat_within_strata_no_misfits(probs, df_no_misfits):
"""
Tests that within strata treatment assignment counts are exactly equal to
the required counts when strata are such that there are no misfits
"""
treats = stochatreat(
data=df_no_misfits,
stratum_cols=["stratum"],
treats=len(probs),
idx_col="id",
probs=probs,
random_state=42,
)
comp = compute_count_diff(treats, probs)
assert_msg = "The required proportions are not reached without misfits"
assert (comp["count_diff"] == 0).all(), assert_msg
@pytest.mark.parametrize("probs", standard_probs)
@pytest.mark.parametrize("stratum_cols", standard_stratum_cols)
def test_stochatreat_global_strategy(probs, stratum_cols, df):
treats = stochatreat(
data=df,
stratum_cols=stratum_cols,
treats=len(probs),
idx_col="id",
probs=probs,
random_state=42,
misfit_strategy="global"
)
comp = compute_count_diff(treats, probs)
stratum_count_diff = comp.groupby(["stratum_id"])["count_diff"].sum()
assert_msg = "There is more than one stratum with misfits"
assert (stratum_count_diff != 0).sum() <= 1, assert_msg
@pytest.mark.parametrize("misfit_strategy", ["global", "stratum"])
@pytest.mark.parametrize("stratum_cols", standard_stratum_cols)
def test_stochatreat_stratum_ids(df, misfit_strategy, stratum_cols):
"""Tests that the function returns the right number of stratum ids"""
treats = stochatreat(
data=df,
stratum_cols=stratum_cols,
treats=2,
idx_col="id",
random_state=42,
misfit_strategy=misfit_strategy,
)
n_unique_strata = len(df[stratum_cols].drop_duplicates())
n_unique_stratum_ids = len(treats["stratum_id"].drop_duplicates())
if misfit_strategy == "global":
# depending on whether there are misfits
assert (
(n_unique_stratum_ids == n_unique_strata) or
(n_unique_stratum_ids - 1 == n_unique_strata)
)
else:
assert n_unique_stratum_ids == n_unique_strata
@pytest.mark.parametrize("stratum_cols", standard_stratum_cols)
@pytest.mark.parametrize("misfit_strategy", ["global", "stratum"])
def test_stochatreat_random_state(df, stratum_cols, misfit_strategy):
"""
Tests that the results are the same on two consecutive calls with the same
random state
"""
random_state = 42
treats = []
for _ in range(2):
treatments_i = stochatreat(
data=df,
stratum_cols=stratum_cols,
treats=2,
idx_col="id",
random_state=random_state,
misfit_strategy=misfit_strategy,
)
treats.append(treatments_i)
pd.testing.assert_series_equal(
treats[0]["treat"], treats[1]["treat"]
)
@pytest.mark.parametrize("stratum_cols", standard_stratum_cols)
@pytest.mark.parametrize("misfit_strategy", ["global", "stratum"])
def test_stochatreat_shuffle_data(df, stratum_cols, misfit_strategy):
"""
Tests that the mapping between idx_col and the assignments is the same on
two consecutive calls with the same random state and shuffled data points
"""
random_state = 42
treats = []
for _ in range(2):
treatments_i = stochatreat(
data=df,
stratum_cols=stratum_cols,
treats=2,
idx_col="id",
random_state=random_state,
misfit_strategy=misfit_strategy,
)
treatments_i = treatments_i.sort_values("id")
treats.append(treatments_i)
df = df.sample(len(df), random_state=random_state)
pd.testing.assert_series_equal(
treats[0]["treat"], treats[1]["treat"]
)
| 30.017722 | 80 | 0.627899 | import pytest
from math import gcd
import numpy as np
import pandas as pd
from stochatreat import stochatreat
from stochatreat import get_lcm_prob_denominators
| true | true |
f7174cd8c1d5c09c4cbcb9df7c5490a9c0982657 | 1,305 | py | Python | source/ship.py | seveirbian/Plane-game | 96c5377e72d3dfb1c5720a1769e9db8e89624ed5 | [
"MIT"
] | 1 | 2018-06-12T08:56:52.000Z | 2018-06-12T08:56:52.000Z | source/ship.py | seveirbian/Plane-game | 96c5377e72d3dfb1c5720a1769e9db8e89624ed5 | [
"MIT"
] | null | null | null | source/ship.py | seveirbian/Plane-game | 96c5377e72d3dfb1c5720a1769e9db8e89624ed5 | [
"MIT"
] | null | null | null | import pygame
from pygame.sprite import Sprite
class Ship(Sprite):
def __init__(self, ai_settings, screen):
super().__init__()
'''初始化飞船并设置其初始位置'''
self.screen = screen
self.ai_settings = ai_settings
# 加载飞船图像并获取其外接矩形
self.image = pygame.image.load('../images/ship.png')
self.rect = self.image.get_rect()
self.screen_rect = screen.get_rect()
# 将每艘新飞船放在屏幕底部中央
self.rect.centerx = self.screen_rect.centerx
self.rect.bottom = self.screen_rect.bottom
# 在飞船的属性center中存储小数值
self.center = float(self.rect.centerx)
# 移动标志
self.moving_right = False
self.moving_left = False
def blitme(self):
'''在指定位置绘制飞船'''
self.screen.blit(self.image, self.rect)
def update(self):
'''根据移动标志调整飞船的位置'''
# 更新飞船的center值,而不是rect
if self.moving_right and (self.rect.right < self.screen_rect.right):
self.center += self.ai_settings.ship_speed_factor
if self.moving_left and (self.rect.left > 0):
self.center -= self.ai_settings.ship_speed_factor
# 根据self.center值跟新rect对象
self.rect.centerx = self.center
def center_ship(self):
'''让飞船在屏幕上居中'''
self.center = self.screen_rect.centerx | 29 | 76 | 0.622989 | import pygame
from pygame.sprite import Sprite
class Ship(Sprite):
def __init__(self, ai_settings, screen):
super().__init__()
self.screen = screen
self.ai_settings = ai_settings
self.image = pygame.image.load('../images/ship.png')
self.rect = self.image.get_rect()
self.screen_rect = screen.get_rect()
self.rect.centerx = self.screen_rect.centerx
self.rect.bottom = self.screen_rect.bottom
self.center = float(self.rect.centerx)
self.moving_right = False
self.moving_left = False
def blitme(self):
self.screen.blit(self.image, self.rect)
def update(self):
if self.moving_right and (self.rect.right < self.screen_rect.right):
self.center += self.ai_settings.ship_speed_factor
if self.moving_left and (self.rect.left > 0):
self.center -= self.ai_settings.ship_speed_factor
self.rect.centerx = self.center
def center_ship(self):
self.center = self.screen_rect.centerx | true | true |
f7174db2dec152988a0f2418273a7a9086488a8b | 4,836 | py | Python | pynubank/nubank.py | FlavioMoreiraTec/nubank-flavio | 224f483a7c7644116657c4f9e0929010ed511aa4 | [
"MIT"
] | null | null | null | pynubank/nubank.py | FlavioMoreiraTec/nubank-flavio | 224f483a7c7644116657c4f9e0929010ed511aa4 | [
"MIT"
] | null | null | null | pynubank/nubank.py | FlavioMoreiraTec/nubank-flavio | 224f483a7c7644116657c4f9e0929010ed511aa4 | [
"MIT"
] | null | null | null | import json
import os
import uuid
from typing import Tuple
import requests
from qrcode import QRCode
from requests import Response
PAYMENT_EVENT_TYPES = (
'TransferOutEvent',
'TransferInEvent',
'TransferOutReversalEvent',
'BarcodePaymentEvent',
'DebitPurchaseEvent',
'DebitPurchaseReversalEvent',
)
class NuException(Exception):
def __init__(self, status_code, response, url):
super().__init__(f'The request made failed with HTTP status code {status_code}')
self.url = url
self.status_code = status_code
self.response = response
class Nubank:
DISCOVERY_URL = 'https://prod-s0-webapp-proxy.nubank.com.br/api/discovery'
DISCOVERY_APP_URL = 'https://prod-s0-webapp-proxy.nubank.com.br/api/app/discovery'
auth_url = None
feed_url = None
proxy_list_url = None
proxy_list_app_url = None
query_url = None
bills_url = None
def __init__(self):
self.headers = {
'Content-Type': 'application/json',
'X-Correlation-Id': 'WEB-APP.pewW9',
'User-Agent': 'pynubank Client - https://github.com/andreroggeri/pynubank',
}
self._update_proxy_urls()
self.auth_url = self.proxy_list_url['login']
@staticmethod
def _get_query(query_name):
root = os.path.abspath(os.path.dirname(__file__))
gql_file = query_name + '.gql'
path = os.path.join(root, 'queries', gql_file)
with open(path) as gql:
return gql.read()
def _update_proxy_urls(self):
request = requests.get(self.DISCOVERY_URL, headers=self.headers)
self.proxy_list_url = json.loads(request.content.decode('utf-8'))
request = requests.get(self.DISCOVERY_APP_URL, headers=self.headers)
self.proxy_list_app_url = json.loads(request.content.decode('utf-8'))
def _make_graphql_request(self, graphql_object):
body = {
'query': self._get_query(graphql_object)
}
response = requests.post(self.query_url, json=body, headers=self.headers)
return self._handle_response(response)
def _password_auth(self, cpf: str, password: str):
payload = {
"grant_type": "password",
"login": cpf,
"password": password,
"client_id": "other.conta",
"client_secret": "yQPeLzoHuJzlMMSAjC-LgNUJdUecx8XO"
}
response = requests.post(self.auth_url, json=payload, headers=self.headers)
data = self._handle_response(response)
return data
def _handle_response(self, response: Response) -> dict:
if response.status_code != 200:
raise NuException(response.status_code, response.json(), response.url)
return response.json()
def get_qr_code(self) -> Tuple[str, QRCode]:
content = str(uuid.uuid4())
qr = QRCode()
qr.add_data(content)
return content, qr
def authenticate_with_qr_code(self, cpf: str, password, uuid: str):
auth_data = self._password_auth(cpf, password)
self.headers['Authorization'] = f'Bearer {auth_data["access_token"]}'
payload = {
'qr_code_id': uuid,
'type': 'login-webapp'
}
response = requests.post(self.proxy_list_app_url['lift'], json=payload, headers=self.headers)
auth_data = self._handle_response(response)
self.headers['Authorization'] = f'Bearer {auth_data["access_token"]}'
self.feed_url = auth_data['_links']['events']['href']
self.query_url = auth_data['_links']['ghostflame']['href']
self.bills_url = auth_data['_links']['bills_summary']['href']
def get_card_feed(self):
request = requests.get(self.feed_url, headers=self.headers)
return json.loads(request.content.decode('utf-8'))
def get_card_statements(self):
feed = self.get_card_feed()
return list(filter(lambda x: x['category'] == 'transaction', feed['events']))
def get_bills(self):
request = requests.get(self.bills_url, headers=self.headers)
return json.loads(request.content.decode('utf-8'))['bills']
def get_bill_details(self, bill):
request = requests.get(bill['_links']['self']['href'], headers=self.headers)
return json.loads(request.content.decode('utf-8'))
def get_account_feed(self):
data = self._make_graphql_request('account_feed')
return data['data']['viewer']['savingsAccount']['feed']
def get_account_statements(self):
feed = self.get_account_feed()
return list(filter(lambda x: x['__typename'] in PAYMENT_EVENT_TYPES, feed))
def get_account_balance(self):
data = self._make_graphql_request('account_balance')
return data['data']['viewer']['savingsAccount']['currentSavingsBalance']['netAmount']
| 35.29927 | 101 | 0.651778 | import json
import os
import uuid
from typing import Tuple
import requests
from qrcode import QRCode
from requests import Response
PAYMENT_EVENT_TYPES = (
'TransferOutEvent',
'TransferInEvent',
'TransferOutReversalEvent',
'BarcodePaymentEvent',
'DebitPurchaseEvent',
'DebitPurchaseReversalEvent',
)
class NuException(Exception):
def __init__(self, status_code, response, url):
super().__init__(f'The request made failed with HTTP status code {status_code}')
self.url = url
self.status_code = status_code
self.response = response
class Nubank:
DISCOVERY_URL = 'https://prod-s0-webapp-proxy.nubank.com.br/api/discovery'
DISCOVERY_APP_URL = 'https://prod-s0-webapp-proxy.nubank.com.br/api/app/discovery'
auth_url = None
feed_url = None
proxy_list_url = None
proxy_list_app_url = None
query_url = None
bills_url = None
def __init__(self):
self.headers = {
'Content-Type': 'application/json',
'X-Correlation-Id': 'WEB-APP.pewW9',
'User-Agent': 'pynubank Client - https://github.com/andreroggeri/pynubank',
}
self._update_proxy_urls()
self.auth_url = self.proxy_list_url['login']
@staticmethod
def _get_query(query_name):
root = os.path.abspath(os.path.dirname(__file__))
gql_file = query_name + '.gql'
path = os.path.join(root, 'queries', gql_file)
with open(path) as gql:
return gql.read()
def _update_proxy_urls(self):
request = requests.get(self.DISCOVERY_URL, headers=self.headers)
self.proxy_list_url = json.loads(request.content.decode('utf-8'))
request = requests.get(self.DISCOVERY_APP_URL, headers=self.headers)
self.proxy_list_app_url = json.loads(request.content.decode('utf-8'))
def _make_graphql_request(self, graphql_object):
body = {
'query': self._get_query(graphql_object)
}
response = requests.post(self.query_url, json=body, headers=self.headers)
return self._handle_response(response)
def _password_auth(self, cpf: str, password: str):
payload = {
"grant_type": "password",
"login": cpf,
"password": password,
"client_id": "other.conta",
"client_secret": "yQPeLzoHuJzlMMSAjC-LgNUJdUecx8XO"
}
response = requests.post(self.auth_url, json=payload, headers=self.headers)
data = self._handle_response(response)
return data
def _handle_response(self, response: Response) -> dict:
if response.status_code != 200:
raise NuException(response.status_code, response.json(), response.url)
return response.json()
def get_qr_code(self) -> Tuple[str, QRCode]:
content = str(uuid.uuid4())
qr = QRCode()
qr.add_data(content)
return content, qr
def authenticate_with_qr_code(self, cpf: str, password, uuid: str):
auth_data = self._password_auth(cpf, password)
self.headers['Authorization'] = f'Bearer {auth_data["access_token"]}'
payload = {
'qr_code_id': uuid,
'type': 'login-webapp'
}
response = requests.post(self.proxy_list_app_url['lift'], json=payload, headers=self.headers)
auth_data = self._handle_response(response)
self.headers['Authorization'] = f'Bearer {auth_data["access_token"]}'
self.feed_url = auth_data['_links']['events']['href']
self.query_url = auth_data['_links']['ghostflame']['href']
self.bills_url = auth_data['_links']['bills_summary']['href']
def get_card_feed(self):
request = requests.get(self.feed_url, headers=self.headers)
return json.loads(request.content.decode('utf-8'))
def get_card_statements(self):
feed = self.get_card_feed()
return list(filter(lambda x: x['category'] == 'transaction', feed['events']))
def get_bills(self):
request = requests.get(self.bills_url, headers=self.headers)
return json.loads(request.content.decode('utf-8'))['bills']
def get_bill_details(self, bill):
request = requests.get(bill['_links']['self']['href'], headers=self.headers)
return json.loads(request.content.decode('utf-8'))
def get_account_feed(self):
data = self._make_graphql_request('account_feed')
return data['data']['viewer']['savingsAccount']['feed']
def get_account_statements(self):
feed = self.get_account_feed()
return list(filter(lambda x: x['__typename'] in PAYMENT_EVENT_TYPES, feed))
def get_account_balance(self):
data = self._make_graphql_request('account_balance')
return data['data']['viewer']['savingsAccount']['currentSavingsBalance']['netAmount']
| true | true |
f7174f069101d37e1152c091948b84f7ddc5aa8d | 313 | py | Python | py_merge/mergeexample.py | mutazag/misc | dfef362cdd835ef4efd1f2d02e13ff5297ccfc0f | [
"MIT"
] | null | null | null | py_merge/mergeexample.py | mutazag/misc | dfef362cdd835ef4efd1f2d02e13ff5297ccfc0f | [
"MIT"
] | null | null | null | py_merge/mergeexample.py | mutazag/misc | dfef362cdd835ef4efd1f2d02e13ff5297ccfc0f | [
"MIT"
] | null | null | null | #%%
import pandas as pd
#%%
df1 = pd.read_csv('df1.csv', index_col=0)
# %%
df2 = pd.read_csv('df2.csv', index_col=0)
# %%
df3 = pd.read_csv('df3.csv', index_col=0)
# %%
df1.merge(df2, on='proj_id').merge(df3, on='doc_id')
# %%
df1.merge(df2, on='proj_id', how='left').merge(df3, on='doc_id', how='left')
# %%
| 18.411765 | 76 | 0.603834 |
import pandas as pd
df1 = pd.read_csv('df1.csv', index_col=0)
df2 = pd.read_csv('df2.csv', index_col=0)
df3 = pd.read_csv('df3.csv', index_col=0)
df1.merge(df2, on='proj_id').merge(df3, on='doc_id')
df1.merge(df2, on='proj_id', how='left').merge(df3, on='doc_id', how='left')
| true | true |
f7175023ba297508308f5f971d92777633745cb2 | 1,542 | py | Python | tests/python/gaia-ui-tests/gaiatest/tests/functional/settings/test_settings_media_storage.py | BReduardokramer/gaia | c00302cdcd435ab193e8365917cfc6abac9e4f2e | [
"Apache-2.0"
] | 1 | 2021-11-09T00:27:34.000Z | 2021-11-09T00:27:34.000Z | tests/python/gaia-ui-tests/gaiatest/tests/functional/settings/test_settings_media_storage.py | Delphine/gaia | df92f0ebd89efbc63570a61e70c4304c17b8b555 | [
"Apache-2.0"
] | null | null | null | tests/python/gaia-ui-tests/gaiatest/tests/functional/settings/test_settings_media_storage.py | Delphine/gaia | df92f0ebd89efbc63570a61e70c4304c17b8b555 | [
"Apache-2.0"
] | null | null | null | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from gaiatest import GaiaTestCase
from gaiatest.apps.settings.app import Settings
class TestSettingsMediaStorage(GaiaTestCase):
def test_settings_media_storage(self):
settings = Settings(self.marionette)
settings.launch()
media_storage_settings = settings.open_media_storage_settings()
# Check that no media is on the device
self.assertEqual(media_storage_settings.music_size, '0 B')
self.assertEqual(media_storage_settings.pictures_size, '0 B')
self.assertEqual(media_storage_settings.movies_size, '0 B')
# Close the settings application
self.apps.kill(settings.app)
# Push media to the device
self.push_resource('VID_0001.3gp', destination='DCIM/100MZLLA')
self.push_resource('IMG_0001.jpg', destination='DCIM/100MZLLA')
self.push_resource('MUS_0001.mp3', destination='DCIM/100MZLLA')
# Access 'Media storage' in Settings
settings.launch()
media_storage_settings = settings.open_media_storage_settings()
# Check that media storage has updated to reflect the newly pushed media
self.assertEqual(media_storage_settings.music_size, '120 KB')
self.assertEqual(media_storage_settings.pictures_size, '348 KB')
self.assertEqual(media_storage_settings.movies_size, '120 KB')
| 40.578947 | 80 | 0.72179 |
from gaiatest import GaiaTestCase
from gaiatest.apps.settings.app import Settings
class TestSettingsMediaStorage(GaiaTestCase):
def test_settings_media_storage(self):
settings = Settings(self.marionette)
settings.launch()
media_storage_settings = settings.open_media_storage_settings()
self.assertEqual(media_storage_settings.music_size, '0 B')
self.assertEqual(media_storage_settings.pictures_size, '0 B')
self.assertEqual(media_storage_settings.movies_size, '0 B')
self.apps.kill(settings.app)
self.push_resource('VID_0001.3gp', destination='DCIM/100MZLLA')
self.push_resource('IMG_0001.jpg', destination='DCIM/100MZLLA')
self.push_resource('MUS_0001.mp3', destination='DCIM/100MZLLA')
settings.launch()
media_storage_settings = settings.open_media_storage_settings()
self.assertEqual(media_storage_settings.music_size, '120 KB')
self.assertEqual(media_storage_settings.pictures_size, '348 KB')
self.assertEqual(media_storage_settings.movies_size, '120 KB')
| true | true |
f71752ad85213d316ee14113c2e19d7243632bd1 | 17,836 | py | Python | features/eolearn/features/radiometric_normalization.py | mohammadrezabk/eo-learn | 8de3cfd64e74c1e4832e585954cdbf0ee9676eb3 | [
"MIT"
] | null | null | null | features/eolearn/features/radiometric_normalization.py | mohammadrezabk/eo-learn | 8de3cfd64e74c1e4832e585954cdbf0ee9676eb3 | [
"MIT"
] | null | null | null | features/eolearn/features/radiometric_normalization.py | mohammadrezabk/eo-learn | 8de3cfd64e74c1e4832e585954cdbf0ee9676eb3 | [
"MIT"
] | null | null | null | """
Module for radiometric normalization
Credits:
Copyright (c) 2018-2019 Johannes Schmid (GeoVille)
Copyright (c) 2017-2019 Matej Aleksandrov, Matic Lubej, Devis Peresutti (Sinergise)
This source code is licensed under the MIT license found in the LICENSE
file in the root directory of this source tree.
"""
import numpy as np
from eolearn.core import EOTask, FeatureType
class ReferenceScenes(EOTask):
""" Creates a layer of reference scenes which have the highest fraction of valid pixels.
The number of reference scenes is limited to a definable number.
Contributor: Johannes Schmid, GeoVille Information Systems GmbH, 2018
:param feature: Name of the eopatch data layer. Needs to be of the FeatureType "DATA".
:type feature: (FeatureType, str) or (FeatureType, str, str)
:param valid_fraction_feature: Name of the layer containing the valid fraction obtained with the EOTask
'AddValidDataFraction'. Needs to be of the FeatureType "SCALAR".
:type valid_fraction_feature: (FeatureType, str)
:param max_scene_number: Maximum number of reference scenes taken for the creation of the composite. By default,
the maximum number of scenes equals the number of time frames
:type max_scene_number: int
"""
def __init__(self, feature, valid_fraction_feature, max_scene_number=None):
self.feature = self._parse_features(feature, new_names=True,
default_feature_type=FeatureType.DATA,
rename_function='{}_REFERENCE'.format)
self.valid_fraction_feature = self._parse_features(valid_fraction_feature,
default_feature_type=FeatureType.SCALAR)
self.number = max_scene_number
def execute(self, eopatch):
feature_type, feature_name, new_feature_name = next(self.feature(eopatch))
valid_fraction_feature_type, valid_fraction_feature_name = next(self.valid_fraction_feature(eopatch))
valid_frac = list(eopatch[valid_fraction_feature_type][valid_fraction_feature_name].flatten())
data = eopatch[feature_type][feature_name]
number = data.shape[0] if self.number is None else self.number
eopatch[feature_type][new_feature_name] = np.array([data[x] for _, x in
sorted(zip(valid_frac, range(data.shape[0])), reverse=True)
if x <= number-1])
return eopatch
class BaseCompositing(EOTask):
    """ Base class to create a composite of reference scenes

    Contributor: Johannes Schmid, GeoVille Information Systems GmbH, 2018

    :param feature: Feature holding the input time-series. Default type is FeatureType.DATA
    :type feature: (FeatureType, str)
    :param feature_composite: Type and name of output composite image. Default type is FeatureType.DATA_TIMELESS
    :type feature_composite: (FeatureType, str)
    :param percentile: Percentile along the time dimension used for compositing. Methods use different percentiles
    :type percentile: int or list
    :param max_index: Value used to flag indices with NaNs. Could be integer or NaN. Default is 255
    :type max_index: int or NaN
    :param interpolation: Method used to compute percentile. Allowed values are {'geoville', 'linear', 'lower',
                          'higher', 'midpoint', 'nearest'}. 'geoville' interpolation performs a custom
                          implementation, while the other methods use the numpy `percentile` function. Default is
                          'lower'
    :type interpolation: str
    :param no_data_value: Value in the composite assigned to non valid data points. Default is NaN
    :type no_data_value: float or NaN
    """
    def __init__(self, feature, feature_composite, percentile=None, max_index=255, interpolation='lower',
                 no_data_value=np.nan):
        self.feature = self._parse_features(feature,
                                            default_feature_type=FeatureType.DATA,
                                            rename_function='{}_COMPOSITE'.format)
        self.composite_type, self.composite_name = next(
            self._parse_features(feature_composite, default_feature_type=FeatureType.DATA_TIMELESS)())
        self.percentile = percentile
        self.max_index = max_index
        self.interpolation = interpolation
        # Dispatch once, at construction time, between the custom and the numpy implementation
        self._index_by_percentile = self._geoville_index_by_percentile \
            if self.interpolation.lower() == 'geoville' else self._numpy_index_by_percentile
        self.no_data_value = no_data_value

    def _numpy_index_by_percentile(self, data, percentile):
        """ Calculate percentile of numpy stack and return the index of the chosen pixel.

        numpy percentile function is used with one of the following interpolations {'linear', 'lower', 'higher',
        'midpoint', 'nearest'}
        """
        # NOTE: numpy >= 1.22 renamed `interpolation` to `method`; the old keyword is kept for
        # compatibility with the numpy versions this code targets.
        data_perc_low = np.nanpercentile(data, percentile, axis=0, interpolation=self.interpolation)
        # All-NaN pixels are flagged with `max_index`; everywhere else pick the temporal frame whose
        # value is closest to the percentile value.
        # Bug fix: the previous version first created a uint8 array and assigned np.nan into it,
        # which raises "ValueError: cannot convert float NaN to integer" on NumPy and was dead
        # code anyway (immediately overwritten by the np.where below).
        abs_diff = np.where(np.isnan(data_perc_low), np.inf, abs(data - data_perc_low))
        indices = np.where(np.isnan(data_perc_low), self.max_index, np.nanargmin(abs_diff, axis=0))
        return indices

    def _geoville_index_by_percentile(self, data, percentile):
        """ Calculate percentile of numpy stack and return the index of the chosen pixel. """
        data_tmp = np.array(data, copy=True)
        # Number of valid (finite) observations per pixel along the time axis
        valid_obs = np.sum(np.isfinite(data_tmp), axis=0)
        # Replace NaN with a value larger than any real observation so NaNs sort last
        max_val = np.nanmax(data_tmp) + 1
        data_tmp[np.isnan(data_tmp)] = max_val
        # Stable sort along time - former NaNs move to the end
        ind_tmp = np.argsort(data_tmp, kind="mergesort", axis=0)
        # Desired (fractional) rank of the percentile among the valid observations
        k_arr = (valid_obs - 1) * (percentile / 100.0)
        k_arr = np.where(k_arr < 0, 0, k_arr)
        # Round half up to the nearest rank
        f_arr = np.floor(k_arr + 0.5)
        f_arr = f_arr.astype(int)
        ind = f_arr.astype("int16")
        y_val, x_val = ind_tmp.shape[1], ind_tmp.shape[2]
        y_val, x_val = np.ogrid[0:y_val, 0:x_val]
        # Pixels with no valid observation at all get the `max_index` flag
        idx = np.where(valid_obs == 0, self.max_index, ind_tmp[ind, y_val, x_val])
        return idx

    def _get_reference_band(self, data):
        """ Extract reference band from input 4D data according to compositing method

        :param data: 4D array from which to extract reference band (e.g. blue, maxNDVI, ..)
        :type data: numpy array
        :return: 3D array containing reference band according to compositing method
        """
        raise NotImplementedError

    def _get_indices(self, data):
        """ Compute indices along temporal dimension corresponding to the sought percentile

        :param data: Input 3D array holding the reference band
        :type data: numpy array
        :return: 2D array holding the temporal index corresponding to percentile
        """
        indices = self._index_by_percentile(data, self.percentile)
        return indices

    def execute(self, eopatch):
        """ Compute composite array merging temporal frames according to the compositing method

        :param eopatch: eopatch holding time-series
        :return: eopatch with composite image of time-series
        """
        feature_type, feature_name = next(self.feature(eopatch))
        data = eopatch[feature_type][feature_name].copy()
        # compute band according to compositing method (e.g. blue, maxNDVI, maxNDWI)
        reference_bands = self._get_reference_band(data)
        # find temporal indices corresponding to pre-defined percentile
        indices = self._get_indices(reference_bands)
        # compute composite image selecting values along temporal dimension corresponding to percentile indices
        composite_image = np.empty((data.shape[1:]), np.float32)
        composite_image[:] = self.no_data_value
        for scene_id, scene in enumerate(data):
            composite_image = np.where(np.dstack([indices]) == scene_id, scene, composite_image)
        eopatch[self.composite_type][self.composite_name] = composite_image
        return eopatch
class BlueCompositing(BaseCompositing):
    """ Blue band compositing method

    - blue (25th percentile of the blue band)

    :param blue_idx: Index of blue band in `feature` array
    :type blue_idx: int
    """
    def __init__(self, feature, feature_composite, blue_idx, interpolation='lower'):
        # Fail fast: validate the index before running the base initialisation, so a bad
        # argument cannot leave behind a partially initialised task.
        if not isinstance(blue_idx, int):
            raise ValueError('Incorrect value of blue band index specified')
        super().__init__(feature, feature_composite, percentile=25, interpolation=interpolation)
        self.blue_idx = blue_idx

    def _get_reference_band(self, data):
        """ Extract the blue band from time-series

        :param data: 4D array from which to extract the blue reference band
        :type data: numpy array
        :return: 3D array containing the blue reference band
        """
        return data[..., self.blue_idx].astype("float32")
class HOTCompositing(BaseCompositing):
    """ Compositing based on the HOT (haze-optimised transformation) index.

    The composite picks, per pixel, the frame at the 25th percentile of the HOT index
    built from the blue and red bands, as defined in:

    Zhu, Z., & Woodcock, C. E. (2012). "Object-based cloud and cloud shadow detection in Landsat imagery."
    Remote Sensing of Environment, 118, 83-94.

    :param blue_idx: Index of blue band in `feature` array
    :type blue_idx: int
    :param red_idx: Index of red band in `feature` array
    :type red_idx: int
    """
    def __init__(self, feature, feature_composite, blue_idx, red_idx, interpolation='lower'):
        super().__init__(feature, feature_composite, percentile=25, interpolation=interpolation)
        self.blue_idx = blue_idx
        self.red_idx = red_idx
        indices_valid = isinstance(blue_idx, int) and isinstance(red_idx, int)
        if not indices_valid:
            raise ValueError('Incorrect values of blue and red band indices specified')

    def _get_reference_band(self, data):
        """ Compute the HOT index from the blue and red bands of the time-series.

        :param data: 4D array from which to extract the HOT reference band
        :type data: numpy array
        :return: 3D array containing the HOT reference band
        """
        blue = data[..., self.blue_idx]
        red = data[..., self.red_idx]
        return blue - 0.5 * red - 0.08
class MaxNDVICompositing(BaseCompositing):
    """ maxNDVI compositing method

    - maxNDVI (temporal maximum of NDVI)

    :param red_idx: Index of red band in `feature` array
    :type red_idx: int
    :param nir_idx: Index of NIR band in `feature` array
    :type nir_idx: int
    """
    def __init__(self, feature, feature_composite, red_idx, nir_idx, interpolation='lower'):
        # Fail fast: validate the indices before running the base initialisation
        if not isinstance(nir_idx, int) or not isinstance(red_idx, int):
            raise ValueError('Incorrect values of red and NIR band indices specified')
        super().__init__(feature, feature_composite, percentile=[0, 100], interpolation=interpolation)
        self.red_idx = red_idx
        self.nir_idx = nir_idx

    def _get_reference_band(self, data):
        """ Compute the NDVI band from the time-series

        :param data: 4D array from which to compute the NDVI reference band
        :type data: numpy array
        :return: 3D array containing the NDVI reference band
        """
        nir = data[..., self.nir_idx].astype("float32")
        red = data[..., self.red_idx].astype("float32")
        # nir + red can be zero (e.g. no-data pixels). The resulting NaN/inf values are already
        # handled downstream by the NaN-aware percentile logic, so suppress the numpy warnings
        # here instead of spamming RuntimeWarning per execution. Values are unchanged.
        with np.errstate(divide='ignore', invalid='ignore'):
            return (nir - red) / (nir + red)

    def _get_indices(self, data):
        """Pick per-pixel frame indices: where the median NDVI is below -0.05 (typically water)
        take the temporal minimum of NDVI, otherwise the temporal maximum."""
        median = np.nanmedian(data, axis=0)
        indices_min = self._index_by_percentile(data, self.percentile[0])
        indices_max = self._index_by_percentile(data, self.percentile[1])
        indices = np.where(median < -0.05, indices_min, indices_max)
        return indices
class MaxNDWICompositing(BaseCompositing):
    """ Compositing driven by the temporal maximum of NDWI.

    :param nir_idx: Index of NIR band in `feature` array
    :type nir_idx: int
    :param swir1_idx: Index of SWIR1 band in `feature` array
    :type swir1_idx: int
    """
    def __init__(self, feature, feature_composite, nir_idx, swir1_idx, interpolation='lower'):
        super().__init__(feature, feature_composite, percentile=100, interpolation=interpolation)
        self.nir_idx = nir_idx
        self.swir1_idx = swir1_idx
        if any(not isinstance(band_idx, int) for band_idx in (nir_idx, swir1_idx)):
            raise ValueError('Incorrect values of NIR and SWIR1 band indices specified')

    def _get_reference_band(self, data):
        """ Compute the NDWI band from the time-series.

        :param data: 4D array from which to compute the NDWI reference band
        :type data: numpy array
        :return: 3D array containing the NDWI reference band
        """
        band_nir = data[..., self.nir_idx].astype("float32")
        band_swir1 = data[..., self.swir1_idx].astype("float32")
        return (band_nir - band_swir1) / (band_nir + band_swir1)
class MaxRatioCompositing(BaseCompositing):
    """ Compositing driven by the temporal maximum of the ratio max(NIR, SWIR1) / BLUE.

    :param blue_idx: Index of blue band in `feature` array
    :type blue_idx: int
    :param nir_idx: Index of NIR band in `feature` array
    :type nir_idx: int
    :param swir1_idx: Index of SWIR1 band in `feature` array
    :type swir1_idx: int
    """
    def __init__(self, feature, feature_composite, blue_idx, nir_idx, swir1_idx, interpolation='lower'):
        super().__init__(feature, feature_composite, percentile=100, interpolation=interpolation)
        self.blue_idx = blue_idx
        self.nir_idx = nir_idx
        self.swir1_idx = swir1_idx
        indices_valid = all(isinstance(band_idx, int) for band_idx in (blue_idx, nir_idx, swir1_idx))
        if not indices_valid:
            raise ValueError('Incorrect values for either blue, NIR or SWIR1 band indices specified')

    def _get_reference_band(self, data):
        """ Compute the max-ratio band, defined as max(NIR, SWIR1) / BLUE.

        :param data: 4D array from which to compute the max-ratio reference band
        :type data: numpy array
        :return: 3D array containing the max-ratio reference band
        """
        blue = data[..., self.blue_idx].astype("float32")
        nir = data[..., self.nir_idx].astype("float32")
        swir1 = data[..., self.swir1_idx].astype("float32")
        # nanmax over the stacked pair keeps a valid value whenever at least one band is finite
        return np.nanmax(np.array([nir, swir1]), axis=0) / blue
class HistogramMatching(EOTask):
    """ Match each band of every frame of a time-series to the corresponding band of a reference composite.

    Contributor: Johannes Schmid, GeoVille Information Systems GmbH, 2018

    :param feature: Data layer to normalise (FeatureType.DATA); renamed to '<name>_NORMALISED'
        unless a new name is supplied.
    :type feature: (FeatureType, str) or (FeatureType, str, str)
    :param reference: Timeless data layer acting as the histogram reference (FeatureType.DATA_TIMELESS).
    :type reference: (FeatureType, str)
    """
    def __init__(self, feature, reference):
        self.feature = self._parse_features(feature, new_names=True,
                                            default_feature_type=FeatureType.DATA,
                                            rename_function='{}_NORMALISED'.format)
        self.reference = self._parse_features(reference, default_feature_type=FeatureType.DATA_TIMELESS)

    def execute(self, eopatch):
        """ Normalise every frame of the time-series against the reference scene.

        :param eopatch: eopatch holding the time-series and reference data
        :type eopatch: EOPatch
        :return: The same eopatch instance with the normalised time-series
        """
        feature_type, feature_name, new_feature_name = next(self.feature(eopatch))
        reference_type, reference_name = next(self.reference(eopatch))
        reference_scene = eopatch[reference_type][reference_name]
        time_series = eopatch[feature_type][feature_name]
        # The band dimensions must agree, otherwise per-band statistics make no sense
        if reference_scene.shape[-1] != time_series.shape[-1]:
            raise ValueError('Time-series and reference scene must have corresponding bands')
        eopatch[feature_type][new_feature_name] = np.zeros_like(time_series)
        for frame_index, frame in enumerate(time_series):
            # Mask out pixels that are invalid in either the frame or the reference so both
            # statistics are computed over the same set of pixels
            frame_masked = np.where(np.isnan(reference_scene), np.nan, frame)
            reference_masked = np.where(np.isnan(frame), np.nan, reference_scene)
            # Per-band mean/std over the spatial dimensions, in float64 for numerical stability
            std_ref = np.nanstd(reference_masked, axis=(0, 1), dtype=np.float64)
            std_src = np.nanstd(frame_masked, axis=(0, 1), dtype=np.float64)
            mean_ref = np.nanmean(reference_masked, axis=(0, 1), dtype=np.float64)
            mean_src = np.nanmean(frame_masked, axis=(0, 1), dtype=np.float64)
            # Linear gain/offset normalisation: match the frame's moments to the reference's
            gain = std_ref / std_src
            eopatch[feature_type][new_feature_name][frame_index] = \
                frame * gain + (mean_ref - (mean_src * gain))
        return eopatch
| 46.569191 | 120 | 0.655808 |
import numpy as np
from eolearn.core import EOTask, FeatureType
class ReferenceScenes(EOTask):
    """Select the most cloud-free frames of a time-series as reference scenes.

    Frames are ranked by a scalar valid-pixel fraction feature; the selected frames are stored
    under a new feature name (default rename: '<name>_REFERENCE').
    """
    def __init__(self, feature, valid_fraction_feature, max_scene_number=None):
        """
        :param feature: Input time-series feature (FeatureType.DATA) and optional output name.
        :param valid_fraction_feature: Scalar feature with per-frame valid-pixel fraction.
        :param max_scene_number: Maximum number of scenes to keep; None keeps all frames.
        """
        self.feature = self._parse_features(feature, new_names=True,
                                            default_feature_type=FeatureType.DATA,
                                            rename_function='{}_REFERENCE'.format)
        self.valid_fraction_feature = self._parse_features(valid_fraction_feature,
                                                           default_feature_type=FeatureType.SCALAR)
        # None means "keep all frames"; resolved against the time dimension in execute()
        self.number = max_scene_number
    def execute(self, eopatch):
        """Rank frames by valid fraction (descending) and store the selected ones."""
        feature_type, feature_name, new_feature_name = next(self.feature(eopatch))
        valid_fraction_feature_type, valid_fraction_feature_name = next(self.valid_fraction_feature(eopatch))
        valid_frac = list(eopatch[valid_fraction_feature_type][valid_fraction_feature_name].flatten())
        data = eopatch[feature_type][feature_name]
        number = data.shape[0] if self.number is None else self.number
        # NOTE(review): `x <= number-1` filters on the frame's temporal index, not its rank in the
        # sorted order — this keeps frames from the first `number` acquisitions rather than the
        # `number` most valid frames; confirm the intent.
        eopatch[feature_type][new_feature_name] = np.array([data[x] for _, x in
                                                            sorted(zip(valid_frac, range(data.shape[0])), reverse=True)
                                                            if x <= number-1])
        return eopatch
class BaseCompositing(EOTask):
    """Base class for building a timeless composite out of a time-series of reference scenes.

    Subclasses supply `_get_reference_band`; the composite keeps, per pixel, the frame whose
    reference-band value sits at the configured percentile along the time axis.
    """
    def __init__(self, feature, feature_composite, percentile=None, max_index=255, interpolation='lower',
                 no_data_value=np.nan):
        """
        :param feature: Input time-series feature (FeatureType.DATA).
        :param feature_composite: Output composite feature (FeatureType.DATA_TIMELESS).
        :param percentile: Percentile (or list of percentiles) along the time dimension.
        :param max_index: Flag value used for pixels with no valid observation.
        :param interpolation: 'geoville' for the custom implementation, otherwise a numpy
            percentile interpolation mode ('linear', 'lower', 'higher', 'midpoint', 'nearest').
        :param no_data_value: Value written into the composite for non-valid pixels.
        """
        self.feature = self._parse_features(feature,
                                            default_feature_type=FeatureType.DATA,
                                            rename_function='{}_COMPOSITE'.format)
        self.composite_type, self.composite_name = next(
            self._parse_features(feature_composite, default_feature_type=FeatureType.DATA_TIMELESS)())
        self.percentile = percentile
        self.max_index = max_index
        self.interpolation = interpolation
        # Dispatch once at construction time between the custom and the numpy implementation
        self._index_by_percentile = self._geoville_index_by_percentile \
            if self.interpolation.lower() == 'geoville' else self._numpy_index_by_percentile
        self.no_data_value = no_data_value
    def _numpy_index_by_percentile(self, data, percentile):
        """Return per-pixel temporal indices of the frame closest to the requested percentile."""
        data_perc_low = np.nanpercentile(data, percentile, axis=0, interpolation=self.interpolation)
        # NOTE(review): assigning np.nan into a uint8 array raises ValueError on NumPy, and both
        # lines are dead code (indices is rebound below) — they should be removed.
        indices = np.empty(data_perc_low.shape, dtype=np.uint8)
        indices[:] = np.nan
        abs_diff = np.where(np.isnan(data_perc_low), np.inf, abs(data - data_perc_low))
        indices = np.where(np.isnan(data_perc_low), self.max_index, np.nanargmin(abs_diff, axis=0))
        return indices
    def _geoville_index_by_percentile(self, data, percentile):
        """Custom percentile-index computation; all-NaN pixels are flagged with `max_index`."""
        data_tmp = np.array(data, copy=True)
        # Number of valid (finite) observations per pixel along the time axis
        valid_obs = np.sum(np.isfinite(data_tmp), axis=0)
        # Replace NaN with a value larger than any real observation so NaNs sort last
        max_val = np.nanmax(data_tmp) + 1
        data_tmp[np.isnan(data_tmp)] = max_val
        ind_tmp = np.argsort(data_tmp, kind="mergesort", axis=0)
        # Desired (fractional) rank of the percentile among the valid observations
        k_arr = (valid_obs - 1) * (percentile / 100.0)
        k_arr = np.where(k_arr < 0, 0, k_arr)
        # Round half up to the nearest integer rank
        f_arr = np.floor(k_arr + 0.5)
        f_arr = f_arr.astype(int)
        ind = f_arr.astype("int16")
        y_val, x_val = ind_tmp.shape[1], ind_tmp.shape[2]
        y_val, x_val = np.ogrid[0:y_val, 0:x_val]
        idx = np.where(valid_obs == 0, self.max_index, ind_tmp[ind, y_val, x_val])
        return idx
    def _get_reference_band(self, data):
        """Extract the compositing reference band from the 4D time-series; subclass hook."""
        raise NotImplementedError
    def _get_indices(self, data):
        """Compute per-pixel temporal indices corresponding to the configured percentile."""
        indices = self._index_by_percentile(data, self.percentile)
        return indices
    def execute(self, eopatch):
        """Build the composite by picking, per pixel, the frame selected by `_get_indices`."""
        feature_type, feature_name = next(self.feature(eopatch))
        data = eopatch[feature_type][feature_name].copy()
        reference_bands = self._get_reference_band(data)
        indices = self._get_indices(reference_bands)
        # Start from a no-data canvas and paint in each frame where its index was selected
        composite_image = np.empty((data.shape[1:]), np.float32)
        composite_image[:] = self.no_data_value
        for scene_id, scene in enumerate(data):
            composite_image = np.where(np.dstack([indices]) == scene_id, scene, composite_image)
        eopatch[self.composite_type][self.composite_name] = composite_image
        return eopatch
class BlueCompositing(BaseCompositing):
    """Compositing using the 25th percentile of the blue band."""
    def __init__(self, feature, feature_composite, blue_idx, interpolation='lower'):
        """:param blue_idx: Index of the blue band in the `feature` array (must be int)."""
        super().__init__(feature, feature_composite, percentile=25, interpolation=interpolation)
        self.blue_idx = blue_idx
        if not isinstance(blue_idx, int):
            raise ValueError('Incorrect value of blue band index specified')
    def _get_reference_band(self, data):
        """Return the blue band of the time-series as float32."""
        return data[..., self.blue_idx].astype("float32")
class HOTCompositing(BaseCompositing):
    """Compositing using the 25th percentile of the HOT index (Zhu & Woodcock, 2012)."""
    def __init__(self, feature, feature_composite, blue_idx, red_idx, interpolation='lower'):
        """:param blue_idx: Index of the blue band. :param red_idx: Index of the red band."""
        super().__init__(feature, feature_composite, percentile=25, interpolation=interpolation)
        self.blue_idx = blue_idx
        self.red_idx = red_idx
        if not isinstance(blue_idx, int) or not isinstance(red_idx, int):
            raise ValueError('Incorrect values of blue and red band indices specified')
    def _get_reference_band(self, data):
        """Return the HOT index computed from the blue and red bands."""
        return data[..., self.blue_idx] - 0.5 * data[..., self.red_idx] - 0.08
class MaxNDVICompositing(BaseCompositing):
    """Compositing using the temporal extremum of NDVI (max, or min where median NDVI < -0.05)."""
    def __init__(self, feature, feature_composite, red_idx, nir_idx, interpolation='lower'):
        """:param red_idx: Index of the red band. :param nir_idx: Index of the NIR band."""
        super().__init__(feature, feature_composite, percentile=[0, 100], interpolation=interpolation)
        self.red_idx = red_idx
        self.nir_idx = nir_idx
        if not isinstance(nir_idx, int) or not isinstance(red_idx, int):
            raise ValueError('Incorrect values of red and NIR band indices specified')
    def _get_reference_band(self, data):
        """Return the NDVI band (nir - red) / (nir + red) computed in float32."""
        nir = data[..., self.nir_idx].astype("float32")
        red = data[..., self.red_idx].astype("float32")
        return (nir - red) / (nir + red)
    def _get_indices(self, data):
        # Where the median NDVI is below -0.05 (typically water) take the temporal minimum,
        # otherwise the temporal maximum of NDVI
        median = np.nanmedian(data, axis=0)
        indices_min = self._index_by_percentile(data, self.percentile[0])
        indices_max = self._index_by_percentile(data, self.percentile[1])
        indices = np.where(median < -0.05, indices_min, indices_max)
        return indices
class MaxNDWICompositing(BaseCompositing):
    """Compositing using the temporal maximum of NDWI."""
    def __init__(self, feature, feature_composite, nir_idx, swir1_idx, interpolation='lower'):
        """:param nir_idx: Index of the NIR band. :param swir1_idx: Index of the SWIR1 band."""
        super().__init__(feature, feature_composite, percentile=100, interpolation=interpolation)
        self.nir_idx = nir_idx
        self.swir1_idx = swir1_idx
        if not isinstance(nir_idx, int) or not isinstance(swir1_idx, int):
            raise ValueError('Incorrect values of NIR and SWIR1 band indices specified')
    def _get_reference_band(self, data):
        """Return the NDWI band (nir - swir1) / (nir + swir1) computed in float32."""
        nir = data[..., self.nir_idx].astype("float32")
        swir1 = data[..., self.swir1_idx].astype("float32")
        return (nir - swir1) / (nir + swir1)
class MaxRatioCompositing(BaseCompositing):
    """Compositing using the temporal maximum of the ratio max(NIR, SWIR1) / BLUE."""
    def __init__(self, feature, feature_composite, blue_idx, nir_idx, swir1_idx, interpolation='lower'):
        """:param blue_idx: Blue band index. :param nir_idx: NIR band index. :param swir1_idx: SWIR1 band index."""
        super().__init__(feature, feature_composite, percentile=100, interpolation=interpolation)
        self.blue_idx = blue_idx
        self.nir_idx = nir_idx
        self.swir1_idx = swir1_idx
        if not isinstance(blue_idx, int) or not isinstance(nir_idx, int) or not isinstance(swir1_idx, int):
            raise ValueError('Incorrect values for either blue, NIR or SWIR1 band indices specified')
    def _get_reference_band(self, data):
        """Return the max-ratio band max(NIR, SWIR1) / BLUE computed in float32."""
        blue = data[..., self.blue_idx].astype("float32")
        nir = data[..., self.nir_idx].astype("float32")
        swir1 = data[..., self.swir1_idx].astype("float32")
        # nanmax over the stacked pair keeps a valid value whenever at least one band is finite
        return np.nanmax(np.array([nir, swir1]), axis=0) / blue
class HistogramMatching(EOTask):
    """Match each band of every frame of a time-series to the corresponding band of a reference composite."""
    def __init__(self, feature, reference):
        """
        :param feature: Data layer to normalise (FeatureType.DATA); output renamed to '<name>_NORMALISED'.
        :param reference: Timeless reference layer (FeatureType.DATA_TIMELESS).
        """
        self.feature = self._parse_features(feature, new_names=True,
                                            default_feature_type=FeatureType.DATA,
                                            rename_function='{}_NORMALISED'.format)
        self.reference = self._parse_features(reference, default_feature_type=FeatureType.DATA_TIMELESS)
    def execute(self, eopatch):
        """Normalise every frame against the reference scene via per-band gain/offset matching."""
        feature_type, feature_name, new_feature_name = next(self.feature(eopatch))
        reference_type, reference_name = next(self.reference(eopatch))
        reference_scene = eopatch[reference_type][reference_name]
        # Band dimensions must agree, otherwise per-band statistics make no sense
        if reference_scene.shape[-1] != eopatch[feature_type][feature_name].shape[-1]:
            raise ValueError('Time-series and reference scene must have corresponding bands')
        eopatch[feature_type][new_feature_name] = np.zeros_like(eopatch[feature_type][feature_name])
        for source_id, source in enumerate(eopatch[feature_type][feature_name]):
            # Mask out pixels invalid in either image so both statistics use the same pixel set
            src_masked = np.where(np.isnan(reference_scene), np.nan, source)
            ref_masked = np.where(np.isnan(source), np.nan, reference_scene)
            # Per-band mean/std over the spatial dimensions, in float64 for numerical stability
            std_ref = np.nanstd(ref_masked, axis=(0, 1), dtype=np.float64)
            std_src = np.nanstd(src_masked, axis=(0, 1), dtype=np.float64)
            mean_ref = np.nanmean(ref_masked, axis=(0, 1), dtype=np.float64)
            mean_src = np.nanmean(src_masked, axis=(0, 1), dtype=np.float64)
            # Linear normalisation: match the frame's moments to the reference's
            eopatch[feature_type][new_feature_name][source_id] = \
                source * (std_ref / std_src) + (mean_ref - (mean_src * (std_ref / std_src)))
        return eopatch
| true | true |
f71754673dd76b5b137364e722d76f8cba4d6ce8 | 3,160 | py | Python | pypureclient/flasharray/FA_2_11/models/software_bundle_response.py | Flav-STOR-WL/py-pure-client | 03b889c997d90380ac5d6380ca5d5432792d3e89 | [
"BSD-2-Clause"
] | 14 | 2018-12-07T18:30:27.000Z | 2022-02-22T09:12:33.000Z | pypureclient/flasharray/FA_2_11/models/software_bundle_response.py | Flav-STOR-WL/py-pure-client | 03b889c997d90380ac5d6380ca5d5432792d3e89 | [
"BSD-2-Clause"
] | 28 | 2019-09-17T21:03:52.000Z | 2022-03-29T22:07:35.000Z | pypureclient/flasharray/FA_2_11/models/software_bundle_response.py | Flav-STOR-WL/py-pure-client | 03b889c997d90380ac5d6380ca5d5432792d3e89 | [
"BSD-2-Clause"
] | 15 | 2020-06-11T15:50:08.000Z | 2022-03-21T09:27:25.000Z | # coding: utf-8
"""
FlashArray REST API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: 2.11
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re
import six
import typing
from ....properties import Property
if typing.TYPE_CHECKING:
from pypureclient.flasharray.FA_2_11 import models
class SoftwareBundleResponse(object):
    """Swagger-generated model wrapping a list of `SoftwareBundle` items.

    `swagger_types` maps attribute names to their declared types and
    `attribute_map` maps attribute names to their JSON keys.
    """

    swagger_types = {
        'items': 'list[SoftwareBundle]'
    }

    attribute_map = {
        'items': 'items'
    }

    required_args = {
    }

    def __init__(
        self,
        items=None,  # type: List[models.SoftwareBundle]
    ):
        """
        Keyword args:
            items (list[SoftwareBundle])
        """
        if items is not None:
            self.items = items

    def __setattr__(self, key, value):
        # Only attributes declared in the swagger definition may be set
        if key not in self.attribute_map:
            raise KeyError("Invalid key `{}` for `SoftwareBundleResponse`".format(key))
        self.__dict__[key] = value

    def __getattribute__(self, item):
        # Unset Property placeholders behave as missing attributes
        value = object.__getattribute__(self, item)
        if isinstance(value, Property):
            raise AttributeError
        return value

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        for attr in self.swagger_types:
            if not hasattr(self, attr):
                continue
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [
                    item.to_dict() if hasattr(item, "to_dict") else item
                    for item in value
                ]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {
                    key: val.to_dict() if hasattr(val, "to_dict") else val
                    for key, val in value.items()
                }
            else:
                result[attr] = value
        if issubclass(SoftwareBundleResponse, dict):
            for key, value in self.items():
                result[key] = value
        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        return isinstance(other, SoftwareBundleResponse) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| 28.214286 | 105 | 0.549051 |
import pprint
import re
import six
import typing
from ....properties import Property
if typing.TYPE_CHECKING:
from pypureclient.flasharray.FA_2_11 import models
class SoftwareBundleResponse(object):
    """Swagger-generated model wrapping a list of `SoftwareBundle` items."""
    swagger_types = {
        'items': 'list[SoftwareBundle]'
    }
    attribute_map = {
        'items': 'items'
    }
    required_args = {
    }
    def __init__(
        self,
        items=None,  # type: List[models.SoftwareBundle]
    ):
        """
        Keyword args:
            items (list[SoftwareBundle])
        """
        if items is not None:
            self.items = items
    def __setattr__(self, key, value):
        """Only attributes declared in `attribute_map` may be set."""
        if key not in self.attribute_map:
            raise KeyError("Invalid key `{}` for `SoftwareBundleResponse`".format(key))
        self.__dict__[key] = value
    def __getattribute__(self, item):
        """Unset Property placeholders behave as missing attributes."""
        value = object.__getattribute__(self, item)
        if isinstance(value, Property):
            raise AttributeError
        else:
            return value
    def to_dict(self):
        """Return the model properties as a dict, recursing into nested models."""
        result = {}
        for attr, _ in six.iteritems(self.swagger_types):
            if hasattr(self, attr):
                value = getattr(self, attr)
                if isinstance(value, list):
                    result[attr] = list(map(
                        lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                        value
                    ))
                elif hasattr(value, "to_dict"):
                    result[attr] = value.to_dict()
                elif isinstance(value, dict):
                    result[attr] = dict(map(
                        lambda item: (item[0], item[1].to_dict())
                        if hasattr(item[1], "to_dict") else item,
                        value.items()
                    ))
                else:
                    result[attr] = value
        if issubclass(SoftwareBundleResponse, dict):
            for key, value in self.items():
                result[key] = value
        return result
    def to_str(self):
        """Return the string representation of the model."""
        return pprint.pformat(self.to_dict())
    def __repr__(self):
        """For `print` and `pprint`."""
        return self.to_str()
    def __eq__(self, other):
        """Return True if both objects are equal."""
        if not isinstance(other, SoftwareBundleResponse):
            return False
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """Return True if both objects are not equal."""
        return not self == other
| true | true |
f7175555296b859086cc2c753888bdfe21cb502e | 936 | py | Python | deoldify/save.py | TaktakTaktouk/DeOldify | 0ff6139bb09d0abdf535a724f05bdad3ec04dcc1 | [
"MIT"
] | 14,898 | 2018-11-01T14:48:38.000Z | 2022-03-31T16:28:38.000Z | deoldify/save.py | TaktakTaktouk/DeOldify | 0ff6139bb09d0abdf535a724f05bdad3ec04dcc1 | [
"MIT"
] | 376 | 2018-11-02T18:22:23.000Z | 2022-03-24T21:29:19.000Z | deoldify/save.py | TaktakTaktouk/DeOldify | 0ff6139bb09d0abdf535a724f05bdad3ec04dcc1 | [
"MIT"
] | 2,250 | 2018-11-02T15:45:39.000Z | 2022-03-28T17:08:23.000Z | from fastai.basic_train import Learner, LearnerCallback
from fastai.vision.gan import GANLearner
class GANSaveCallback(LearnerCallback):
    """A `LearnerCallback` that periodically saves the generator of a `GANLearner` during training.

    Every `save_iters` iterations the generator learner's weights are saved under
    '<filename>_<epoch>_<iteration>'.  (The previous docstring, copied from a CSV metrics
    logger, did not describe this class.)
    """

    def __init__(
        self,
        learn: GANLearner,
        learn_gen: Learner,
        filename: str,
        save_iters: int = 1000,
    ):
        """
        :param learn: GAN learner whose training drives this callback.
        :param learn_gen: Generator learner whose weights are checkpointed.
        :param filename: Base name for the saved checkpoints.
        :param save_iters: Save every this many iterations (default 1000).
        """
        super().__init__(learn)
        self.learn_gen = learn_gen
        self.filename = filename
        self.save_iters = save_iters

    def on_batch_end(self, iteration: int, epoch: int, **kwargs) -> None:
        """Save the generator whenever `iteration` is a positive multiple of `save_iters`."""
        if iteration == 0:
            return

        if iteration % self.save_iters == 0:
            self._save_gen_learner(iteration=iteration, epoch=epoch)

    def _save_gen_learner(self, iteration: int, epoch: int):
        # Encode epoch and iteration into the name so successive checkpoints don't overwrite
        filename = '{}_{}_{}'.format(self.filename, epoch, iteration)
        self.learn_gen.save(filename)
| 31.2 | 103 | 0.645299 | from fastai.basic_train import Learner, LearnerCallback
from fastai.vision.gan import GANLearner
class GANSaveCallback(LearnerCallback):
    """A `LearnerCallback` that periodically saves the generator of a `GANLearner` during training."""
    def __init__(
        self,
        learn: GANLearner,
        learn_gen: Learner,
        filename: str,
        save_iters: int = 1000,
    ):
        """
        :param learn: GAN learner whose training drives this callback.
        :param learn_gen: Generator learner whose weights are checkpointed.
        :param filename: Base name for the saved checkpoints.
        :param save_iters: Save every this many iterations (default 1000).
        """
        super().__init__(learn)
        self.learn_gen = learn_gen
        self.filename = filename
        self.save_iters = save_iters
    def on_batch_end(self, iteration: int, epoch: int, **kwargs) -> None:
        """Save the generator whenever `iteration` is a positive multiple of `save_iters`."""
        if iteration == 0:
            return
        if iteration % self.save_iters == 0:
            self._save_gen_learner(iteration=iteration, epoch=epoch)
    def _save_gen_learner(self, iteration: int, epoch: int):
        # Encode epoch and iteration into the name so successive checkpoints don't overwrite
        filename = '{}_{}_{}'.format(self.filename, epoch, iteration)
        self.learn_gen.save(filename)
| true | true |
f71756f9227d14924ce1c8f11117e55a80ba40c0 | 1,904 | py | Python | tests/test_args.py | rauljim/passgen | ca55c08b1ab0439d598dc045982b6971bfee1629 | [
"MIT"
] | null | null | null | tests/test_args.py | rauljim/passgen | ca55c08b1ab0439d598dc045982b6971bfee1629 | [
"MIT"
] | null | null | null | tests/test_args.py | rauljim/passgen | ca55c08b1ab0439d598dc045982b6971bfee1629 | [
"MIT"
] | null | null | null | from passgen import args
def test_num_words():
    """`-n`/`--num-words` set the word count; without either, the default applies."""
    for argv, expected in (
        (['passgen', '-n', '22'], 22),
        (['passgen', '--num-words', '33'], 33),
        (['passgen'], args.DEFAULT_NUM_WORDS),
    ):
        assert expected == args.get_cli_options(argv).num_words
def test_count():
    """`-c`/`--count` set the password count; negatives fall back to the default."""
    for argv, expected in (
        (['passgen', '-c', '22'], 22),
        (['passgen', '--count', '33'], 33),
        (['passgen'], args.DEFAULT_COUNT),
        (['passgen', '--count', '-1'], args.DEFAULT_COUNT),  # negative value ignored
    ):
        assert expected == args.get_cli_options(argv).count
def test_min_chars():
    """`--min-chars` overrides the default minimum length."""
    for argv, expected in (
        (['passgen', '--min-chars', '33'], 33),
        (['passgen'], args.DEFAULT_MIN_CHARS),
    ):
        assert expected == args.get_cli_options(argv).min_chars
def test_max_chars():
    """`--max-chars` overrides the default maximum length."""
    for argv, expected in (
        (['passgen', '--max-chars', '33'], 33),
        (['passgen'], args.DEFAULT_MAX_CHARS),
    ):
        assert expected == args.get_cli_options(argv).max_chars
def test_conflicting_min_max_chars():
    """When min exceeds max, min is honoured and max reverts to its default."""
    opts = args.get_cli_options(['passgen', '--min-chars', '9999', '--max-chars', '11'])
    assert 9999 == opts.min_chars
    assert args.DEFAULT_MAX_CHARS == opts.max_chars
def test_get_defaults():
    """get_default_options exposes the documented default count."""
    defaults = args.get_default_options()
    assert args.DEFAULT_COUNT == defaults.count
| 32.271186 | 71 | 0.688025 | from passgen import args
def test_num_words():
    """`-n`/`--num-words` set the word count; without either, the default applies."""
    mock_argv = ['passgen', '-n', '22']
    options = args.get_cli_options(mock_argv)
    assert 22 == options.num_words
    mock_argv = ['passgen', '--num-words', '33']
    options = args.get_cli_options(mock_argv)
    assert 33 == options.num_words
    mock_argv = ['passgen']
    options = args.get_cli_options(mock_argv)
    assert args.DEFAULT_NUM_WORDS == options.num_words
def test_count():
    """`-c`/`--count` set the password count; negatives fall back to the default."""
    mock_argv = ['passgen', '-c', '22']
    options = args.get_cli_options(mock_argv)
    assert 22 == options.count
    mock_argv = ['passgen', '--count', '33']
    options = args.get_cli_options(mock_argv)
    assert 33 == options.count
    mock_argv = ['passgen']
    options = args.get_cli_options(mock_argv)
    assert args.DEFAULT_COUNT == options.count
    mock_argv = ['passgen', '--count', '-1']  # negative value ignored
    options = args.get_cli_options(mock_argv)
    assert args.DEFAULT_COUNT == options.count
def test_min_chars():
    """`--min-chars` overrides the default minimum length."""
    mock_argv = ['passgen', '--min-chars', '33']
    options = args.get_cli_options(mock_argv)
    assert 33 == options.min_chars
    mock_argv = ['passgen']
    options = args.get_cli_options(mock_argv)
    assert args.DEFAULT_MIN_CHARS == options.min_chars
def test_max_chars():
    """`--max-chars` overrides the default maximum length."""
    mock_argv = ['passgen', '--max-chars', '33']
    options = args.get_cli_options(mock_argv)
    assert 33 == options.max_chars
    mock_argv = ['passgen']
    options = args.get_cli_options(mock_argv)
    assert args.DEFAULT_MAX_CHARS == options.max_chars
def test_conflicting_min_max_chars():
    """When min exceeds max, min is honoured and max reverts to its default."""
    mock_argv = ['passgen', '--min-chars', '9999', '--max-chars', '11']
    options = args.get_cli_options(mock_argv)
    assert 9999 == options.min_chars
    assert args.DEFAULT_MAX_CHARS == options.max_chars
def test_get_defaults():
    """get_default_options exposes the documented default count."""
    options = args.get_default_options()
    assert args.DEFAULT_COUNT == options.count
f717576ebe1b232b2fdba0695ea262b2ae5063cc | 2,568 | py | Python | src/base/base_train.py | MohamedAli1995/Cifar-100-Classifier | 924704a81ce13062825a88b90b80e8ac2ba45d63 | [
"MIT"
] | 2 | 2019-05-12T16:11:20.000Z | 2020-04-10T22:39:57.000Z | src/base/base_train.py | MohamedAli1995/Cifar-100-Classifier | 924704a81ce13062825a88b90b80e8ac2ba45d63 | [
"MIT"
] | null | null | null | src/base/base_train.py | MohamedAli1995/Cifar-100-Classifier | 924704a81ce13062825a88b90b80e8ac2ba45d63 | [
"MIT"
] | null | null | null | import tensorflow as tf
class BaseTrain:
    """Standard base_train-class for easy multiple-inheritance.

    It is responsible for defining the functions to be implemented with any child.

    Attributes:
        sess: Tensorflow session to use.
        model: Model to be trained.
        data: Data_loader object to interact with dataset.
        config: Config object to store data related to training, testing and validation.
        logger: Logger object to use tensorboard.
    """

    def __init__(self, sess, model, data, config, logger):
        self.model = model
        self.config = config
        self.sess = sess
        self.data = data
        self.logger = logger
        if not self.config.pretrain:  # If not pretrained, initialize all variables from scratch.
            self.init = tf.group(tf.global_variables_initializer(), tf.local_variables_initializer())
            self.sess.run(self.init)

    def train(self):
        """Train the model for the number of epochs in config.num_epochs.

        Calls validate_epoch if config.use_val is set to true and per config.val_per_epoch.
        """
        for cur_epoch in range(self.model.cur_epoch_tensor.eval(self.sess), self.config.num_epochs + 1, 1):
            self.data.prepare_new_epoch_data()
            self.train_epoch()
            if self.config.use_val and (
                    cur_epoch % self.config.val_per_epoch == 0 or cur_epoch == self.config.num_epochs):
                self.validate_epoch()
            self.sess.run(self.model.increment_cur_epoch_tensor)

    def train_epoch(self):
        """Implements the logic of training_epoch:
        -Loop over the batches of the training data and call the train step for each.
        -Add any summaries you want using the summary
        """
        # Bug fix: `raise NotImplemented` raised a TypeError (NotImplemented is a constant,
        # not an exception); NotImplementedError is the correct abstract-method signal.
        raise NotImplementedError

    def train_step(self):
        """Implements the logic of the train step:
        -Run the tensorflow session
        -Returns:
            Any of the metrics needs to be summarized.
        """
        raise NotImplementedError

    def validate_epoch(self):
        """Implements the logic of validation_epoch:
        -Loop over the batches of the validation data and call the validate step for each.
        -Add any summaries you want using the summary
        """
        raise NotImplementedError

    def validate_step(self):
        """Implements the logic of the validate step:
        -Run the tensorflow session
        -Returns:
            Any of the metrics needs to be summarized.
        """
        raise NotImplementedError
| 36.169014 | 115 | 0.640576 | import tensorflow as tf
class BaseTrain:
def __init__(self, sess, model, data, config, logger):
self.model = model
self.config = config
self.sess = sess
self.data = data
self.logger = logger
if not self.config.pretrain:
self.init = tf.group(tf.global_variables_initializer(), tf.local_variables_initializer())
self.sess.run(self.init)
def train(self):
for cur_epoch in range(self.model.cur_epoch_tensor.eval(self.sess), self.config.num_epochs + 1, 1):
self.data.prepare_new_epoch_data()
self.train_epoch()
if self.config.use_val and (
cur_epoch % self.config.val_per_epoch == 0 or cur_epoch == self.config.num_epochs):
self.validate_epoch()
self.sess.run(self.model.increment_cur_epoch_tensor)
def train_epoch(self):
raise NotImplemented
def train_step(self):
raise NotImplementedError
def validate_epoch(self):
raise NotImplemented
def validate_step(self):
raise NotImplemented
| true | true |
f7175782c5dfa546dd124965a86fa50da687c7ac | 1,267 | py | Python | SIGNUS/app/api/signus_v1/post.py | 837477/SIGNUS | cd395dfd45d2c36d09ec9a8069e6e52e19f058e8 | [
"MIT"
] | null | null | null | SIGNUS/app/api/signus_v1/post.py | 837477/SIGNUS | cd395dfd45d2c36d09ec9a8069e6e52e19f058e8 | [
"MIT"
] | null | null | null | SIGNUS/app/api/signus_v1/post.py | 837477/SIGNUS | cd395dfd45d2c36d09ec9a8069e6e52e19f058e8 | [
"MIT"
] | null | null | null | '''
SIGNUS V1 post API
'''
from flask import g
from app.api.signus_v1 import signus_v1 as api
from app.api.decorators import timer, login_required, login_optional
from app.controllers.post import (post_like,
post_unlike,
post_view)
@api.route("/post/like/<string:post_oid>", methods=["PATCH"])
@timer
@login_required
def signus_v1_post_like(post_oid):
    ''' Like a post: record the authenticated user's like on the post. '''
    # g.user and g.mongo_cur are populated by the decorators / app context.
    return {
        "msg": "success",
        "result": post_like(g.mongo_cur,
                            post_oid,
                            g.user)
    }
@api.route("/post/unlike/<string:post_oid>", methods=["PATCH"])
@timer
@login_required
def signus_v1_post_unlike(post_oid):
    ''' Unlike a post: remove the authenticated user's like from the post. '''
    # g.user and g.mongo_cur are populated by the decorators / app context.
    return {
        "msg": "success",
        "result": post_unlike(g.mongo_cur,
                              post_oid,
                              g.user)
    }
@api.route("/post/view/<string:post_oid>", methods=["PATCH"])
@timer
@login_optional
def signus_v1_post_view(post_oid):
    ''' Increment a post's view count.

    Login is optional: when a user is attached to the request context the
    view is recorded against that user, otherwise anonymously.
    '''
    if 'user' in g:
        result = post_view(g.mongo_cur, post_oid, g.user)
    else:
        result = post_view(g.mongo_cur, post_oid)
    return {
        "msg": "success",
        "result": result
    }
| 24.843137 | 68 | 0.556433 | from flask import g
from app.api.signus_v1 import signus_v1 as api
from app.api.decorators import timer, login_required, login_optional
from app.controllers.post import (post_like,
post_unlike,
post_view)
@api.route("/post/like/<string:post_oid>", methods=["PATCH"])
@timer
@login_required
def signus_v1_post_like(post_oid):
return {
"msg": "success",
"result": post_like(g.mongo_cur,
post_oid,
g.user)
}
@api.route("/post/unlike/<string:post_oid>", methods=["PATCH"])
@timer
@login_required
def signus_v1_post_unlike(post_oid):
return {
"msg": "success",
"result": post_unlike(g.mongo_cur,
post_oid,
g.user)
}
@api.route("/post/view/<string:post_oid>", methods=["PATCH"])
@timer
@login_optional
def signus_v1_post_view(post_oid):
if 'user' in g:
result = post_view(g.mongo_cur, post_oid, g.user)
else:
result = post_view(g.mongo_cur, post_oid)
return {
"msg": "success",
"result": result
}
| true | true |
f71759ee5d329c4385a20b4d6bd880bfb741c347 | 1,398 | py | Python | migrations/versions/f0a99f6b5e5e_.py | CSCfi/pebbles | 24b32e8fc538cc8095fda62c892a8221346c2bce | [
"MIT"
] | 4 | 2017-05-11T14:50:32.000Z | 2020-01-10T09:02:27.000Z | migrations/versions/f0a99f6b5e5e_.py | CSCfi/pebbles | 24b32e8fc538cc8095fda62c892a8221346c2bce | [
"MIT"
] | 145 | 2017-04-07T11:01:58.000Z | 2019-12-11T15:30:23.000Z | migrations/versions/f0a99f6b5e5e_.py | CSCfi/pebbles | 24b32e8fc538cc8095fda62c892a8221346c2bce | [
"MIT"
] | 3 | 2017-10-25T12:36:16.000Z | 2018-04-26T08:49:34.000Z | """empty message
Revision ID: f0a99f6b5e5e
Revises: he536vdwh29f
Create Date: 2019-05-31 15:57:36.032393
"""
# revision identifiers, used by Alembic.
revision = 'f0a99f6b5e5e'
down_revision = 'he536vdwh29f'
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Create the instance_tokens table (per-instance access tokens)."""
    ### commands auto generated by Alembic - please adjust! ###
    op.create_table(
        'instance_tokens',
        sa.Column('token', sa.String(length=32), nullable=False),
        sa.Column('instance_id', sa.String(length=32), nullable=True),
        sa.Column('expires_on', sa.DateTime(), nullable=True),
        sa.ForeignKeyConstraint(['instance_id'], ['instances.id'], name=op.f('fk_instance_tokens_instance_id_instances')),
        sa.PrimaryKeyConstraint('token', name=op.f('pk_instance_tokens'))
    )
    # The constraint changes below were auto-generated but intentionally
    # disabled; kept for reference.
    # op.create_unique_constraint(op.f('uq_users_email_id'), 'users', ['email_id'])
    # op.create_unique_constraint(op.f('uq_users_eppn'), 'users', ['eppn'])
    # op.drop_constraint(u'uq_users_email', 'users', type_='unique')
    ### end Alembic commands ###
def downgrade():
    """Drop the instance_tokens table created in upgrade()."""
    ### commands auto generated by Alembic - please adjust! ###
    # The constraint changes below were auto-generated but intentionally
    # disabled; kept for reference.
    # op.create_unique_constraint(u'uq_users_email', 'users', ['eppn'])
    # op.drop_constraint(op.f('uq_users_eppn'), 'users', type_='unique')
    # op.drop_constraint(op.f('uq_users_email_id'), 'users', type_='unique')
    op.drop_table('instance_tokens')
    ### end Alembic commands ###
| 35.846154 | 118 | 0.700286 |
revision = 'f0a99f6b5e5e'
down_revision = 'he536vdwh29f'
from alembic import op
import sqlalchemy as sa
def upgrade():
ance_id', sa.String(length=32), nullable=True),
sa.Column('expires_on', sa.DateTime(), nullable=True),
sa.ForeignKeyConstraint(['instance_id'], ['instances.id'], name=op.f('fk_instance_tokens_instance_id_instances')),
sa.PrimaryKeyConstraint('token', name=op.f('pk_instance_tokens'))
)
| true | true |
f71759fb13c6dffdc6c632f41e1c01f82a06b50a | 103,166 | py | Python | python/pyarrow/tests/test_parquet.py | sparkma/arrow | 62fd703a4ef0abbecb02397a06a630a9dee382d9 | [
"Apache-2.0"
] | null | null | null | python/pyarrow/tests/test_parquet.py | sparkma/arrow | 62fd703a4ef0abbecb02397a06a630a9dee382d9 | [
"Apache-2.0"
] | null | null | null | python/pyarrow/tests/test_parquet.py | sparkma/arrow | 62fd703a4ef0abbecb02397a06a630a9dee382d9 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from collections import OrderedDict
import datetime
import decimal
import io
import json
import os
import six
import pickle
import pytest
import numpy as np
import pyarrow as pa
from pyarrow.compat import guid, u, BytesIO, unichar, PY2
from pyarrow.pandas_compat import _pandas_api
from pyarrow.tests import util
from pyarrow.filesystem import LocalFileSystem, FileSystem
try:
import pyarrow.parquet as pq
except ImportError:
pq = None
try:
import pandas as pd
import pandas.util.testing as tm
from .pandas_examples import dataframe_with_arrays, dataframe_with_lists
except ImportError:
pd = tm = None
# Marks all of the tests in this module
# Ignore these with pytest ... -m 'not parquet'
pytestmark = pytest.mark.parquet
@pytest.fixture(scope='module')
def datadir(datadir):
    # Narrow the shared ``datadir`` fixture to the parquet test-data subdir.
    return datadir / 'parquet'
def _write_table(table, path, **kwargs):
    """Write *table* to parquet at *path* and return the Table written.

    Accepts either a pyarrow Table or a pandas DataFrame; a DataFrame is
    converted to a Table first.
    """
    # So we see the ImportError somewhere
    import pyarrow.parquet as pq

    if _pandas_api.is_data_frame(table):
        table = pa.Table.from_pandas(table)

    pq.write_table(table, path, **kwargs)
    return table
def _read_table(*args, **kwargs):
    # Thin indirection over pq.read_table so tests share one read entry point.
    return pq.read_table(*args, **kwargs)
def _roundtrip_table(table, read_table_kwargs=None,
                     write_table_kwargs=None):
    """Write *table* to an in-memory buffer and read it back."""
    sink = io.BytesIO()
    _write_table(table, sink, **(write_table_kwargs or {}))
    sink.seek(0)
    return _read_table(sink, **(read_table_kwargs or {}))
def _check_roundtrip(table, expected=None, read_table_kwargs=None,
                     **write_table_kwargs):
    """Assert that *table* survives two consecutive parquet round trips.

    The second pass catches defects that only appear when re-serializing a
    table that was itself produced by reading parquet.
    """
    if expected is None:
        expected = table

    kwargs = read_table_kwargs or {}
    current = table
    for _ in range(2):
        current = _roundtrip_table(current,
                                   read_table_kwargs=kwargs,
                                   write_table_kwargs=write_table_kwargs)
        assert current.equals(expected)
def _roundtrip_pandas_dataframe(df, write_kwargs):
    """Round-trip *df* through an in-memory parquet file; return DataFrame."""
    sink = io.BytesIO()
    _write_table(pa.Table.from_pandas(df), sink, **write_kwargs)
    sink.seek(0)
    return _read_table(sink).to_pandas()
@pytest.mark.parametrize('dtype', [int, float])
def test_single_pylist_column_roundtrip(tempdir, dtype):
    """A one-column table built from a Python list survives a round trip."""
    filename = tempdir / 'single_{}_column.parquet'.format(dtype.__name__)
    table = pa.Table.from_arrays([pa.array([dtype(x) for x in range(5)])],
                                 names=['a'])
    _write_table(table, filename)
    table_read = _read_table(filename)

    for i in range(table.num_columns):
        written = table[i]
        read = table_read[i]
        assert table.field(i).name == table_read.field(i).name
        # A freshly read file should come back as a single chunk per column.
        assert read.num_chunks == 1
        assert written.chunk(0).equals(read.chunk(0))
def alltypes_sample(size=10000, seed=0, categorical=False):
    """Build a DataFrame with one column per primitive type the tests cover.

    Parameters
    ----------
    size : int
        Number of rows to generate.
    seed : int
        Seed for numpy's RNG, so fixtures are reproducible.
    categorical : bool
        When True, append a pandas Categorical column derived from 'str'.
    """
    np.random.seed(seed)
    frame = pd.DataFrame({
        'uint8': np.arange(size, dtype=np.uint8),
        'uint16': np.arange(size, dtype=np.uint16),
        'uint32': np.arange(size, dtype=np.uint32),
        'uint64': np.arange(size, dtype=np.uint64),
        # NOTE(review): 'int8' is deliberately stored as int16, preserving
        # the original fixture's behavior.
        'int8': np.arange(size, dtype=np.int16),
        'int16': np.arange(size, dtype=np.int16),
        'int32': np.arange(size, dtype=np.int32),
        'int64': np.arange(size, dtype=np.int64),
        'float32': np.arange(size, dtype=np.float32),
        'float64': np.arange(size, dtype=np.float64),
        'bool': np.random.randn(size) > 0,
        # TODO(wesm): Test other timestamp resolutions now that arrow
        # supports them
        'datetime': np.arange("2016-01-01T00:00:00.001", size,
                              dtype='datetime64[ms]'),
        'str': pd.Series([str(i) for i in range(size)]),
        'empty_str': [''] * size,
        'str_with_nulls': [None] + [str(i) for i in range(size - 2)] + [None],
        'null': [None] * size,
        'null_list': [None] * 2 + [[None] * (i % 4) for i in range(size - 2)],
    })
    if categorical:
        frame['str_category'] = frame['str'].astype('category')
    return frame
@pytest.mark.pandas
@pytest.mark.parametrize('chunk_size', [None, 1000])
def test_pandas_parquet_2_0_roundtrip(tempdir, chunk_size):
df = alltypes_sample(size=10000, categorical=True)
filename = tempdir / 'pandas_roundtrip.parquet'
arrow_table = pa.Table.from_pandas(df)
assert arrow_table.schema.pandas_metadata is not None
_write_table(arrow_table, filename, version="2.0",
coerce_timestamps='ms', chunk_size=chunk_size)
table_read = pq.read_pandas(filename)
assert table_read.schema.pandas_metadata is not None
assert arrow_table.schema.metadata == table_read.schema.metadata
df_read = table_read.to_pandas()
tm.assert_frame_equal(df, df_read)
def test_set_data_page_size():
    """Round trips succeed for several explicit data page sizes."""
    values = pa.array([1, 2, 3] * 1000000)
    table = pa.Table.from_arrays([values], names=['f0'])

    # 128K, 256K, 512K
    for page_size in (1 << 17, 1 << 18, 1 << 19):
        _check_roundtrip(table, data_page_size=page_size)
@pytest.mark.pandas
def test_chunked_table_write():
# ARROW-232
df = alltypes_sample(size=10)
batch = pa.RecordBatch.from_pandas(df)
table = pa.Table.from_batches([batch] * 3)
_check_roundtrip(table, version='2.0')
df, _ = dataframe_with_lists()
batch = pa.RecordBatch.from_pandas(df)
table = pa.Table.from_batches([batch] * 3)
_check_roundtrip(table, version='2.0')
@pytest.mark.pandas
def test_no_memory_map(tempdir):
df = alltypes_sample(size=10)
table = pa.Table.from_pandas(df)
_check_roundtrip(table, read_table_kwargs={'memory_map': False},
version='2.0')
filename = str(tempdir / 'tmp_file')
with open(filename, 'wb') as f:
_write_table(table, f, version='2.0')
table_read = pq.read_pandas(filename, memory_map=False)
assert table_read.equals(table)
def test_special_chars_filename(tempdir):
    """Files whose names contain '#' can be written and read back."""
    table = pa.Table.from_arrays([pa.array([42])], ["ints"])
    path = tempdir / "foo # bar"
    assert not path.exists()

    _write_table(table, str(path))
    assert path.exists()

    assert _read_table(str(path)).equals(table)
@pytest.mark.pandas
def test_empty_table_roundtrip():
df = alltypes_sample(size=10)
# Create a non-empty table to infer the types correctly, then slice to 0
table = pa.Table.from_pandas(df)
table = pa.Table.from_arrays(
[col.chunk(0)[:0] for col in table.itercolumns()],
names=table.schema.names)
assert table.schema.field_by_name('null').type == pa.null()
assert table.schema.field_by_name('null_list').type == pa.list_(pa.null())
_check_roundtrip(table, version='2.0')
@pytest.mark.pandas
def test_empty_table_no_columns():
    # A DataFrame with no rows and no columns should survive the round trip.
    empty = pa.Table.from_pandas(pd.DataFrame(), preserve_index=False)
    _check_roundtrip(empty)
def test_empty_lists_table_roundtrip():
    # ARROW-2744: Shouldn't crash when writing an array of empty lists
    empty_lists = pa.array([[], []], type=pa.list_(pa.int32()))
    _check_roundtrip(pa.Table.from_arrays([empty_lists], ["A"]))
@pytest.mark.pandas
def test_pandas_parquet_datetime_tz():
s = pd.Series([datetime.datetime(2017, 9, 6)])
s = s.dt.tz_localize('utc')
s.index = s
# Both a column and an index to hit both use cases
df = pd.DataFrame({'tz_aware': s,
'tz_eastern': s.dt.tz_convert('US/Eastern')},
index=s)
f = BytesIO()
arrow_table = pa.Table.from_pandas(df)
_write_table(arrow_table, f, coerce_timestamps='ms')
f.seek(0)
table_read = pq.read_pandas(f)
df_read = table_read.to_pandas()
tm.assert_frame_equal(df, df_read)
@pytest.mark.pandas
@pytest.mark.skipif(six.PY2, reason='datetime.timezone is available since '
                                    'python version 3.2')
def test_datetime_timezone_tzinfo():
    """A tz-aware datetime built with datetime.timezone round-trips."""
    ts = datetime.datetime(2018, 1, 1, 1, 23, 45,
                           tzinfo=datetime.timezone.utc)
    _roundtrip_pandas_dataframe(pd.DataFrame({'foo': [ts]}), write_kwargs={})
@pytest.mark.pandas
def test_pandas_parquet_custom_metadata(tempdir):
df = alltypes_sample(size=10000)
filename = tempdir / 'pandas_roundtrip.parquet'
arrow_table = pa.Table.from_pandas(df)
assert b'pandas' in arrow_table.schema.metadata
_write_table(arrow_table, filename, version='2.0', coerce_timestamps='ms')
metadata = pq.read_metadata(filename).metadata
assert b'pandas' in metadata
js = json.loads(metadata[b'pandas'].decode('utf8'))
assert js['index_columns'] == [{'kind': 'range',
'name': None,
'start': 0, 'stop': 10000,
'step': 1}]
@pytest.mark.pandas
def test_pandas_parquet_column_multiindex(tempdir):
df = alltypes_sample(size=10)
df.columns = pd.MultiIndex.from_tuples(
list(zip(df.columns, df.columns[::-1])),
names=['level_1', 'level_2']
)
filename = tempdir / 'pandas_roundtrip.parquet'
arrow_table = pa.Table.from_pandas(df)
assert arrow_table.schema.pandas_metadata is not None
_write_table(arrow_table, filename, version='2.0', coerce_timestamps='ms')
table_read = pq.read_pandas(filename)
df_read = table_read.to_pandas()
tm.assert_frame_equal(df, df_read)
@pytest.mark.pandas
def test_pandas_parquet_2_0_roundtrip_read_pandas_no_index_written(tempdir):
df = alltypes_sample(size=10000)
filename = tempdir / 'pandas_roundtrip.parquet'
arrow_table = pa.Table.from_pandas(df, preserve_index=False)
js = arrow_table.schema.pandas_metadata
assert not js['index_columns']
# ARROW-2170
# While index_columns should be empty, columns needs to be filled still.
assert js['columns']
_write_table(arrow_table, filename, version='2.0', coerce_timestamps='ms')
table_read = pq.read_pandas(filename)
js = table_read.schema.pandas_metadata
assert not js['index_columns']
assert arrow_table.schema.metadata == table_read.schema.metadata
df_read = table_read.to_pandas()
tm.assert_frame_equal(df, df_read)
@pytest.mark.pandas
def test_pandas_parquet_1_0_roundtrip(tempdir):
size = 10000
np.random.seed(0)
df = pd.DataFrame({
'uint8': np.arange(size, dtype=np.uint8),
'uint16': np.arange(size, dtype=np.uint16),
'uint32': np.arange(size, dtype=np.uint32),
'uint64': np.arange(size, dtype=np.uint64),
'int8': np.arange(size, dtype=np.int16),
'int16': np.arange(size, dtype=np.int16),
'int32': np.arange(size, dtype=np.int32),
'int64': np.arange(size, dtype=np.int64),
'float32': np.arange(size, dtype=np.float32),
'float64': np.arange(size, dtype=np.float64),
'bool': np.random.randn(size) > 0,
'str': [str(x) for x in range(size)],
'str_with_nulls': [None] + [str(x) for x in range(size - 2)] + [None],
'empty_str': [''] * size
})
filename = tempdir / 'pandas_roundtrip.parquet'
arrow_table = pa.Table.from_pandas(df)
_write_table(arrow_table, filename, version='1.0')
table_read = _read_table(filename)
df_read = table_read.to_pandas()
# We pass uint32_t as int64_t if we write Parquet version 1.0
df['uint32'] = df['uint32'].values.astype(np.int64)
tm.assert_frame_equal(df, df_read)
@pytest.mark.pandas
def test_multiple_path_types(tempdir):
# Test compatibility with PEP 519 path-like objects
path = tempdir / 'zzz.parquet'
df = pd.DataFrame({'x': np.arange(10, dtype=np.int64)})
_write_table(df, path)
table_read = _read_table(path)
df_read = table_read.to_pandas()
tm.assert_frame_equal(df, df_read)
# Test compatibility with plain string paths
path = str(tempdir) + 'zzz.parquet'
df = pd.DataFrame({'x': np.arange(10, dtype=np.int64)})
_write_table(df, path)
table_read = _read_table(path)
df_read = table_read.to_pandas()
tm.assert_frame_equal(df, df_read)
@pytest.mark.pandas
def test_pandas_column_selection(tempdir):
size = 10000
np.random.seed(0)
df = pd.DataFrame({
'uint8': np.arange(size, dtype=np.uint8),
'uint16': np.arange(size, dtype=np.uint16)
})
filename = tempdir / 'pandas_roundtrip.parquet'
arrow_table = pa.Table.from_pandas(df)
_write_table(arrow_table, filename)
table_read = _read_table(filename, columns=['uint8'])
df_read = table_read.to_pandas()
tm.assert_frame_equal(df[['uint8']], df_read)
# ARROW-4267: Selection of duplicate columns still leads to these columns
# being read uniquely.
table_read = _read_table(filename, columns=['uint8', 'uint8'])
df_read = table_read.to_pandas()
tm.assert_frame_equal(df[['uint8']], df_read)
def _random_integers(size, dtype):
# We do not generate integers outside the int64 range
platform_int_info = np.iinfo('int_')
iinfo = np.iinfo(dtype)
return np.random.randint(max(iinfo.min, platform_int_info.min),
min(iinfo.max, platform_int_info.max),
size=size).astype(dtype)
def _test_dataframe(size=10000, seed=0):
    """Random DataFrame fixture mixing numeric, string, bool and all-null
    columns; seeded for reproducibility."""
    np.random.seed(seed)
    columns = {
        'uint8': _random_integers(size, np.uint8),
        'uint16': _random_integers(size, np.uint16),
        'uint32': _random_integers(size, np.uint32),
        'uint64': _random_integers(size, np.uint64),
        'int8': _random_integers(size, np.int8),
        'int16': _random_integers(size, np.int16),
        'int32': _random_integers(size, np.int32),
        'int64': _random_integers(size, np.int64),
        'float32': np.random.randn(size).astype(np.float32),
        'float64': np.arange(size, dtype=np.float64),
        'bool': np.random.randn(size) > 0,
        'strings': [tm.rands(10) for _ in range(size)],
        'all_none': [None] * size,
        'all_none_category': [None] * size,
    }
    # TODO(PARQUET-1015): eventually make 'all_none_category' a real
    # categorical column:
    # columns['all_none_category'] = \
    #     pd.Series(columns['all_none_category']).astype('category')
    return pd.DataFrame(columns)
@pytest.mark.pandas
def test_pandas_parquet_native_file_roundtrip(tempdir):
df = _test_dataframe(10000)
arrow_table = pa.Table.from_pandas(df)
imos = pa.BufferOutputStream()
_write_table(arrow_table, imos, version="2.0")
buf = imos.getvalue()
reader = pa.BufferReader(buf)
df_read = _read_table(reader).to_pandas()
tm.assert_frame_equal(df, df_read)
@pytest.mark.pandas
def test_parquet_incremental_file_build(tempdir):
df = _test_dataframe(100)
df['unique_id'] = 0
arrow_table = pa.Table.from_pandas(df, preserve_index=False)
out = pa.BufferOutputStream()
writer = pq.ParquetWriter(out, arrow_table.schema, version='2.0')
frames = []
for i in range(10):
df['unique_id'] = i
arrow_table = pa.Table.from_pandas(df, preserve_index=False)
writer.write_table(arrow_table)
frames.append(df.copy())
writer.close()
buf = out.getvalue()
result = _read_table(pa.BufferReader(buf))
expected = pd.concat(frames, ignore_index=True)
tm.assert_frame_equal(result.to_pandas(), expected)
@pytest.mark.pandas
def test_read_pandas_column_subset(tempdir):
df = _test_dataframe(10000)
arrow_table = pa.Table.from_pandas(df)
imos = pa.BufferOutputStream()
_write_table(arrow_table, imos, version="2.0")
buf = imos.getvalue()
reader = pa.BufferReader(buf)
df_read = pq.read_pandas(reader, columns=['strings', 'uint8']).to_pandas()
tm.assert_frame_equal(df[['strings', 'uint8']], df_read)
@pytest.mark.pandas
def test_pandas_parquet_empty_roundtrip(tempdir):
df = _test_dataframe(0)
arrow_table = pa.Table.from_pandas(df)
imos = pa.BufferOutputStream()
_write_table(arrow_table, imos, version="2.0")
buf = imos.getvalue()
reader = pa.BufferReader(buf)
df_read = _read_table(reader).to_pandas()
tm.assert_frame_equal(df, df_read)
@pytest.mark.pandas
def test_pandas_parquet_pyfile_roundtrip(tempdir):
filename = tempdir / 'pandas_pyfile_roundtrip.parquet'
size = 5
df = pd.DataFrame({
'int64': np.arange(size, dtype=np.int64),
'float32': np.arange(size, dtype=np.float32),
'float64': np.arange(size, dtype=np.float64),
'bool': np.random.randn(size) > 0,
'strings': ['foo', 'bar', None, 'baz', 'qux']
})
arrow_table = pa.Table.from_pandas(df)
with filename.open('wb') as f:
_write_table(arrow_table, f, version="1.0")
data = io.BytesIO(filename.read_bytes())
table_read = _read_table(data)
df_read = table_read.to_pandas()
tm.assert_frame_equal(df, df_read)
@pytest.mark.pandas
def test_pandas_parquet_configuration_options(tempdir):
size = 10000
np.random.seed(0)
df = pd.DataFrame({
'uint8': np.arange(size, dtype=np.uint8),
'uint16': np.arange(size, dtype=np.uint16),
'uint32': np.arange(size, dtype=np.uint32),
'uint64': np.arange(size, dtype=np.uint64),
'int8': np.arange(size, dtype=np.int16),
'int16': np.arange(size, dtype=np.int16),
'int32': np.arange(size, dtype=np.int32),
'int64': np.arange(size, dtype=np.int64),
'float32': np.arange(size, dtype=np.float32),
'float64': np.arange(size, dtype=np.float64),
'bool': np.random.randn(size) > 0
})
filename = tempdir / 'pandas_roundtrip.parquet'
arrow_table = pa.Table.from_pandas(df)
for use_dictionary in [True, False]:
_write_table(arrow_table, filename, version='2.0',
use_dictionary=use_dictionary)
table_read = _read_table(filename)
df_read = table_read.to_pandas()
tm.assert_frame_equal(df, df_read)
for write_statistics in [True, False]:
_write_table(arrow_table, filename, version='2.0',
write_statistics=write_statistics)
table_read = _read_table(filename)
df_read = table_read.to_pandas()
tm.assert_frame_equal(df, df_read)
for compression in ['NONE', 'SNAPPY', 'GZIP', 'LZ4', 'ZSTD']:
_write_table(arrow_table, filename, version='2.0',
compression=compression)
table_read = _read_table(filename)
df_read = table_read.to_pandas()
tm.assert_frame_equal(df, df_read)
def make_sample_file(table_or_df):
    """Write *table_or_df* to an in-memory parquet file and open it.

    Accepts a pyarrow Table or a pandas DataFrame; returns a ParquetFile.
    """
    if isinstance(table_or_df, pa.Table):
        table = table_or_df
    else:
        table = pa.Table.from_pandas(table_or_df)

    sink = io.BytesIO()
    _write_table(table, sink, compression='SNAPPY', version='2.0',
                 coerce_timestamps='ms')
    sink.seek(0)
    return pq.ParquetFile(sink)
@pytest.mark.pandas
def test_parquet_metadata_api():
df = alltypes_sample(size=10000)
df = df.reindex(columns=sorted(df.columns))
df.index = np.random.randint(0, 1000000, size=len(df))
fileh = make_sample_file(df)
ncols = len(df.columns)
# Series of sniff tests
meta = fileh.metadata
repr(meta)
assert meta.num_rows == len(df)
assert meta.num_columns == ncols + 1 # +1 for index
assert meta.num_row_groups == 1
assert meta.format_version == '2.0'
assert 'parquet-cpp' in meta.created_by
assert isinstance(meta.serialized_size, int)
assert isinstance(meta.metadata, dict)
# Schema
schema = fileh.schema
assert meta.schema is schema
assert len(schema) == ncols + 1 # +1 for index
repr(schema)
col = schema[0]
repr(col)
assert col.name == df.columns[0]
assert col.max_definition_level == 1
assert col.max_repetition_level == 0
assert col.max_repetition_level == 0
assert col.physical_type == 'BOOLEAN'
assert col.converted_type == 'NONE'
with pytest.raises(IndexError):
schema[ncols + 1] # +1 for index
with pytest.raises(IndexError):
schema[-1]
# Row group
for rg in range(meta.num_row_groups):
rg_meta = meta.row_group(rg)
assert isinstance(rg_meta, pq.RowGroupMetaData)
repr(rg_meta)
for col in range(rg_meta.num_columns):
col_meta = rg_meta.column(col)
assert isinstance(col_meta, pq.ColumnChunkMetaData)
repr(col_meta)
with pytest.raises(IndexError):
meta.row_group(-1)
with pytest.raises(IndexError):
meta.row_group(meta.num_row_groups + 1)
rg_meta = meta.row_group(0)
assert rg_meta.num_rows == len(df)
assert rg_meta.num_columns == ncols + 1 # +1 for index
assert rg_meta.total_byte_size > 0
with pytest.raises(IndexError):
col_meta = rg_meta.column(-1)
with pytest.raises(IndexError):
col_meta = rg_meta.column(ncols + 2)
col_meta = rg_meta.column(0)
assert col_meta.file_offset > 0
assert col_meta.file_path == '' # created from BytesIO
assert col_meta.physical_type == 'BOOLEAN'
assert col_meta.num_values == 10000
assert col_meta.path_in_schema == 'bool'
assert col_meta.is_stats_set is True
assert isinstance(col_meta.statistics, pq.Statistics)
assert col_meta.compression == 'SNAPPY'
assert col_meta.encodings == ('PLAIN', 'RLE')
assert col_meta.has_dictionary_page is False
assert col_meta.dictionary_page_offset is None
assert col_meta.data_page_offset > 0
assert col_meta.total_compressed_size > 0
assert col_meta.total_uncompressed_size > 0
with pytest.raises(NotImplementedError):
col_meta.has_index_page
with pytest.raises(NotImplementedError):
col_meta.index_page_offset
@pytest.mark.pandas
@pytest.mark.parametrize(
(
'data',
'type',
'physical_type',
'min_value',
'max_value',
'null_count',
'num_values',
'distinct_count'
),
[
([1, 2, 2, None, 4], pa.uint8(), 'INT32', 1, 4, 1, 4, 0),
([1, 2, 2, None, 4], pa.uint16(), 'INT32', 1, 4, 1, 4, 0),
([1, 2, 2, None, 4], pa.uint32(), 'INT32', 1, 4, 1, 4, 0),
([1, 2, 2, None, 4], pa.uint64(), 'INT64', 1, 4, 1, 4, 0),
([-1, 2, 2, None, 4], pa.int8(), 'INT32', -1, 4, 1, 4, 0),
([-1, 2, 2, None, 4], pa.int16(), 'INT32', -1, 4, 1, 4, 0),
([-1, 2, 2, None, 4], pa.int32(), 'INT32', -1, 4, 1, 4, 0),
([-1, 2, 2, None, 4], pa.int64(), 'INT64', -1, 4, 1, 4, 0),
(
[-1.1, 2.2, 2.3, None, 4.4], pa.float32(),
'FLOAT', -1.1, 4.4, 1, 4, 0
),
(
[-1.1, 2.2, 2.3, None, 4.4], pa.float64(),
'DOUBLE', -1.1, 4.4, 1, 4, 0
),
(
[u'', u'b', unichar(1000), None, u'aaa'], pa.binary(),
'BYTE_ARRAY', b'', unichar(1000).encode('utf-8'), 1, 4, 0
),
(
[True, False, False, True, True], pa.bool_(),
'BOOLEAN', False, True, 0, 5, 0
),
(
[b'\x00', b'b', b'12', None, b'aaa'], pa.binary(),
'BYTE_ARRAY', b'\x00', b'b', 1, 4, 0
),
]
)
def test_parquet_column_statistics_api(data, type, physical_type, min_value,
max_value, null_count, num_values,
distinct_count):
df = pd.DataFrame({'data': data})
schema = pa.schema([pa.field('data', type)])
table = pa.Table.from_pandas(df, schema=schema, safe=False)
fileh = make_sample_file(table)
meta = fileh.metadata
rg_meta = meta.row_group(0)
col_meta = rg_meta.column(0)
stat = col_meta.statistics
assert stat.has_min_max
assert _close(type, stat.min, min_value)
assert _close(type, stat.max, max_value)
assert stat.null_count == null_count
assert stat.num_values == num_values
# TODO(kszucs) until parquet-cpp API doesn't expose HasDistinctCount
# method, missing distinct_count is represented as zero instead of None
assert stat.distinct_count == distinct_count
assert stat.physical_type == physical_type
def _close(type, left, right):
    """Compare two statistics values with a dtype-appropriate tolerance.

    Floats get an absolute tolerance; everything else uses exact equality.
    """
    if type == pa.float32():
        tolerance = 1E-7
    elif type == pa.float64():
        tolerance = 1E-13
    else:
        return left == right
    return abs(left - right) < tolerance
def test_statistics_convert_logical_types(tempdir):
# ARROW-5166, ARROW-4139
# (min, max, type)
cases = [(10, 11164359321221007157, pa.uint64()),
(10, 4294967295, pa.uint32()),
(u"ähnlich", u"öffentlich", pa.utf8()),
(datetime.time(10, 30, 0, 1000), datetime.time(15, 30, 0, 1000),
pa.time32('ms')),
(datetime.time(10, 30, 0, 1000), datetime.time(15, 30, 0, 1000),
pa.time64('us')),
(datetime.datetime(2019, 6, 24, 0, 0, 0, 1000),
datetime.datetime(2019, 6, 25, 0, 0, 0, 1000),
pa.timestamp('ms')),
(datetime.datetime(2019, 6, 24, 0, 0, 0, 1000),
datetime.datetime(2019, 6, 25, 0, 0, 0, 1000),
pa.timestamp('us'))]
for i, (min_val, max_val, typ) in enumerate(cases):
t = pa.Table.from_arrays([pa.array([min_val, max_val], type=typ)],
['col'])
path = str(tempdir / ('example{}.parquet'.format(i)))
pq.write_table(t, path, version='2.0')
pf = pq.ParquetFile(path)
stats = pf.metadata.row_group(0).column(0).statistics
assert stats.min == min_val
assert stats.max == max_val
def test_parquet_write_disable_statistics(tempdir):
table = pa.Table.from_pydict(
{'a': pa.array([1, 2, 3]), 'b': pa.array(['a', 'b', 'c'])})
_write_table(table, tempdir / 'data.parquet')
meta = pq.read_metadata(tempdir / 'data.parquet')
for col in [0, 1]:
cc = meta.row_group(0).column(col)
assert cc.is_stats_set is True
assert cc.statistics is not None
_write_table(table, tempdir / 'data2.parquet', write_statistics=False)
meta = pq.read_metadata(tempdir / 'data2.parquet')
for col in [0, 1]:
cc = meta.row_group(0).column(col)
assert cc.is_stats_set is False
assert cc.statistics is None
_write_table(table, tempdir / 'data3.parquet', write_statistics=['a'])
meta = pq.read_metadata(tempdir / 'data3.parquet')
cc_a = meta.row_group(0).column(0)
assert cc_a.is_stats_set is True
assert cc_a.statistics is not None
cc_b = meta.row_group(0).column(1)
assert cc_b.is_stats_set is False
assert cc_b.statistics is None
@pytest.mark.pandas
def test_compare_schemas():
df = alltypes_sample(size=10000)
fileh = make_sample_file(df)
fileh2 = make_sample_file(df)
fileh3 = make_sample_file(df[df.columns[::2]])
# ParquetSchema
assert isinstance(fileh.schema, pq.ParquetSchema)
assert fileh.schema.equals(fileh.schema)
assert fileh.schema == fileh.schema
assert fileh.schema.equals(fileh2.schema)
assert fileh.schema == fileh2.schema
assert fileh.schema != 'arbitrary object'
assert not fileh.schema.equals(fileh3.schema)
assert fileh.schema != fileh3.schema
# ColumnSchema
assert isinstance(fileh.schema[0], pq.ColumnSchema)
assert fileh.schema[0].equals(fileh.schema[0])
assert fileh.schema[0] == fileh.schema[0]
assert not fileh.schema[0].equals(fileh.schema[1])
assert fileh.schema[0] != fileh.schema[1]
assert fileh.schema[0] != 'arbitrary object'
def test_validate_schema_write_table(tempdir):
# ARROW-2926
simple_fields = [
pa.field('POS', pa.uint32()),
pa.field('desc', pa.string())
]
simple_schema = pa.schema(simple_fields)
# simple_table schema does not match simple_schema
simple_from_array = [pa.array([1]), pa.array(['bla'])]
simple_table = pa.Table.from_arrays(simple_from_array, ['POS', 'desc'])
path = tempdir / 'simple_validate_schema.parquet'
with pq.ParquetWriter(path, simple_schema,
version='2.0',
compression='snappy', flavor='spark') as w:
with pytest.raises(ValueError):
w.write_table(simple_table)
@pytest.mark.pandas
def test_column_of_arrays(tempdir):
    """A DataFrame holding ndarray-valued cells round-trips through Parquet."""
    frame, frame_schema = dataframe_with_arrays()
    target = tempdir / 'pandas_roundtrip.parquet'
    table = pa.Table.from_pandas(frame, schema=frame_schema)
    _write_table(table, target, version="2.0", coerce_timestamps='ms')
    recovered = _read_table(target).to_pandas()
    tm.assert_frame_equal(frame, recovered)
@pytest.mark.pandas
def test_coerce_timestamps(tempdir):
    """ARROW-622: coerce_timestamps rewrites list-of-timestamp data to the
    requested unit on write; an unknown unit raises ValueError."""
    from collections import OrderedDict
    # ARROW-622
    arrays = OrderedDict()
    fields = [pa.field('datetime64',
                       pa.list_(pa.timestamp('ms')))]
    arrays['datetime64'] = [
        np.array(['2007-07-13T01:23:34.123456789',
                  None,
                  '2010-08-13T05:46:57.437699912'],
                 dtype='datetime64[ms]'),
        None,
        None,
        np.array(['2007-07-13T02',
                  None,
                  '2010-08-13T05:46:57.437699912'],
                 dtype='datetime64[ms]'),
    ]
    df = pd.DataFrame(arrays)
    schema = pa.schema(fields)
    filename = tempdir / 'pandas_roundtrip.parquet'
    arrow_table = pa.Table.from_pandas(df, schema=schema)
    # Write millisecond data coerced to microseconds.
    _write_table(arrow_table, filename, version="2.0", coerce_timestamps='us')
    table_read = _read_table(filename)
    df_read = table_read.to_pandas()
    # Expected values are the originals cast to datetime64[us].
    df_expected = df.copy()
    for i, x in enumerate(df_expected['datetime64']):
        if isinstance(x, np.ndarray):
            df_expected['datetime64'][i] = x.astype('M8[us]')
    tm.assert_frame_equal(df_expected, df_read)
    # An unsupported coercion unit must be rejected.
    with pytest.raises(ValueError):
        _write_table(arrow_table, filename, version='2.0',
                     coerce_timestamps='unknown')
@pytest.mark.pandas
def test_coerce_timestamps_truncated(tempdir):
    """
    ARROW-2555: Test that we can truncate timestamps when coercing if
    explicitly allowed.
    """
    # dt_us carries sub-millisecond precision; dt_ms does not.
    dt_us = datetime.datetime(year=2017, month=1, day=1, hour=1, minute=1,
                              second=1, microsecond=1)
    dt_ms = datetime.datetime(year=2017, month=1, day=1, hour=1, minute=1,
                              second=1)
    fields_us = [pa.field('datetime64', pa.timestamp('us'))]
    arrays_us = {'datetime64': [dt_us, dt_ms]}
    df_us = pd.DataFrame(arrays_us)
    schema_us = pa.schema(fields_us)
    filename = tempdir / 'pandas_truncated.parquet'
    table_us = pa.Table.from_pandas(df_us, schema=schema_us)
    # Coercing us -> ms drops the microsecond component; that is only
    # permitted because allow_truncated_timestamps=True.
    _write_table(table_us, filename, version="2.0", coerce_timestamps='ms',
                 allow_truncated_timestamps=True)
    table_ms = _read_table(filename)
    df_ms = table_ms.to_pandas()
    # After truncation both rows compare equal at millisecond precision.
    arrays_expected = {'datetime64': [dt_ms, dt_ms]}
    df_expected = pd.DataFrame(arrays_expected)
    tm.assert_frame_equal(df_expected, df_ms)
@pytest.mark.pandas
def test_column_of_lists(tempdir):
    """A DataFrame whose cells are Python lists round-trips through Parquet."""
    df, schema = dataframe_with_lists(parquet_compatible=True)
    filename = tempdir / 'pandas_roundtrip.parquet'
    arrow_table = pa.Table.from_pandas(df, schema=schema)
    _write_table(arrow_table, filename, version='2.0')
    table_read = _read_table(filename)
    df_read = table_read.to_pandas()
    if PY2:
        # assert_frame_equal fails when comparing datetime.date and
        # np.datetime64, even with check_datetimelike_compat=True so
        # convert the values to np.datetime64 instead
        for col in ['date32[day]_list', 'date64[ms]_list']:
            df[col] = df[col].apply(
                lambda x: list(map(np.datetime64, x)) if x else x
            )
    tm.assert_frame_equal(df, df_read)
@pytest.mark.pandas
def test_date_time_types(tempdir):
    """Round-trip every date/time/timestamp type, covering the write-time
    coercions visible below (date64 stored as date32, time32[s] widened to
    time32[ms]) and INT96 storage for nanosecond timestamps."""
    t1 = pa.date32()
    data1 = np.array([17259, 17260, 17261], dtype='int32')
    a1 = pa.array(data1, type=t1)
    t2 = pa.date64()
    # date64 values are milliseconds since epoch (86400000 ms per day).
    data2 = data1.astype('int64') * 86400000
    a2 = pa.array(data2, type=t2)
    t3 = pa.timestamp('us')
    start = pd.Timestamp('2001-01-01').value / 1000
    data3 = np.array([start, start + 1, start + 2], dtype='int64')
    a3 = pa.array(data3, type=t3)
    t4 = pa.time32('ms')
    data4 = np.arange(3, dtype='i4')
    a4 = pa.array(data4, type=t4)
    t5 = pa.time64('us')
    a5 = pa.array(data4.astype('int64'), type=t5)
    t6 = pa.time32('s')
    a6 = pa.array(data4, type=t6)
    # Expected result for the time32[s] column: widened to ms on write.
    ex_t6 = pa.time32('ms')
    ex_a6 = pa.array(data4 * 1000, type=ex_t6)
    t7 = pa.timestamp('ns')
    start = pd.Timestamp('2001-01-01').value
    data7 = np.array([start, start + 1000, start + 2000],
                     dtype='int64')
    a7 = pa.array(data7, type=t7)
    table = pa.Table.from_arrays([a1, a2, a3, a4, a5, a6, a7],
                                 ['date32', 'date64', 'timestamp[us]',
                                  'time32[s]', 'time64[us]',
                                  'time32_from64[s]',
                                  'timestamp[ns]'])
    # date64 as date32
    # time32[s] to time32[ms]
    expected = pa.Table.from_arrays([a1, a1, a3, a4, a5, ex_a6, a7],
                                    ['date32', 'date64', 'timestamp[us]',
                                     'time32[s]', 'time64[us]',
                                     'time32_from64[s]',
                                     'timestamp[ns]'])
    _check_roundtrip(table, expected=expected, version='2.0')
    t0 = pa.timestamp('ms')
    data0 = np.arange(4, dtype='int64')
    a0 = pa.array(data0, type=t0)
    t1 = pa.timestamp('us')
    data1 = np.arange(4, dtype='int64')
    a1 = pa.array(data1, type=t1)
    t2 = pa.timestamp('ns')
    data2 = np.arange(4, dtype='int64')
    a2 = pa.array(data2, type=t2)
    table = pa.Table.from_arrays([a0, a1, a2],
                                 ['ts[ms]', 'ts[us]', 'ts[ns]'])
    expected = pa.Table.from_arrays([a0, a1, a2],
                                    ['ts[ms]', 'ts[us]', 'ts[ns]'])
    # int64 for all timestamps supported by default
    filename = tempdir / 'int64_timestamps.parquet'
    _write_table(table, filename, version='2.0')
    parquet_schema = pq.ParquetFile(filename).schema
    for i in range(3):
        assert parquet_schema.column(i).physical_type == 'INT64'
    read_table = _read_table(filename)
    assert read_table.equals(expected)
    # With INT96 storage everything comes back as nanoseconds, so the
    # ms/us columns must be rescaled in the expected table.
    t0_ns = pa.timestamp('ns')
    data0_ns = np.array(data0 * 1000000, dtype='int64')
    a0_ns = pa.array(data0_ns, type=t0_ns)
    t1_ns = pa.timestamp('ns')
    data1_ns = np.array(data1 * 1000, dtype='int64')
    a1_ns = pa.array(data1_ns, type=t1_ns)
    expected = pa.Table.from_arrays([a0_ns, a1_ns, a2],
                                    ['ts[ms]', 'ts[us]', 'ts[ns]'])
    # int96 nanosecond timestamps produced upon request
    filename = tempdir / 'explicit_int96_timestamps.parquet'
    _write_table(table, filename, version='2.0',
                 use_deprecated_int96_timestamps=True)
    parquet_schema = pq.ParquetFile(filename).schema
    for i in range(3):
        assert parquet_schema.column(i).physical_type == 'INT96'
    read_table = _read_table(filename)
    assert read_table.equals(expected)
    # int96 nanosecond timestamps implied by flavor 'spark'
    filename = tempdir / 'spark_int96_timestamps.parquet'
    _write_table(table, filename, version='2.0',
                 flavor='spark')
    parquet_schema = pq.ParquetFile(filename).schema
    for i in range(3):
        assert parquet_schema.column(i).physical_type == 'INT96'
    read_table = _read_table(filename)
    assert read_table.equals(expected)
def test_timestamp_restore_timezone():
    """ARROW-5888: a timestamp column's timezone survives a round trip."""
    tz_type = pa.timestamp('ms', tz='America/New_York')
    values = pa.array([1, 2, 3], type=tz_type)
    tz_table = pa.table([values], names=['f0'])
    _check_roundtrip(tz_table)
@pytest.mark.pandas
def test_list_of_datetime_time_roundtrip():
    """ARROW-4135: a cell holding an array of datetime.time round-trips."""
    stamps = pd.to_datetime(['09:00', '09:30', '10:00', '10:30', '11:00',
                             '11:30', '12:00'])
    frame = pd.DataFrame({'time': [stamps.time]})
    _roundtrip_pandas_dataframe(frame, write_kwargs={})
@pytest.mark.pandas
def test_parquet_version_timestamp_differences():
    """Default and explicit timestamp-unit coercions under Parquet format
    version 1.0 versus 2.0, including INT96 storage of nanoseconds."""
    i_s = pd.Timestamp('2010-01-01').value / 1000000000  # := 1262304000
    # The same instants expressed in four resolutions.
    d_s = np.arange(i_s, i_s + 10, 1, dtype='int64')
    d_ms = d_s * 1000
    d_us = d_ms * 1000
    d_ns = d_us * 1000
    a_s = pa.array(d_s, type=pa.timestamp('s'))
    a_ms = pa.array(d_ms, type=pa.timestamp('ms'))
    a_us = pa.array(d_us, type=pa.timestamp('us'))
    a_ns = pa.array(d_ns, type=pa.timestamp('ns'))
    names = ['ts:s', 'ts:ms', 'ts:us', 'ts:ns']
    table = pa.Table.from_arrays([a_s, a_ms, a_us, a_ns], names)
    # Using Parquet version 1.0, seconds should be coerced to milliseconds
    # and nanoseconds should be coerced to microseconds by default
    expected = pa.Table.from_arrays([a_ms, a_ms, a_us, a_us], names)
    _check_roundtrip(table, expected)
    # Using Parquet version 2.0, seconds should be coerced to milliseconds
    # and nanoseconds should be retained by default
    expected = pa.Table.from_arrays([a_ms, a_ms, a_us, a_ns], names)
    _check_roundtrip(table, expected, version='2.0')
    # Using Parquet version 1.0, coercing to milliseconds or microseconds
    # is allowed
    expected = pa.Table.from_arrays([a_ms, a_ms, a_ms, a_ms], names)
    _check_roundtrip(table, expected, coerce_timestamps='ms')
    # Using Parquet version 2.0, coercing to milliseconds or microseconds
    # is allowed
    expected = pa.Table.from_arrays([a_us, a_us, a_us, a_us], names)
    _check_roundtrip(table, expected, version='2.0', coerce_timestamps='us')
    # TODO: after pyarrow allows coerce_timestamps='ns', tests like the
    # following should pass ...
    # Using Parquet version 1.0, coercing to nanoseconds is not allowed
    # expected = None
    # with pytest.raises(NotImplementedError):
    #     _roundtrip_table(table, coerce_timestamps='ns')
    # Using Parquet version 2.0, coercing to nanoseconds is allowed
    # expected = pa.Table.from_arrays([a_ns, a_ns, a_ns, a_ns], names)
    # _check_roundtrip(table, expected, version='2.0', coerce_timestamps='ns')
    # For either Parquet version, coercing to nanoseconds is allowed
    # if Int96 storage is used
    expected = pa.Table.from_arrays([a_ns, a_ns, a_ns, a_ns], names)
    _check_roundtrip(table, expected,
                     use_deprecated_int96_timestamps=True)
    _check_roundtrip(table, expected, version='2.0',
                     use_deprecated_int96_timestamps=True)
def test_large_list_records():
    """Round-trip list columns of varying length, including empty lists
    and nulls (fixed in PARQUET-1100)."""
    lengths = np.random.randint(0, 500, size=50)
    lengths[::10] = 0  # force some empty lists into the mix
    cells = [None if i % 8 == 0
             else list(map(int, np.random.randint(0, 100, size=n)))
             for i, n in enumerate(lengths)]
    list_column = pa.array(cells)
    _check_roundtrip(pa.Table.from_arrays([list_column], ['int_lists']))
def test_sanitized_spark_field_names():
    """flavor='spark' replaces characters Spark prohibits in field names."""
    column = pa.array([0, 1, 2, 3, 4])
    bad_name = 'prohib; ,\t{}'
    table = pa.Table.from_arrays([column], [bad_name])
    roundtripped = _roundtrip_table(table,
                                    write_table_kwargs={'flavor': 'spark'})
    # Every prohibited character is rewritten as an underscore.
    assert roundtripped.schema[0].name == 'prohib______'
@pytest.mark.pandas
def test_spark_flavor_preserves_pandas_metadata():
    """A named integer index survives a round trip with flavor='spark'."""
    frame = _test_dataframe(size=100)
    frame.index = np.arange(0, 10 * len(frame), 10)
    frame.index.name = 'foo'
    roundtripped = _roundtrip_pandas_dataframe(
        frame, {'version': '2.0', 'flavor': 'spark'})
    tm.assert_frame_equal(roundtripped, frame)
def test_fixed_size_binary():
    """Round-trip a fixed-width (10-byte) binary column including a null."""
    width10 = pa.binary(10)
    cells = [b'fooooooooo', None, b'barooooooo', b'quxooooooo']
    column = pa.array(cells, type=width10)
    _check_roundtrip(pa.Table.from_arrays([column], ['binary[10]']))
@pytest.mark.pandas
def test_multithreaded_read():
    """Threaded and single-threaded reads of one buffer produce equal tables."""
    table = pa.Table.from_pandas(alltypes_sample(size=10000))
    sink = io.BytesIO()
    _write_table(table, sink, compression='SNAPPY', version='2.0')
    sink.seek(0)
    threaded = _read_table(sink, use_threads=True)
    sink.seek(0)
    serial = _read_table(sink, use_threads=False)
    assert threaded.equals(serial)
@pytest.mark.pandas
def test_min_chunksize():
    """chunk_size=-1 is accepted (single chunk); chunk_size=0 is rejected."""
    data = pd.DataFrame([np.arange(4)], columns=['A', 'B', 'C', 'D'])
    table = pa.Table.from_pandas(data.reset_index())
    buf = io.BytesIO()
    _write_table(table, buf, chunk_size=-1)
    buf.seek(0)
    result = _read_table(buf)
    assert result.equals(table)
    # Zero-row chunks are invalid.
    with pytest.raises(ValueError):
        _write_table(table, buf, chunk_size=0)
@pytest.mark.pandas
def test_pass_separate_metadata():
    """ARROW-471: ParquetFile accepts metadata read ahead of time."""
    frame = alltypes_sample(size=10000)
    sink = io.BytesIO()
    _write_table(pa.Table.from_pandas(frame), sink,
                 compression='snappy', version='2.0')
    # Read the footer metadata first, then reopen with it supplied.
    sink.seek(0)
    meta = pq.read_metadata(sink)
    sink.seek(0)
    reader = pq.ParquetFile(sink, metadata=meta)
    tm.assert_frame_equal(frame, reader.read().to_pandas())
@pytest.mark.pandas
def test_read_single_row_group():
    """ARROW-471: row groups can be read one at a time and reassembled."""
    # ARROW-471
    N, K = 10000, 4
    df = alltypes_sample(size=N)
    a_table = pa.Table.from_pandas(df)
    buf = io.BytesIO()
    # Split the data into K row groups of N/K rows each.
    _write_table(a_table, buf, row_group_size=N / K,
                 compression='snappy', version='2.0')
    buf.seek(0)
    pf = pq.ParquetFile(buf)
    assert pf.num_row_groups == K
    row_groups = [pf.read_row_group(i) for i in range(K)]
    result = pa.concat_tables(row_groups)
    tm.assert_frame_equal(df, result.to_pandas())
@pytest.mark.pandas
def test_read_single_row_group_with_column_subset():
    """Per-row-group reads honor a column subset; duplicate requests for a
    column are deduplicated (ARROW-4267)."""
    N, K = 10000, 4
    df = alltypes_sample(size=N)
    a_table = pa.Table.from_pandas(df)
    buf = io.BytesIO()
    _write_table(a_table, buf, row_group_size=N / K,
                 compression='snappy', version='2.0')
    buf.seek(0)
    pf = pq.ParquetFile(buf)
    cols = list(df.columns[:2])
    row_groups = [pf.read_row_group(i, columns=cols) for i in range(K)]
    result = pa.concat_tables(row_groups)
    tm.assert_frame_equal(df[cols], result.to_pandas())
    # ARROW-4267: Selection of duplicate columns still leads to these columns
    # being read uniquely.
    row_groups = [pf.read_row_group(i, columns=cols + cols) for i in range(K)]
    result = pa.concat_tables(row_groups)
    tm.assert_frame_equal(df[cols], result.to_pandas())
@pytest.mark.pandas
def test_scan_contents():
    """scan_contents reports the full row count, with or without a column
    subset."""
    num_rows, num_groups = 10000, 4
    frame = alltypes_sample(size=num_rows)
    sink = io.BytesIO()
    _write_table(pa.Table.from_pandas(frame), sink,
                 row_group_size=num_rows / num_groups,
                 compression='snappy', version='2.0')
    sink.seek(0)
    reader = pq.ParquetFile(sink)
    assert reader.scan_contents() == 10000
    assert reader.scan_contents(frame.columns[:4]) == 10000
@pytest.mark.pandas
def test_parquet_piece_read(tempdir):
    """A ParquetDatasetPiece reads back exactly the table it points at."""
    frame = _test_dataframe(1000)
    expected = pa.Table.from_pandas(frame)
    target = tempdir / 'parquet_piece_read.parquet'
    _write_table(expected, target, version='2.0')
    piece = pq.ParquetDatasetPiece(target)
    assert piece.read().equals(expected)
@pytest.mark.pandas
def test_parquet_piece_open_and_get_metadata(tempdir):
    """A dataset piece exposes both its table and its file-level metadata."""
    df = _test_dataframe(100)
    table = pa.Table.from_pandas(df)
    path = tempdir / 'parquet_piece_read.parquet'
    _write_table(table, path, version='2.0')
    piece = pq.ParquetDatasetPiece(path)
    table1 = piece.read()
    assert isinstance(table1, pa.Table)
    meta1 = piece.get_metadata()
    assert isinstance(meta1, pq.FileMetaData)
    # The round-tripped table equals what was written.
    assert table == table1
def test_parquet_piece_basics():
    """String representation and equality semantics of ParquetDatasetPiece."""
    path = '/baz.parq'
    plain = pq.ParquetDatasetPiece(path)
    with_group = pq.ParquetDatasetPiece(path, row_group=1)
    with_keys = pq.ParquetDatasetPiece(
        path, row_group=1, partition_keys=[('foo', 0), ('bar', 1)])

    # str() includes row group and partition keys when present.
    assert str(plain) == path
    assert str(with_group) == '/baz.parq | row_group=1'
    assert str(with_keys) == 'partition[foo=0, bar=1] /baz.parq | row_group=1'

    # Pieces compare equal to themselves and unequal across configurations.
    assert plain == plain
    assert with_group == with_group
    assert with_keys == with_keys
    assert plain != with_keys
def test_partition_set_dictionary_type():
    """PartitionSet infers string/integer dictionary arrays; datetime
    partition keys raise TypeError."""
    set1 = pq.PartitionSet('key1', [u('foo'), u('bar'), u('baz')])
    set2 = pq.PartitionSet('key2', [2007, 2008, 2009])
    assert isinstance(set1.dictionary, pa.StringArray)
    assert isinstance(set2.dictionary, pa.IntegerArray)
    # Accessing .dictionary for unsupported key types raises.
    set3 = pq.PartitionSet('key2', [datetime.datetime(2007, 1, 1)])
    with pytest.raises(TypeError):
        set3.dictionary
@pytest.mark.pandas
def test_read_partitioned_directory(tempdir):
    """Partitioned-directory round trip on the local filesystem."""
    local_fs = LocalFileSystem.get_instance()
    _partition_test_for_filesystem(local_fs, tempdir)
@pytest.mark.pandas
def test_create_parquet_dataset_multi_threaded(tempdir):
    """Multi-threaded metadata discovery finds the same partitions as a
    single-threaded manifest."""
    fs = LocalFileSystem.get_instance()
    base_path = tempdir
    _partition_test_for_filesystem(fs, base_path)
    # Single-threaded manifest serves as the reference.
    manifest = pq.ParquetManifest(base_path, filesystem=fs,
                                  metadata_nthreads=1)
    dataset = pq.ParquetDataset(base_path, filesystem=fs, metadata_nthreads=16)
    assert len(dataset.pieces) > 0
    partitions = dataset.partitions
    assert len(partitions.partition_names) > 0
    assert partitions.partition_names == manifest.partitions.partition_names
    assert len(partitions.levels) == len(manifest.partitions.levels)
@pytest.mark.pandas
def test_equivalency(tempdir):
    """Partition filters: flat AND syntax, disjunctive-normal-form syntax,
    and rejection of embedded NUL bytes in predicate values."""
    fs = LocalFileSystem.get_instance()
    base_path = tempdir
    integer_keys = [0, 1]
    string_keys = ['a', 'b', 'c']
    boolean_keys = [True, False]
    partition_spec = [
        ['integer', integer_keys],
        ['string', string_keys],
        ['boolean', boolean_keys]
    ]
    df = pd.DataFrame({
        'integer': np.array(integer_keys, dtype='i4').repeat(15),
        'string': np.tile(np.tile(np.array(string_keys, dtype=object), 5), 2),
        'boolean': np.tile(np.tile(np.array(boolean_keys, dtype='bool'), 5),
                           3),
    }, columns=['integer', 'string', 'boolean'])
    _generate_partition_directories(fs, base_path, partition_spec, df)
    # Old filters syntax:
    #  integer == 1 AND string != b AND boolean == True
    dataset = pq.ParquetDataset(
        base_path, filesystem=fs,
        filters=[('integer', '=', 1), ('string', '!=', 'b'),
                 ('boolean', '==', True)]
    )
    table = dataset.read()
    result_df = (table.to_pandas().reset_index(drop=True))
    assert 0 not in result_df['integer'].values
    assert 'b' not in result_df['string'].values
    assert False not in result_df['boolean'].values
    # filters in disjunctive normal form:
    #  (integer == 1 AND string != b AND boolean == True) OR
    #  (integer == 2 AND boolean == False)
    # TODO(ARROW-3388): boolean columns are reconstructed as string
    filters = [
        [
            ('integer', '=', 1),
            ('string', '!=', 'b'),
            ('boolean', '==', 'True')
        ],
        [('integer', '=', 0), ('boolean', '==', 'False')]
    ]
    dataset = pq.ParquetDataset(base_path, filesystem=fs, filters=filters)
    table = dataset.read()
    result_df = table.to_pandas().reset_index(drop=True)
    # Check that all rows in the DF fulfill the filter
    # Pandas 0.23.x has problems with indexing constant memoryviews in
    # categoricals. Thus we need to make an explicity copy here with np.array.
    df_filter_1 = (np.array(result_df['integer']) == 1) \
        & (np.array(result_df['string']) != 'b') \
        & (np.array(result_df['boolean']) == 'True')
    df_filter_2 = (np.array(result_df['integer']) == 0) \
        & (np.array(result_df['boolean']) == 'False')
    assert df_filter_1.sum() > 0
    assert df_filter_2.sum() > 0
    assert result_df.shape[0] == (df_filter_1.sum() + df_filter_2.sum())
    # Check for \0 in predicate values. Until they are correctly implemented
    # in ARROW-3391, they would otherwise lead to weird results with the
    # current code.
    with pytest.raises(NotImplementedError):
        filters = [[('string', '==', b'1\0a')]]
        pq.ParquetDataset(base_path, filesystem=fs, filters=filters)
    with pytest.raises(NotImplementedError):
        filters = [[('string', '==', u'1\0a')]]
        pq.ParquetDataset(base_path, filesystem=fs, filters=filters)
@pytest.mark.pandas
def test_cutoff_exclusive_integer(tempdir):
    """Strict '<' and '>' partition filters exclude the boundary values,
    keeping only the strictly-inside keys (2 and 3 of 0..4)."""
    fs = LocalFileSystem.get_instance()
    base_path = tempdir
    integer_keys = [0, 1, 2, 3, 4]
    partition_spec = [
        ['integers', integer_keys],
    ]
    N = 5
    df = pd.DataFrame({
        'index': np.arange(N),
        'integers': np.array(integer_keys, dtype='i4'),
    }, columns=['index', 'integers'])
    _generate_partition_directories(fs, base_path, partition_spec, df)
    dataset = pq.ParquetDataset(
        base_path, filesystem=fs,
        filters=[
            ('integers', '<', 4),
            ('integers', '>', 1),
        ]
    )
    table = dataset.read()
    result_df = (table.to_pandas()
                 .sort_values(by='index')
                 .reset_index(drop=True))
    # list(map(...)) directly — the original wrapped map() in a redundant
    # identity comprehension `[x for x in map(int, ...)]`.
    result_list = list(map(int, result_df['integers'].values))
    assert result_list == [2, 3]
@pytest.mark.pandas
@pytest.mark.xfail(
    raises=TypeError,
    reason='Loss of type information in creation of categoricals.'
)
def test_cutoff_exclusive_datetime(tempdir):
    """Strict '<'/'>' filters on date-typed partition keys; currently
    expected to fail (see xfail reason above)."""
    fs = LocalFileSystem.get_instance()
    base_path = tempdir
    date_keys = [
        datetime.date(2018, 4, 9),
        datetime.date(2018, 4, 10),
        datetime.date(2018, 4, 11),
        datetime.date(2018, 4, 12),
        datetime.date(2018, 4, 13)
    ]
    partition_spec = [
        ['dates', date_keys]
    ]
    N = 5
    df = pd.DataFrame({
        'index': np.arange(N),
        'dates': np.array(date_keys, dtype='datetime64'),
    }, columns=['index', 'dates'])
    _generate_partition_directories(fs, base_path, partition_spec, df)
    dataset = pq.ParquetDataset(
        base_path, filesystem=fs,
        filters=[
            ('dates', '<', "2018-04-12"),
            ('dates', '>', "2018-04-10")
        ]
    )
    table = dataset.read()
    result_df = (table.to_pandas()
                 .sort_values(by='index')
                 .reset_index(drop=True))
    # Only 2018-04-11 lies strictly between the two cutoffs.
    expected = pd.Categorical(
        np.array([datetime.date(2018, 4, 11)], dtype='datetime64'),
        categories=np.array(date_keys, dtype='datetime64'))
    assert result_df['dates'].values == expected
@pytest.mark.pandas
def test_inclusive_integer(tempdir):
    """'<=' and '>=' partition filters include the boundary values,
    keeping keys 2 and 3 of 0..4."""
    fs = LocalFileSystem.get_instance()
    base_path = tempdir
    integer_keys = [0, 1, 2, 3, 4]
    partition_spec = [
        ['integers', integer_keys],
    ]
    N = 5
    df = pd.DataFrame({
        'index': np.arange(N),
        'integers': np.array(integer_keys, dtype='i4'),
    }, columns=['index', 'integers'])
    _generate_partition_directories(fs, base_path, partition_spec, df)
    dataset = pq.ParquetDataset(
        base_path, filesystem=fs,
        filters=[
            ('integers', '<=', 3),
            ('integers', '>=', 2),
        ]
    )
    table = dataset.read()
    result_df = (table.to_pandas()
                 .sort_values(by='index')
                 .reset_index(drop=True))
    # Single int() conversion — the original converted every value twice:
    # `[int(x) for x in map(int, ...)]`.
    result_list = [int(x) for x in result_df['integers'].values]
    assert result_list == [2, 3]
@pytest.mark.pandas
def test_inclusive_set(tempdir):
    """'in' filters keep only partitions whose key is in the given set."""
    fs = LocalFileSystem.get_instance()
    base_path = tempdir
    integer_keys = [0, 1]
    string_keys = ['a', 'b', 'c']
    boolean_keys = [True, False]
    partition_spec = [
        ['integer', integer_keys],
        ['string', string_keys],
        ['boolean', boolean_keys]
    ]
    df = pd.DataFrame({
        'integer': np.array(integer_keys, dtype='i4').repeat(15),
        'string': np.tile(np.tile(np.array(string_keys, dtype=object), 5), 2),
        'boolean': np.tile(np.tile(np.array(boolean_keys, dtype='bool'), 5),
                           3),
    }, columns=['integer', 'string', 'boolean'])
    _generate_partition_directories(fs, base_path, partition_spec, df)
    dataset = pq.ParquetDataset(
        base_path, filesystem=fs,
        filters=[('integer', 'in', {1}), ('string', 'in', {'a', 'b'}),
                 ('boolean', 'in', {True})]
    )
    table = dataset.read()
    result_df = (table.to_pandas().reset_index(drop=True))
    # Values outside each filter set must be absent from the result.
    assert 0 not in result_df['integer'].values
    assert 'c' not in result_df['string'].values
    assert False not in result_df['boolean'].values
@pytest.mark.pandas
def test_invalid_pred_op(tempdir):
    """Malformed filter predicates raise ValueError at dataset construction."""
    fs = LocalFileSystem.get_instance()
    base_path = tempdir
    integer_keys = [0, 1, 2, 3, 4]
    partition_spec = [
        ['integers', integer_keys],
    ]
    N = 5
    df = pd.DataFrame({
        'index': np.arange(N),
        'integers': np.array(integer_keys, dtype='i4'),
    }, columns=['index', 'integers'])
    _generate_partition_directories(fs, base_path, partition_spec, df)
    # Unknown comparison operator ('=<' is not valid).
    with pytest.raises(ValueError):
        pq.ParquetDataset(base_path,
                          filesystem=fs,
                          filters=[
                              ('integers', '=<', 3),
                          ])
    # 'in' with an empty set is rejected.
    with pytest.raises(ValueError):
        pq.ParquetDataset(base_path,
                          filesystem=fs,
                          filters=[
                              ('integers', 'in', set()),
                          ])
    # A set operand with a non-'in' operator is rejected.
    with pytest.raises(ValueError):
        pq.ParquetDataset(base_path,
                          filesystem=fs,
                          filters=[
                              ('integers', '!=', {3}),
                          ])
@pytest.mark.pandas
def test_filters_read_table(tempdir):
    """read_table/read_pandas forward the filters keyword to the dataset."""
    # test that filters keyword is passed through in read_table
    fs = LocalFileSystem.get_instance()
    base_path = tempdir
    integer_keys = [0, 1, 2, 3, 4]
    partition_spec = [
        ['integers', integer_keys],
    ]
    N = 5
    df = pd.DataFrame({
        'index': np.arange(N),
        'integers': np.array(integer_keys, dtype='i4'),
    }, columns=['index', 'integers'])
    _generate_partition_directories(fs, base_path, partition_spec, df)
    # Both flat and DNF (list-of-lists) filter syntaxes are accepted.
    table = pq.read_table(
        base_path, filesystem=fs, filters=[('integers', '<', 3)])
    assert table.num_rows == 3
    table = pq.read_table(
        base_path, filesystem=fs, filters=[[('integers', '<', 3)]])
    assert table.num_rows == 3
    table = pq.read_pandas(
        base_path, filters=[('integers', '<', 3)])
    assert table.num_rows == 3
@pytest.yield_fixture
def s3_example():
    """Yield an (s3fs filesystem, fresh bucket URI) pair for S3 tests;
    removes the test directory on teardown.

    Requires PYARROW_TEST_S3_ACCESS_KEY / _SECRET_KEY / _BUCKET env vars.
    """
    access_key = os.environ['PYARROW_TEST_S3_ACCESS_KEY']
    secret_key = os.environ['PYARROW_TEST_S3_SECRET_KEY']
    bucket_name = os.environ['PYARROW_TEST_S3_BUCKET']
    import s3fs
    fs = s3fs.S3FileSystem(key=access_key, secret=secret_key)
    test_dir = guid()
    bucket_uri = 's3://{0}/{1}'.format(bucket_name, test_dir)
    fs.mkdir(bucket_uri)
    yield fs, bucket_uri
    # Teardown: remove everything written under the test directory.
    fs.rm(bucket_uri, recursive=True)
@pytest.mark.pandas
@pytest.mark.s3
def test_read_partitioned_directory_s3fs(s3_example):
    """Partitioned-directory reads work over s3fs, both explicitly wrapped
    and auto-wrapped by ParquetDataset."""
    from pyarrow.filesystem import S3FSWrapper
    fs, bucket_uri = s3_example
    wrapper = S3FSWrapper(fs)
    _partition_test_for_filesystem(wrapper, bucket_uri)
    # Check that we can auto-wrap
    dataset = pq.ParquetDataset(bucket_uri, filesystem=fs)
    dataset.read()
def _partition_test_for_filesystem(fs, base_path):
    """Write a two-level (foo/bar) partitioned dataset to *fs* under
    *base_path*, read it back, and verify the data round-trips with the
    partition keys reconstructed as categoricals."""
    foo_keys = [0, 1]
    bar_keys = ['a', 'b', 'c']
    partition_spec = [
        ['foo', foo_keys],
        ['bar', bar_keys]
    ]
    N = 30
    df = pd.DataFrame({
        'index': np.arange(N),
        'foo': np.array(foo_keys, dtype='i4').repeat(15),
        'bar': np.tile(np.tile(np.array(bar_keys, dtype=object), 5), 2),
        'values': np.random.randn(N)
    }, columns=['index', 'foo', 'bar', 'values'])
    _generate_partition_directories(fs, base_path, partition_spec, df)
    dataset = pq.ParquetDataset(base_path, filesystem=fs)
    table = dataset.read()
    result_df = (table.to_pandas()
                 .sort_values(by='index')
                 .reset_index(drop=True))
    expected_df = (df.sort_values(by='index')
                   .reset_index(drop=True)
                   .reindex(columns=result_df.columns))
    # Partition key columns come back as pandas Categoricals.
    expected_df['foo'] = pd.Categorical(df['foo'], categories=foo_keys)
    expected_df['bar'] = pd.Categorical(df['bar'], categories=bar_keys)
    # Partition columns are appended after the data columns.
    assert (result_df.columns == ['index', 'values', 'foo', 'bar']).all()
    tm.assert_frame_equal(result_df, expected_df)
def _generate_partition_directories(fs, base_dir, partition_spec, df):
    """Recursively materialize *df* as a key=value partitioned directory
    tree on filesystem *fs*, writing one parquet file per leaf."""
    # partition_spec : list of lists, e.g. [['foo', [0, 1, 2],
    #                                       ['bar', ['a', 'b', 'c']]
    # part_table : a pyarrow.Table to write to each partition
    DEPTH = len(partition_spec)
    def _visit_level(base_dir, level, part_keys):
        # Create one 'name=value' directory per key at this level and
        # recurse; the deepest level writes the actual data file.
        name, values = partition_spec[level]
        for value in values:
            this_part_keys = part_keys + [(name, value)]
            level_dir = base_dir / '{0}={1}'.format(name, value)
            fs.mkdir(level_dir)
            if level == DEPTH - 1:
                # Generate example data
                file_path = level_dir / guid()
                filtered_df = _filter_partition(df, this_part_keys)
                part_table = pa.Table.from_pandas(filtered_df)
                with fs.open(file_path, 'wb') as f:
                    _write_table(part_table, f)
                assert fs.exists(file_path)
                (level_dir / '_SUCCESS').touch()
            else:
                _visit_level(level_dir, level + 1, this_part_keys)
                (level_dir / '_SUCCESS').touch()
    _visit_level(base_dir, 0, [])
def _test_read_common_metadata_files(fs, base_path):
    """Verify a dataset on *fs* picks up a sibling _common_metadata file
    and that its schema matches the data file's."""
    N = 100
    df = pd.DataFrame({
        'index': np.arange(N),
        'values': np.random.randn(N)
    }, columns=['index', 'values'])
    base_path = str(base_path)
    data_path = os.path.join(base_path, 'data.parquet')
    table = pa.Table.from_pandas(df)
    with fs.open(data_path, 'wb') as f:
        _write_table(table, f)
    metadata_path = os.path.join(base_path, '_common_metadata')
    with fs.open(metadata_path, 'wb') as f:
        pq.write_metadata(table.schema, f)
    dataset = pq.ParquetDataset(base_path, filesystem=fs)
    assert dataset.common_metadata_path == str(metadata_path)
    with fs.open(data_path) as f:
        common_schema = pq.read_metadata(f).schema
    assert dataset.schema.equals(common_schema)
    # handle list of one directory
    dataset2 = pq.ParquetDataset([base_path], filesystem=fs)
    assert dataset2.schema.equals(dataset.schema)
@pytest.mark.pandas
def test_read_common_metadata_files(tempdir):
    """Exercise _common_metadata handling on the local filesystem."""
    local_fs = LocalFileSystem.get_instance()
    _test_read_common_metadata_files(local_fs, tempdir)
@pytest.mark.pandas
def test_read_metadata_files(tempdir):
    """A _metadata file alongside the data is discovered by the dataset and
    its schema matches the data file's."""
    fs = LocalFileSystem.get_instance()
    N = 100
    df = pd.DataFrame({
        'index': np.arange(N),
        'values': np.random.randn(N)
    }, columns=['index', 'values'])
    data_path = tempdir / 'data.parquet'
    table = pa.Table.from_pandas(df)
    with fs.open(data_path, 'wb') as f:
        _write_table(table, f)
    metadata_path = tempdir / '_metadata'
    with fs.open(metadata_path, 'wb') as f:
        pq.write_metadata(table.schema, f)
    dataset = pq.ParquetDataset(tempdir, filesystem=fs)
    assert dataset.metadata_path == str(metadata_path)
    with fs.open(data_path) as f:
        metadata_schema = pq.read_metadata(f).schema
    assert dataset.schema.equals(metadata_schema)
@pytest.mark.pandas
def test_read_schema(tempdir):
    """read_schema matches the written schema, with and without memory_map,
    and preserves the serialized pandas metadata."""
    size = 100
    frame = pd.DataFrame({
        'index': np.arange(size),
        'values': np.random.randn(size)
    }, columns=['index', 'values'])
    target = tempdir / 'test.parquet'
    table = pa.Table.from_pandas(frame)
    _write_table(table, target)

    plain = pq.read_schema(target)
    mapped = pq.read_schema(target, memory_map=True)
    assert table.schema.equals(plain, check_metadata=False)
    assert table.schema.equals(mapped, check_metadata=False)
    # The embedded pandas metadata must survive verbatim.
    assert table.schema.metadata[b'pandas'] == plain.metadata[b'pandas']
def _filter_partition(df, part_keys):
predicate = np.ones(len(df), dtype=bool)
to_drop = []
for name, value in part_keys:
to_drop.append(name)
# to avoid pandas warning
if isinstance(value, (datetime.date, datetime.datetime)):
value = pd.Timestamp(value)
predicate &= df[name] == value
return df[predicate].drop(to_drop, axis=1)
@pytest.mark.pandas
def test_read_multiple_files(tempdir):
    """Read a directory of uniform parquet files as one table, with and
    without supplied metadata/column subsets; mixing in a file with a
    different schema raises ValueError."""
    nfiles = 10
    size = 5
    dirpath = tempdir / guid()
    dirpath.mkdir()
    test_data = []
    paths = []
    for i in range(nfiles):
        df = _test_dataframe(size, seed=i)
        # Hack so that we don't have a dtype cast in v1 files
        df['uint32'] = df['uint32'].astype(np.int64)
        path = dirpath / '{}.parquet'.format(i)
        table = pa.Table.from_pandas(df)
        _write_table(table, path)
        test_data.append(table)
        paths.append(path)
    # Write a _SUCCESS.crc file
    (dirpath / '_SUCCESS.crc').touch()
    def read_multiple_files(paths, columns=None, use_threads=True, **kwargs):
        # Helper: read the given paths as a single ParquetDataset.
        dataset = pq.ParquetDataset(paths, **kwargs)
        return dataset.read(columns=columns, use_threads=use_threads)
    result = read_multiple_files(paths)
    expected = pa.concat_tables(test_data)
    assert result.equals(expected)
    # Read with provided metadata
    metadata = pq.read_metadata(paths[0])
    result2 = read_multiple_files(paths, metadata=metadata)
    assert result2.equals(expected)
    result3 = pa.localfs.read_parquet(dirpath, schema=metadata.schema)
    assert result3.equals(expected)
    # Read column subset
    to_read = [0, 2, 6, result.num_columns - 1]
    col_names = [result.field(i).name for i in to_read]
    out = pa.localfs.read_parquet(dirpath, columns=col_names)
    expected = pa.Table.from_arrays([result.column(i) for i in to_read],
                                    names=col_names,
                                    metadata=result.schema.metadata)
    assert out.equals(expected)
    # Read with multiple threads
    pa.localfs.read_parquet(dirpath, use_threads=True)
    # Test failure modes with non-uniform metadata
    bad_apple = _test_dataframe(size, seed=i).iloc[:, :4]
    bad_apple_path = tempdir / '{}.parquet'.format(guid())
    t = pa.Table.from_pandas(bad_apple)
    _write_table(t, bad_apple_path)
    bad_meta = pq.read_metadata(bad_apple_path)
    with pytest.raises(ValueError):
        read_multiple_files(paths + [bad_apple_path])
    with pytest.raises(ValueError):
        read_multiple_files(paths, metadata=bad_meta)
    mixed_paths = [bad_apple_path, paths[0]]
    with pytest.raises(ValueError):
        read_multiple_files(mixed_paths, schema=bad_meta.schema)
    with pytest.raises(ValueError):
        read_multiple_files(mixed_paths)
@pytest.mark.pandas
def test_dataset_read_pandas(tempdir):
    """read_pandas on a multi-file dataset restores the concatenated frame
    for a column subset."""
    nfiles = 5
    size = 5
    dirpath = tempdir / guid()
    dirpath.mkdir()
    test_data = []
    frames = []
    paths = []
    for i in range(nfiles):
        df = _test_dataframe(size, seed=i)
        # Give each file a distinct, contiguous index range.
        df.index = np.arange(i * size, (i + 1) * size)
        df.index.name = 'index'
        path = dirpath / '{}.parquet'.format(i)
        table = pa.Table.from_pandas(df)
        _write_table(table, path)
        test_data.append(table)
        frames.append(df)
        paths.append(path)
    dataset = pq.ParquetDataset(dirpath)
    columns = ['uint8', 'strings']
    result = dataset.read_pandas(columns=columns).to_pandas()
    expected = pd.concat([x[columns] for x in frames])
    tm.assert_frame_equal(result, expected)
@pytest.mark.pandas
def test_dataset_no_memory_map(tempdir):
    """ARROW-2627: ParquetDataset works with memory_map=False."""
    # ARROW-2627: Check that we can use ParquetDataset without memory-mapping
    dirpath = tempdir / guid()
    dirpath.mkdir()
    df = _test_dataframe(10, seed=0)
    path = dirpath / '{}.parquet'.format(0)
    table = pa.Table.from_pandas(df)
    _write_table(table, path, version='2.0')
    # TODO(wesm): Not sure how to easily check that memory mapping is _not_
    # used. Mocking is not especially easy for pa.memory_map
    dataset = pq.ParquetDataset(dirpath, memory_map=False)
    assert dataset.pieces[0].read().equals(table)
@pytest.mark.pandas
def test_dataset_read_pandas_common_metadata(tempdir, preserve_index):
    """ARROW-1103: pandas index information is recovered from a common
    _metadata file when the data files carry no schema metadata."""
    # ARROW-1103
    nfiles = 5
    size = 5
    dirpath = tempdir / guid()
    dirpath.mkdir()
    test_data = []
    frames = []
    paths = []
    for i in range(nfiles):
        df = _test_dataframe(size, seed=i)
        df.index = pd.Index(np.arange(i * size, (i + 1) * size), name='index')
        path = dirpath / '{}.parquet'.format(i)
        table = pa.Table.from_pandas(df, preserve_index=preserve_index)
        # Obliterate metadata
        table = table.replace_schema_metadata(None)
        assert table.schema.metadata is None
        _write_table(table, path)
        test_data.append(table)
        frames.append(df)
        paths.append(path)
    # Write _metadata common file
    table_for_metadata = pa.Table.from_pandas(
        df, preserve_index=preserve_index
    )
    pq.write_metadata(table_for_metadata.schema, dirpath / '_metadata')
    dataset = pq.ParquetDataset(dirpath)
    columns = ['uint8', 'strings']
    result = dataset.read_pandas(columns=columns).to_pandas()
    expected = pd.concat([x[columns] for x in frames])
    # Index name is only restored when the index was preserved on write.
    expected.index.name = (
        df.index.name if preserve_index is not False else None)
    tm.assert_frame_equal(result, expected)
def _make_example_multifile_dataset(base_path, nfiles=10, file_nrows=5):
    """Write *nfiles* small parquet files under *base_path*.

    Each file holds a ``_test_dataframe`` of *file_nrows* rows with a
    distinct random seed.  Returns the list of written file paths.
    """
    # The original also accumulated the written tables into an unused
    # list; only the paths are needed by callers.
    paths = []
    for i in range(nfiles):
        df = _test_dataframe(file_nrows, seed=i)
        path = base_path / '{}.parquet'.format(i)
        _write_table(df, path)
        paths.append(path)
    return paths
@pytest.mark.pandas
def test_ignore_private_directories(tempdir):
    """Directories with a leading underscore do not contribute pieces."""
    base = tempdir / guid()
    base.mkdir()
    written = _make_example_multifile_dataset(base, nfiles=10,
                                              file_nrows=5)

    # private directory
    (base / '_impala_staging').mkdir()

    found = set(piece.path for piece in pq.ParquetDataset(base).pieces)
    assert found == set(map(str, written))
@pytest.mark.pandas
def test_ignore_hidden_files_dot(tempdir):
    """Files whose names begin with '.' are not treated as dataset pieces."""
    base = tempdir / guid()
    base.mkdir()
    written = _make_example_multifile_dataset(base, nfiles=10,
                                              file_nrows=5)

    # Drop in two dot-prefixed files that must be skipped
    for hidden_name in ('.DS_Store', '.private'):
        with (base / hidden_name).open('wb') as f:
            f.write(b'gibberish')

    found = set(piece.path for piece in pq.ParquetDataset(base).pieces)
    assert found == set(map(str, written))
@pytest.mark.pandas
def test_ignore_hidden_files_underscore(tempdir):
    """Files whose names begin with '_' are not treated as dataset pieces."""
    base = tempdir / guid()
    base.mkdir()
    written = _make_example_multifile_dataset(base, nfiles=10,
                                              file_nrows=5)

    # Drop in two underscore-prefixed marker files that must be skipped
    for marker_name in ('_committed_123', '_started_321'):
        with (base / marker_name).open('wb') as f:
            f.write(b'abcd')

    found = set(piece.path for piece in pq.ParquetDataset(base).pieces)
    assert found == set(map(str, written))
@pytest.mark.pandas
def test_multiindex_duplicate_values(tempdir):
    """A MultiIndex with repeated level values survives a parquet round trip."""
    nrows = 3
    nums = list(range(nrows))
    idx = pd.MultiIndex.from_arrays(
        [['foo', 'foo', 'bar'], nums],
        names=['foobar', 'some_numbers'],
    )
    frame = pd.DataFrame({'numbers': nums}, index=idx)
    expected = pa.Table.from_pandas(frame)

    out_path = tempdir / 'dup_multi_index_levels.parquet'
    _write_table(expected, out_path)

    round_tripped = _read_table(out_path)
    assert expected.equals(round_tripped)
    tm.assert_frame_equal(round_tripped.to_pandas(), frame)
@pytest.mark.pandas
def test_write_error_deletes_incomplete_file(tempdir):
    """ARROW-1285: a failed write must not leave a partial file on disk."""
    # ARROW-1285
    # NOTE(review): the write is expected to raise — presumably because of
    # the nanosecond-resolution column 'i' under the default writer version;
    # confirm against the writer's coercion rules.
    df = pd.DataFrame({'a': list('abc'),
                       'b': list(range(1, 4)),
                       'c': np.arange(3, 6).astype('u1'),
                       'd': np.arange(4.0, 7.0, dtype='float64'),
                       'e': [True, False, True],
                       'f': pd.Categorical(list('abc')),
                       'g': pd.date_range('20130101', periods=3),
                       'h': pd.date_range('20130101', periods=3,
                                          tz='US/Eastern'),
                       'i': pd.date_range('20130101', periods=3, freq='ns')})
    pdf = pa.Table.from_pandas(df)
    filename = tempdir / 'tmp_file'
    try:
        _write_table(pdf, filename)
    except pa.ArrowException:
        pass
    # The half-written output must have been cleaned up
    assert not filename.exists()
@pytest.mark.pandas
def test_noncoerced_nanoseconds_written_without_exception(tempdir):
    """ARROW-1957: the Parquet 2.0 writer preserves nanosecond timestamps.

    Writing with ``version='2.0'`` must succeed and round-trip exactly;
    coercing to 'ms' (which would lose data) must still raise.
    """
    n = 9
    # pd.DatetimeIndex(start=..., freq=..., periods=...) was removed from
    # pandas; pd.date_range builds the identical DatetimeIndex.
    df = pd.DataFrame({'x': range(n)},
                      index=pd.date_range(start='2017-01-01',
                                          freq='1n',
                                          periods=n))
    tb = pa.Table.from_pandas(df)

    filename = tempdir / 'written.parquet'
    try:
        pq.write_table(tb, filename, version='2.0')
    except Exception:
        pass
    assert filename.exists()

    recovered_table = pq.read_table(filename)
    assert tb.equals(recovered_table)

    # Loss of data thru coercion (without explicit override) still an error
    filename = tempdir / 'not_written.parquet'
    with pytest.raises(ValueError):
        pq.write_table(tb, filename, coerce_timestamps='ms', version='2.0')
def test_read_non_existent_file(tempdir):
    """Reading a missing file raises and mentions the path in the message.

    The original used try/except with the assertion inside the handler,
    which silently passes when no exception is raised at all;
    pytest.raises makes the expectation explicit.
    """
    path = 'non-existent-file.parquet'
    with pytest.raises(Exception) as exc_info:
        pq.read_table(path)
    assert path in exc_info.value.args[0]
def test_read_table_doesnt_warn(datadir):
    """Reading a stored legacy file must not emit any warnings."""
    # NOTE(review): pytest.warns(None) is deprecated in newer pytest
    # versions — confirm the pinned pytest before upgrading.
    with pytest.warns(None) as record:
        pq.read_table(datadir / 'v0.7.1.parquet')
    assert len(record) == 0
def _test_write_to_dataset_with_partitions(base_path,
                                           filesystem=None,
                                           schema=None,
                                           index_name=None):
    """ARROW-1400: round-trip a dataset written via write_to_dataset.

    Writes a frame partitioned by group1/group2, emits a _common_metadata
    file, then reads the dataset back and compares with the original
    (partition columns come back as categoricals).

    NOTE(review): index_name is currently unused by this helper — confirm
    whether the index_name test still exercises what it intends to.
    """
    output_df = pd.DataFrame({'group1': list('aaabbbbccc'),
                              'group2': list('eefeffgeee'),
                              'num': list(range(10)),
                              # pd.np was removed in pandas 2.0; use np
                              'nan': [np.nan] * 10,
                              'date': np.arange('2017-01-01', '2017-01-11',
                                                dtype='datetime64[D]')})
    cols = output_df.columns.tolist()
    partition_by = ['group1', 'group2']
    output_table = pa.Table.from_pandas(output_df, schema=schema, safe=False,
                                        preserve_index=False)
    pq.write_to_dataset(output_table, base_path, partition_by,
                        filesystem=filesystem)

    metadata_path = os.path.join(base_path, '_common_metadata')
    if filesystem is not None:
        with filesystem.open(metadata_path, 'wb') as f:
            pq.write_metadata(output_table.schema, f)
    else:
        pq.write_metadata(output_table.schema, metadata_path)

    # ARROW-2891: Ensure the output_schema is preserved when writing a
    # partitioned dataset
    dataset = pq.ParquetDataset(base_path,
                                filesystem=filesystem,
                                validate_schema=True)
    # ARROW-2209: Ensure the dataset schema also includes the partition columns
    dataset_cols = set(dataset.schema.to_arrow_schema().names)
    assert dataset_cols == set(output_table.schema.names)

    input_table = dataset.read()
    input_df = input_table.to_pandas()

    # Read data back in and compare with original DataFrame
    # Partitioned columns added to the end of the DataFrame when read
    input_df_cols = input_df.columns.tolist()
    assert partition_by == input_df_cols[-1 * len(partition_by):]

    # Partitioned columns become 'categorical' dtypes
    input_df = input_df[cols]
    for col in partition_by:
        output_df[col] = output_df[col].astype('category')
    assert output_df.equals(input_df)
def _test_write_to_dataset_no_partitions(base_path, filesystem=None):
    """ARROW-1400: repeated un-partitioned writes append files to the root.

    Writes the same table five times, then verifies that five files exist
    and that the deduplicated read-back matches the original frame.
    """
    # ARROW-1400
    output_df = pd.DataFrame({'group1': list('aaabbbbccc'),
                              'group2': list('eefeffgeee'),
                              'num': list(range(10)),
                              'date': np.arange('2017-01-01', '2017-01-11',
                                                dtype='datetime64[D]')})
    cols = output_df.columns.tolist()
    output_table = pa.Table.from_pandas(output_df)
    if filesystem is None:
        filesystem = LocalFileSystem.get_instance()
    # Without partitions, append files to root_path
    n = 5
    for i in range(n):
        pq.write_to_dataset(output_table, base_path,
                            filesystem=filesystem)
    output_files = [file for file in filesystem.ls(base_path)
                    if file.endswith(".parquet")]
    assert len(output_files) == n
    # Deduplicated incoming DataFrame should match
    # original outgoing Dataframe
    input_table = pq.ParquetDataset(base_path,
                                    filesystem=filesystem).read()
    input_df = input_table.to_pandas()
    input_df = input_df.drop_duplicates()
    input_df = input_df[cols]
    assert output_df.equals(input_df)
@pytest.mark.pandas
def test_write_to_dataset_with_partitions(tempdir):
    """Partitioned write/read round trip on the local filesystem."""
    _test_write_to_dataset_with_partitions(str(tempdir))
@pytest.mark.pandas
def test_write_to_dataset_with_partitions_and_schema(tempdir):
    """Partitioned round trip with an explicit, caller-supplied schema."""
    schema = pa.schema([pa.field('group1', type=pa.string()),
                        pa.field('group2', type=pa.string()),
                        pa.field('num', type=pa.int64()),
                        pa.field('nan', type=pa.int32()),
                        pa.field('date', type=pa.timestamp(unit='us'))])
    _test_write_to_dataset_with_partitions(str(tempdir), schema=schema)
@pytest.mark.pandas
def test_write_to_dataset_with_partitions_and_index_name(tempdir):
    """Partitioned round trip passing an index_name through the helper."""
    # NOTE(review): the helper currently never uses its index_name
    # argument — confirm this test still covers the intended behavior.
    _test_write_to_dataset_with_partitions(str(tempdir),
                                           index_name='index_name')
@pytest.mark.pandas
def test_write_to_dataset_no_partitions(tempdir):
    """Un-partitioned write_to_dataset round trip on the local filesystem."""
    _test_write_to_dataset_no_partitions(str(tempdir))
@pytest.mark.pandas
def test_write_to_dataset_with_partitions_and_custom_filenames(tempdir):
    """ARROW-3538: partition_filename_callback controls piece file names."""
    output_df = pd.DataFrame({'group1': list('aaabbbbccc'),
                              'group2': list('eefeffgeee'),
                              'num': list(range(10)),
                              # pd.np was removed in pandas 2.0; use np
                              'nan': [np.nan] * 10,
                              'date': np.arange('2017-01-01', '2017-01-11',
                                                dtype='datetime64[D]')})
    partition_by = ['group1', 'group2']
    output_table = pa.Table.from_pandas(output_df)
    path = str(tempdir)

    def partition_filename_callback(keys):
        # One file per (group1, group2) combination: "<g1>-<g2>.parquet"
        return "{0}-{1}.parquet".format(*keys)

    pq.write_to_dataset(output_table, path,
                        partition_by, partition_filename_callback)
    dataset = pq.ParquetDataset(path)
    # ARROW-3538: Ensure partition filenames match the given pattern
    # defined in the local function partition_filename_callback
    expected_basenames = [
        'a-e.parquet', 'a-f.parquet',
        'b-e.parquet', 'b-f.parquet',
        'b-g.parquet', 'c-e.parquet'
    ]
    output_basenames = [os.path.basename(p.path) for p in dataset.pieces]
    assert sorted(expected_basenames) == sorted(output_basenames)
@pytest.mark.large_memory
def test_large_table_int32_overflow():
    """Writing a table longer than INT32_MAX rows must not overflow."""
    nrows = np.iinfo('int32').max + 1
    ones = pa.array(np.ones(nrows, dtype='uint8'), type=pa.uint8())
    table = pa.Table.from_arrays([ones], names=['one'])
    _write_table(table, io.BytesIO())
def _simple_table_roundtrip(table):
    """Write *table* to an in-memory buffer and read it straight back."""
    sink = pa.BufferOutputStream()
    _write_table(table, sink)
    return _read_table(sink.getvalue())
@pytest.mark.pandas
@pytest.mark.large_memory
def test_binary_array_overflow_to_chunked():
    """ARROW-3762: >2GB binary columns are read back as 2GB-split chunks."""
    # ARROW-3762

    # 2^31 + 1 bytes
    values = [b'x'] + [
        b'x' * (1 << 20)
    ] * 2 * (1 << 10)
    df = pd.DataFrame({'byte_col': values})

    tbl = pa.Table.from_pandas(df, preserve_index=False)
    read_tbl = _simple_table_roundtrip(tbl)

    col0_data = read_tbl[0]
    assert isinstance(col0_data, pa.ChunkedArray)

    # Split up into 2GB chunks
    assert col0_data.num_chunks == 2

    assert tbl.equals(read_tbl)
@pytest.mark.pandas
@pytest.mark.large_memory
def test_list_of_binary_large_cell():
    """ARROW-4688: near-2GB list-of-binary cells round-trip correctly."""
    # ARROW-4688
    data = []

    # TODO(wesm): handle chunked children
    # 2^31 - 1 bytes in a single cell
    # data.append([b'x' * (1 << 20)] * 2047 + [b'x' * ((1 << 20) - 1)])

    # A little under 2GB in cell each containing approximately 10MB each
    data.extend([[b'x' * 1000000] * 10] * 214)

    arr = pa.array(data)
    table = pa.Table.from_arrays([arr], ['chunky_cells'])
    read_table = _simple_table_roundtrip(table)
    assert table.equals(read_table)
@pytest.mark.pandas
def test_index_column_name_duplicate(tempdir):
    """An index built from a column kept in the frame round-trips intact."""
    # 'time' appears both as the index and as a regular column
    data = {
        'close': {
            pd.Timestamp('2017-06-30 01:31:00'): 154.99958999999998,
            pd.Timestamp('2017-06-30 01:32:00'): 154.99958999999998,
        },
        'time': {
            pd.Timestamp('2017-06-30 01:31:00'): pd.Timestamp(
                '2017-06-30 01:31:00'
            ),
            pd.Timestamp('2017-06-30 01:32:00'): pd.Timestamp(
                '2017-06-30 01:32:00'
            ),
        }
    }
    path = str(tempdir / 'data.parquet')
    dfx = pd.DataFrame(data).set_index('time', drop=False)
    tdfx = pa.Table.from_pandas(dfx)
    _write_table(tdfx, path)
    arrow_table = _read_table(path)
    result_df = arrow_table.to_pandas()
    tm.assert_frame_equal(result_df, dfx)
@pytest.mark.pandas
def test_parquet_nested_convenience(tempdir):
    """ARROW-1684: list columns round-trip through the convenience API."""
    frame = pd.DataFrame({
        'a': [[1, 2, 3], None, [4, 5], []],
        'b': [[1.], None, None, [6., 7.]],
    })

    out_path = str(tempdir / 'nested_convenience.parquet')
    _write_table(pa.Table.from_pandas(frame, preserve_index=False), out_path)

    # Column subset and full reads both reconstruct the nested data
    only_a = pq.read_table(out_path, columns=['a'])
    tm.assert_frame_equal(only_a.to_pandas(), frame[['a']])

    both = pq.read_table(out_path, columns=['a', 'b'])
    tm.assert_frame_equal(both.to_pandas(), frame)
@pytest.mark.pandas
def test_backwards_compatible_index_naming(datadir):
    """A file written by pyarrow 0.7.1 reads back with the expected frame."""
    expected_string = b"""\
carat cut color clarity depth table price x y z
 0.23 Ideal E SI2 61.5 55.0 326 3.95 3.98 2.43
 0.21 Premium E SI1 59.8 61.0 326 3.89 3.84 2.31
 0.23 Good E VS1 56.9 65.0 327 4.05 4.07 2.31
 0.29 Premium I VS2 62.4 58.0 334 4.20 4.23 2.63
 0.31 Good J SI2 63.3 58.0 335 4.34 4.35 2.75
 0.24 Very Good J VVS2 62.8 57.0 336 3.94 3.96 2.48
 0.24 Very Good I VVS1 62.3 57.0 336 3.95 3.98 2.47
 0.26 Very Good H SI1 61.9 55.0 337 4.07 4.11 2.53
 0.22 Fair E VS2 65.1 61.0 337 3.87 3.78 2.49
 0.23 Very Good H VS1 59.4 61.0 338 4.00 4.05 2.39"""
    expected = pd.read_csv(io.BytesIO(expected_string), sep=r'\s{2,}',
                           index_col=None, header=0, engine='python')
    table = _read_table(datadir / 'v0.7.1.parquet')
    result = table.to_pandas()
    tm.assert_frame_equal(result, expected)
@pytest.mark.pandas
def test_backwards_compatible_index_multi_level_named(datadir):
    """A 0.7.1 file with a fully named MultiIndex reads back correctly."""
    expected_string = b"""\
carat cut color clarity depth table price x y z
 0.23 Ideal E SI2 61.5 55.0 326 3.95 3.98 2.43
 0.21 Premium E SI1 59.8 61.0 326 3.89 3.84 2.31
 0.23 Good E VS1 56.9 65.0 327 4.05 4.07 2.31
 0.29 Premium I VS2 62.4 58.0 334 4.20 4.23 2.63
 0.31 Good J SI2 63.3 58.0 335 4.34 4.35 2.75
 0.24 Very Good J VVS2 62.8 57.0 336 3.94 3.96 2.48
 0.24 Very Good I VVS1 62.3 57.0 336 3.95 3.98 2.47
 0.26 Very Good H SI1 61.9 55.0 337 4.07 4.11 2.53
 0.22 Fair E VS2 65.1 61.0 337 3.87 3.78 2.49
 0.23 Very Good H VS1 59.4 61.0 338 4.00 4.05 2.39"""
    expected = pd.read_csv(
        io.BytesIO(expected_string), sep=r'\s{2,}',
        index_col=['cut', 'color', 'clarity'],
        header=0, engine='python'
    ).sort_index()
    table = _read_table(datadir / 'v0.7.1.all-named-index.parquet')
    result = table.to_pandas()
    tm.assert_frame_equal(result, expected)
@pytest.mark.pandas
def test_backwards_compatible_index_multi_level_some_named(datadir):
    """A 0.7.1 file with a partially named MultiIndex reads back correctly."""
    expected_string = b"""\
carat cut color clarity depth table price x y z
 0.23 Ideal E SI2 61.5 55.0 326 3.95 3.98 2.43
 0.21 Premium E SI1 59.8 61.0 326 3.89 3.84 2.31
 0.23 Good E VS1 56.9 65.0 327 4.05 4.07 2.31
 0.29 Premium I VS2 62.4 58.0 334 4.20 4.23 2.63
 0.31 Good J SI2 63.3 58.0 335 4.34 4.35 2.75
 0.24 Very Good J VVS2 62.8 57.0 336 3.94 3.96 2.48
 0.24 Very Good I VVS1 62.3 57.0 336 3.95 3.98 2.47
 0.26 Very Good H SI1 61.9 55.0 337 4.07 4.11 2.53
 0.22 Fair E VS2 65.1 61.0 337 3.87 3.78 2.49
 0.23 Very Good H VS1 59.4 61.0 338 4.00 4.05 2.39"""
    expected = pd.read_csv(
        io.BytesIO(expected_string),
        sep=r'\s{2,}', index_col=['cut', 'color', 'clarity'],
        header=0, engine='python'
    ).sort_index()
    # Middle level is unnamed in the stored file
    expected.index = expected.index.set_names(['cut', None, 'clarity'])
    table = _read_table(datadir / 'v0.7.1.some-named-index.parquet')
    result = table.to_pandas()
    tm.assert_frame_equal(result, expected)
@pytest.mark.pandas
def test_backwards_compatible_column_metadata_handling(datadir):
    """Old-style column metadata reads correctly, also under column subsets."""
    expected = pd.DataFrame(
        {'a': [1, 2, 3], 'b': [.1, .2, .3],
         'c': pd.date_range("2017-01-01", periods=3, tz='Europe/Brussels')})
    expected.index = pd.MultiIndex.from_arrays(
        [['a', 'b', 'c'],
         pd.date_range("2017-01-01", periods=3, tz='Europe/Brussels')],
        names=['index', None])
    path = datadir / 'v0.7.1.column-metadata-handling.parquet'
    table = _read_table(path)
    result = table.to_pandas()
    tm.assert_frame_equal(result, expected)
    # Selecting a subset of columns drops the stored index
    table = _read_table(path, columns=['a'])
    result = table.to_pandas()
    tm.assert_frame_equal(result, expected[['a']].reset_index(drop=True))
def _make_dataset_for_pickling(tempdir, N=100):
    """Build a small multi-row-group dataset plus a _metadata sidecar.

    Returns the opened ParquetDataset, ready to be pickled by the caller.
    """
    path = tempdir / 'data.parquet'
    fs = LocalFileSystem.get_instance()
    df = pd.DataFrame({
        'index': np.arange(N),
        'values': np.random.randn(N)
    }, columns=['index', 'values'])
    table = pa.Table.from_pandas(df)
    num_groups = 3
    # Write the same table three times to get three row groups
    with pq.ParquetWriter(path, table.schema) as writer:
        for i in range(num_groups):
            writer.write_table(table)
    reader = pq.ParquetFile(path)
    assert reader.metadata.num_row_groups == num_groups
    metadata_path = tempdir / '_metadata'
    with fs.open(metadata_path, 'wb') as f:
        pq.write_metadata(table.schema, f)
    dataset = pq.ParquetDataset(tempdir, filesystem=fs)
    assert dataset.metadata_path == str(metadata_path)
    return dataset
@pytest.mark.pandas
@pytest.mark.parametrize('pickler', [
    pytest.param(pickle, id='builtin'),
    pytest.param(pytest.importorskip('cloudpickle'), id='cloudpickle')
])
def test_pickle_dataset(tempdir, datadir, pickler):
    """Datasets, their metadata, schema and pieces all survive pickling."""
    def is_pickleable(obj):
        # Round-trips through the parametrized pickler and compares equal
        return obj == pickler.loads(pickler.dumps(obj))
    dataset = _make_dataset_for_pickling(tempdir)
    assert is_pickleable(dataset)
    assert is_pickleable(dataset.metadata)
    assert is_pickleable(dataset.metadata.schema)
    assert len(dataset.metadata.schema)
    for column in dataset.metadata.schema:
        assert is_pickleable(column)
    for piece in dataset.pieces:
        assert is_pickleable(piece)
        metadata = piece.get_metadata()
        assert metadata.num_row_groups
        for i in range(metadata.num_row_groups):
            assert is_pickleable(metadata.row_group(i))
@pytest.mark.pandas
def test_decimal_roundtrip(tempdir):
    """Decimals of every (precision, scale) combination round-trip intact."""
    num_values = 10

    columns = {}
    for precision in range(1, 39):
        for scale in range(precision + 1):
            with util.random_seed(0):
                # Same seed per combination for reproducible fixtures
                values = [util.randdecimal(precision, scale)
                          for _ in range(num_values)]
            key = 'dec_precision_{:d}_scale_{:d}'.format(precision, scale)
            columns[key] = values

    expected = pd.DataFrame(columns)
    out_path = str(tempdir / 'decimals.parquet')
    _write_table(pa.Table.from_pandas(expected), out_path)
    round_tripped = _read_table(out_path).to_pandas()
    tm.assert_frame_equal(round_tripped, expected)
@pytest.mark.pandas
@pytest.mark.xfail(
    raises=pa.ArrowException, reason='Parquet does not support negative scale'
)
def test_decimal_roundtrip_negative_scale(tempdir):
    """Negative decimal scale is unsupported by Parquet (expected failure)."""
    expected = pd.DataFrame({'decimal_num': [decimal.Decimal('1.23E4')]})
    out_path = str(tempdir / 'decimals.parquet')
    _write_table(pa.Table.from_pandas(expected), out_path)
    round_tripped = _read_table(out_path).to_pandas()
    tm.assert_frame_equal(round_tripped, expected)
@pytest.mark.pandas
def test_parquet_writer_context_obj(tempdir):
    """ParquetWriter as a context manager writes multiple tables correctly."""
    df = _test_dataframe(100)
    df['unique_id'] = 0
    arrow_table = pa.Table.from_pandas(df, preserve_index=False)
    out = pa.BufferOutputStream()
    with pq.ParquetWriter(out, arrow_table.schema, version='2.0') as writer:
        frames = []
        for i in range(10):
            # Vary one column per iteration so frames are distinguishable
            df['unique_id'] = i
            arrow_table = pa.Table.from_pandas(df, preserve_index=False)
            writer.write_table(arrow_table)
            frames.append(df.copy())
    buf = out.getvalue()
    result = _read_table(pa.BufferReader(buf))
    expected = pd.concat(frames, ignore_index=True)
    tm.assert_frame_equal(result.to_pandas(), expected)
@pytest.mark.pandas
def test_parquet_writer_context_obj_with_exception(tempdir):
    """An exception inside the writer context still yields a readable file.

    The context manager must close the writer (writing the footer) even
    when the body raises, so the six tables written before the error are
    recoverable.
    """
    df = _test_dataframe(100)
    df['unique_id'] = 0
    arrow_table = pa.Table.from_pandas(df, preserve_index=False)
    out = pa.BufferOutputStream()
    error_text = 'Artificial Error'
    try:
        with pq.ParquetWriter(out,
                              arrow_table.schema,
                              version='2.0') as writer:
            frames = []
            for i in range(10):
                df['unique_id'] = i
                arrow_table = pa.Table.from_pandas(df, preserve_index=False)
                writer.write_table(arrow_table)
                frames.append(df.copy())
                if i == 5:
                    # Abort mid-way through the writes
                    raise ValueError(error_text)
    except Exception as e:
        assert str(e) == error_text
    buf = out.getvalue()
    result = _read_table(pa.BufferReader(buf))
    expected = pd.concat(frames, ignore_index=True)
    tm.assert_frame_equal(result.to_pandas(), expected)
@pytest.mark.pandas
def test_zlib_compression_bug():
    """ARROW-3514: regression for 'zlib deflate ... output buffer too small'."""
    table = pa.Table.from_arrays([pa.array(['abc', 'def'])], ['some_col'])
    sink = io.BytesIO()
    pq.write_table(table, sink, compression='gzip')

    sink.seek(0)
    round_tripped = pq.read_table(sink)
    tm.assert_frame_equal(round_tripped.to_pandas(), table.to_pandas())
@pytest.mark.pandas
def test_merging_parquet_tables_with_different_pandas_metadata(tempdir):
    """ARROW-3728: tables differing only in pandas metadata can be appended.

    Schema equality must hold with check_metadata=False and the writer
    must accept both tables.
    """
    # ARROW-3728: Merging Parquet Files - Pandas Meta in Schema Mismatch
    schema = pa.schema([
        pa.field('int', pa.int16()),
        pa.field('float', pa.float32()),
        pa.field('string', pa.string())
    ])
    df1 = pd.DataFrame({
        'int': np.arange(3, dtype=np.uint8),
        'float': np.arange(3, dtype=np.float32),
        'string': ['ABBA', 'EDDA', 'ACDC']
    })
    df2 = pd.DataFrame({
        'int': [4, 5],
        'float': [1.1, None],
        'string': [None, None]
    })
    table1 = pa.Table.from_pandas(df1, schema=schema, preserve_index=False)
    table2 = pa.Table.from_pandas(df2, schema=schema, preserve_index=False)

    assert not table1.schema.equals(table2.schema)
    assert table1.schema.equals(table2.schema, check_metadata=False)

    # Close the writer via a context manager so the file footer is written
    # (the original leaked an open ParquetWriter).
    with pq.ParquetWriter(tempdir / 'merged.parquet', schema=schema) as writer:
        writer.write_table(table1)
        writer.write_table(table2)
def test_empty_row_groups(tempdir):
    """ARROW-3020: zero-row row groups can be written and read back."""
    empty = pa.Table.from_arrays([pa.array([], type='int32')], ['f0'])

    out_path = tempdir / 'empty_row_groups.parquet'
    num_groups = 3
    with pq.ParquetWriter(out_path, empty.schema) as writer:
        for _ in range(num_groups):
            writer.write_table(empty)

    reader = pq.ParquetFile(out_path)
    assert reader.metadata.num_row_groups == num_groups
    for group in range(num_groups):
        assert reader.read_row_group(group).equals(empty)
@pytest.mark.pandas
def test_parquet_writer_with_caller_provided_filesystem():
    """ParquetWriter opens its output through a user-supplied filesystem."""
    out = pa.BufferOutputStream()

    class CustomFS(FileSystem):
        def __init__(self):
            self.path = None
            self.mode = None

        def open(self, path, mode='rb'):
            # Record how we were asked to open, then hand back the buffer
            self.path = path
            self.mode = mode
            return out

    fs = CustomFS()
    fname = 'expected_fname.parquet'
    df = _test_dataframe(100)
    table = pa.Table.from_pandas(df, preserve_index=False)

    with pq.ParquetWriter(fname, table.schema, filesystem=fs, version='2.0') \
            as writer:
        writer.write_table(table)

    assert fs.path == fname
    assert fs.mode == 'wb'
    assert out.closed

    buf = out.getvalue()
    table_read = _read_table(pa.BufferReader(buf))
    df_read = table_read.to_pandas()
    tm.assert_frame_equal(df_read, df)

    # Should raise ValueError when filesystem is passed with file-like object
    with pytest.raises(ValueError) as err_info:
        pq.ParquetWriter(pa.BufferOutputStream(), table.schema, filesystem=fs)
    expected_msg = ("filesystem passed but where is file-like, so"
                    " there is nothing to open with filesystem.")
    # BUG FIX: compare against the exception's message via err_info.value;
    # str(err_info) is not the exception message and never equals it.
    assert str(err_info.value) == expected_msg
def test_writing_empty_lists():
    """ARROW-2591: writing all-empty list arrays must not segfault."""
    empty_lists = pa.array([[], []], pa.list_(pa.int32()))
    table = pa.Table.from_arrays([empty_lists], ['list(int32)'])
    _check_roundtrip(table)
def test_write_nested_zero_length_array_chunk_failure():
    """ARROW-3792: nested columns with a zero-length leading chunk round-trip."""
    # Bug report in ARROW-3792
    cols = OrderedDict(
        int32=pa.int32(),
        list_string=pa.list_(pa.string())
    )
    data = [[], [OrderedDict(int32=1, list_string=('G',)), ]]

    # This produces a table with a column like
    # <Column name='list_string' type=ListType(list<item: string>)>
    # [
    #   [],
    #   [
    #     [
    #       "G"
    #     ]
    #   ]
    # ]
    #
    # Each column is a ChunkedArray with 2 elements
    my_arrays = [pa.array(batch, type=pa.struct(cols)).flatten()
                 for batch in data]
    my_batches = [pa.RecordBatch.from_arrays(batch, pa.schema(cols))
                  for batch in my_arrays]
    tbl = pa.Table.from_batches(my_batches, pa.schema(cols))
    _check_roundtrip(tbl)
@pytest.mark.pandas
def test_partitioned_dataset(tempdir):
    """ARROW-3208: reading a partitioned dataset, then writing it back out."""
    root = tempdir / "ARROW-3208"
    frame = pd.DataFrame({
        'one': [-1, 10, 2.5, 100, 1000, 1, 29.2],
        'two': [-1, 10, 2, 100, 1000, 1, 11],
        'three': [0, 0, 0, 0, 0, 0, 0]
    })
    pq.write_to_dataset(pa.Table.from_pandas(frame), root_path=str(root),
                        partition_cols=['one', 'two'])

    # Reading the whole dataset and re-serializing it used to segfault
    combined = pq.ParquetDataset(root).read()
    pq.write_table(combined, root / "output.parquet")
def test_read_column_invalid_index():
    """read_column rejects out-of-range column indices."""
    table = pa.table([pa.array([4, 5]), pa.array(["foo", "bar"])],
                     names=['ints', 'strs'])
    sink = pa.BufferOutputStream()
    pq.write_table(table, sink)
    parquet_file = pq.ParquetFile(sink.getvalue())

    assert parquet_file.reader.read_column(0).to_pylist() == [4, 5]
    assert parquet_file.reader.read_column(1).to_pylist() == ["foo", "bar"]

    # One below and one above the valid range
    for bad_index in (-1, 2):
        with pytest.raises((ValueError, IndexError)):
            parquet_file.reader.read_column(bad_index)
def test_direct_read_dictionary():
    """ARROW-3325: read_dictionary returns a dictionary-encoded column."""
    # ARROW-3325
    repeats = 10
    nunique = 5

    data = [
        [tm.rands(10) for i in range(nunique)] * repeats,

    ]
    table = pa.table(data, names=['f0'])

    bio = pa.BufferOutputStream()
    pq.write_table(table, bio)
    contents = bio.getvalue()

    result = pq.read_table(pa.BufferReader(contents),
                           read_dictionary=['f0'])

    # Compute dictionary-encoded subfield
    expected = pa.table([table[0].dictionary_encode()], names=['f0'])
    assert result.equals(expected)
def test_dataset_read_dictionary(tempdir):
    """read_dictionary works through ParquetDataset over multiple files."""
    path = tempdir / "ARROW-3325-dataset"
    t1 = pa.table([[tm.rands(10) for i in range(5)] * 10], names=['f0'])
    t2 = pa.table([[tm.rands(10) for i in range(5)] * 10], names=['f0'])
    pq.write_to_dataset(t1, root_path=str(path))
    pq.write_to_dataset(t2, root_path=str(path))
    result = pq.ParquetDataset(path, read_dictionary=['f0']).read()

    # The order of the chunks is non-deterministic
    ex_chunks = [t1[0].chunk(0).dictionary_encode(),
                 t2[0].chunk(0).dictionary_encode()]

    assert result[0].num_chunks == 2
    c0, c1 = result[0].chunk(0), result[0].chunk(1)
    # Accept either chunk ordering
    if c0.equals(ex_chunks[0]):
        assert c1.equals(ex_chunks[1])
    else:
        assert c0.equals(ex_chunks[1])
        assert c1.equals(ex_chunks[0])
def test_direct_read_dictionary_subfield():
    """read_dictionary on a nested list item ('f0.list.item') works."""
    repeats = 10
    nunique = 5

    data = [
        [[tm.rands(10)] for i in range(nunique)] * repeats,
    ]
    table = pa.table(data, names=['f0'])

    bio = pa.BufferOutputStream()
    pq.write_table(table, bio)
    contents = bio.getvalue()
    result = pq.read_table(pa.BufferReader(contents),
                           read_dictionary=['f0.list.item'])

    # Build the expected list-of-dictionary column by hand
    arr = pa.array(data[0])
    values_as_dict = arr.values.dictionary_encode()

    inner_indices = values_as_dict.indices.cast('int32')
    new_values = pa.DictionaryArray.from_arrays(inner_indices,
                                                values_as_dict.dictionary)

    offsets = pa.array(range(51), type='int32')
    expected_arr = pa.ListArray.from_arrays(offsets, new_values)
    expected = pa.table([expected_arr], names=['f0'])

    assert result.equals(expected)
    assert result[0].num_chunks == 1
@pytest.mark.pandas
def test_dataset_metadata(tempdir):
    """metadata_collector gathers metadata matching the written pieces."""
    path = tempdir / "ARROW-1983-dataset"

    # create and write a test dataset
    df = pd.DataFrame({
        'one': [1, 2, 3],
        'two': [-1, -2, -3],
        'three': [[1, 2], [2, 3], [3, 4]],
    })
    table = pa.Table.from_pandas(df)

    metadata_list = []
    pq.write_to_dataset(table, root_path=str(path),
                        partition_cols=['one', 'two'],
                        metadata_collector=metadata_list)

    # open the dataset and collect metadata from pieces:
    dataset = pq.ParquetDataset(path)
    metadata_list2 = [p.get_metadata() for p in dataset.pieces]

    # compare metadata list content:
    assert len(metadata_list) == len(metadata_list2)
    for md, md2 in zip(metadata_list, metadata_list2):
        d = md.to_dict()
        d2 = md2.to_dict()
        # serialized_size is initialized in the reader:
        assert d.pop('serialized_size') == 0
        assert d2.pop('serialized_size') > 0
        assert d == d2
def test_parquet_file_too_small(tempdir):
    """Files shorter than a valid parquet footer raise with the byte count."""
    path = str(tempdir / "test.parquet")
    # Empty file
    with pytest.raises(pa.ArrowIOError,
                       match='size is 0 bytes'):
        with open(path, 'wb') as f:
            pass
        pq.read_table(path)
    # Four junk bytes — still smaller than the parquet magic + footer
    with pytest.raises(pa.ArrowIOError,
                       match='size is 4 bytes'):
        with open(path, 'wb') as f:
            f.write(b'ffff')
        pq.read_table(path)
@pytest.mark.pandas
def test_categorical_index_survives_roundtrip():
    """ARROW-3652 (via ARROW-3246): a categorical index round-trips."""
    frame = pd.DataFrame([['a', 'b'], ['c', 'd']], columns=['c1', 'c2'])
    frame['c1'] = frame['c1'].astype('category')
    frame = frame.set_index(['c1'])

    sink = pa.BufferOutputStream()
    pq.write_table(pa.Table.from_pandas(frame), sink)

    restored = pq.read_pandas(sink.getvalue()).to_pandas()
    assert isinstance(restored.index, pd.CategoricalIndex)
    assert restored.index.equals(frame.index)
def test_dictionary_array_automatically_read():
    """ARROW-3246: large dictionaries are read back dictionary-encoded."""
    # ARROW-3246

    # Make a large dictionary, a little over 4MB of data
    dict_length = 4000
    dict_values = pa.array([('x' * 1000 + '_{}'.format(i))
                            for i in range(dict_length)])

    num_chunks = 10
    chunk_size = 100
    chunks = []
    for i in range(num_chunks):
        indices = np.random.randint(0, dict_length,
                                    size=chunk_size).astype(np.int32)
        chunks.append(pa.DictionaryArray.from_arrays(pa.array(indices),
                                                     dict_values))

    table = pa.table([pa.chunked_array(chunks)], names=['f0'])

    bio = pa.BufferOutputStream()
    pq.write_table(table, bio)
    contents = bio.getvalue()
    result = pq.read_table(pa.BufferReader(contents))

    assert result.equals(table)

    # The only key in the metadata was the Arrow schema key
    assert result.schema.metadata is None
@pytest.mark.pandas
def test_pandas_categorical_na_type_row_groups():
    """ARROW-5085: all-null categorical columns write across row groups."""
    # ARROW-5085
    df = pd.DataFrame({"col": [None] * 100, "int": [1.0] * 100})
    df_category = df.astype({"col": "category", "int": "category"})
    table = pa.Table.from_pandas(df)
    table_cat = pa.Table.from_pandas(df_category)
    buf = pa.BufferOutputStream()

    # it works
    pq.write_table(table_cat, buf, version="2.0", chunk_size=10)
    result = pq.read_table(buf.getvalue())

    # Result is non-categorical
    assert result[0].equals(table[0])
    assert result[1].equals(table[1])
@pytest.mark.pandas
def test_pandas_categorical_roundtrip():
    """ARROW-5480 (via ARROW-3246): categoricals survive a round trip."""
    # One category ('bar') is unobserved and one code (-1) is null
    codes = np.array([2, 0, 0, 2, 0, -1, 2], dtype='int32')
    categories = ['foo', 'bar', 'baz']
    frame = pd.DataFrame(
        {'x': pd.Categorical.from_codes(codes, categories=categories)})

    sink = pa.BufferOutputStream()
    pq.write_table(pa.table(frame), sink)

    restored = pq.read_table(sink.getvalue()).to_pandas()
    assert restored.x.dtype == 'category'
    assert (restored.x.cat.categories == categories).all()
    tm.assert_frame_equal(restored, frame)
@pytest.mark.pandas
def test_multi_dataset_metadata(tempdir):
    """Metadata from several files can be merged into a single _metadata file."""
    filenames = ["ARROW-1983-dataset.0", "ARROW-1983-dataset.1"]
    metapath = str(tempdir / "_metadata")

    # create a test dataset
    df = pd.DataFrame({
        'one': [1, 2, 3],
        'two': [-1, -2, -3],
        'three': [[1, 2], [2, 3], [3, 4]],
    })
    table = pa.Table.from_pandas(df)

    # write dataset twice and collect/merge metadata
    _meta = None
    for filename in filenames:
        meta = []
        pq.write_table(table, str(tempdir / filename),
                       metadata_collector=meta)
        meta[0].set_file_path(filename)
        if _meta is None:
            _meta = meta[0]
        else:
            _meta.append_row_groups(meta[0])

    # Write merged metadata-only file
    with open(metapath, "wb") as f:
        _meta.write_metadata_file(f)

    # Read back the metadata
    meta = pq.read_metadata(metapath)
    md = meta.to_dict()
    _md = _meta.to_dict()
    for key in _md:
        if key != 'serialized_size':
            assert _md[key] == md[key]
    # Two files of three rows each, merged into one metadata object
    assert _md['num_columns'] == 3
    assert _md['num_rows'] == 6
    assert _md['num_row_groups'] == 2
    # serialized_size is only populated by the reader
    assert _md['serialized_size'] == 0
    assert md['serialized_size'] > 0
@pytest.mark.pandas
def test_filter_before_validate_schema(tempdir):
    """ARROW-4076: partition filters are applied before schema validation.

    The two partitions have incompatible types for column B; validating
    all schemas up front would fail, but filtering to A=0 first succeeds.
    """
    part0 = tempdir / 'A=0'
    part0.mkdir()
    pq.write_table(pa.Table.from_pandas(pd.DataFrame({'B': [1, 2, 3]})),
                   part0 / 'data.parquet')

    part1 = tempdir / 'A=1'
    part1.mkdir()
    pq.write_table(pa.Table.from_pandas(pd.DataFrame({'B': ['a', 'b', 'c']})),
                   part1 / 'data.parquet')

    # read single file using filter
    filtered = pq.read_table(tempdir, filters=[[('A', '==', 0)]])
    assert filtered.column('B').equals(pa.chunked_array([[1, 2, 3]]))
| 32.513709 | 79 | 0.627494 |
from collections import OrderedDict
import datetime
import decimal
import io
import json
import os
import six
import pickle
import pytest
import numpy as np
import pyarrow as pa
from pyarrow.compat import guid, u, BytesIO, unichar, PY2
from pyarrow.pandas_compat import _pandas_api
from pyarrow.tests import util
from pyarrow.filesystem import LocalFileSystem, FileSystem
try:
import pyarrow.parquet as pq
except ImportError:
pq = None
try:
import pandas as pd
import pandas.util.testing as tm
from .pandas_examples import dataframe_with_arrays, dataframe_with_lists
except ImportError:
pd = tm = None
pytestmark = pytest.mark.parquet
@pytest.fixture(scope='module')
def datadir(datadir):
    """Point the shared datadir fixture at the parquet test-data subdir."""
    return datadir / 'parquet'
def _write_table(table, path, **kwargs):
    """Write *table* (a Table or DataFrame) to *path*; return the Table."""
    # Imported locally: the module-level pq may be None when parquet
    # support is unavailable (see the guarded import at the top of file).
    import pyarrow.parquet as pq

    if _pandas_api.is_data_frame(table):
        table = pa.Table.from_pandas(table)

    pq.write_table(table, path, **kwargs)
    return table
def _read_table(*args, **kwargs):
    """Thin pass-through to pq.read_table."""
    return pq.read_table(*args, **kwargs)
def _roundtrip_table(table, read_table_kwargs=None,
                     write_table_kwargs=None):
    """Serialize *table* to an in-memory buffer and read it back."""
    write_kwargs = write_table_kwargs or {}
    read_kwargs = read_table_kwargs or {}

    sink = io.BytesIO()
    _write_table(table, sink, **write_kwargs)
    sink.seek(0)
    return _read_table(sink, **read_kwargs)
def _check_roundtrip(table, expected=None, read_table_kwargs=None,
                     **write_table_kwargs):
    """Round-trip *table* twice, asserting equality to *expected* each time.

    The second pass re-serializes the first pass's output, checking that
    a round-tripped table is itself round-trippable.
    """
    if expected is None:
        expected = table

    read_table_kwargs = read_table_kwargs or {}

    current = table
    for _ in range(2):
        current = _roundtrip_table(current,
                                   read_table_kwargs=read_table_kwargs,
                                   write_table_kwargs=write_table_kwargs)
        assert current.equals(expected)
def _roundtrip_pandas_dataframe(df, write_kwargs):
    """Push *df* through a parquet round trip; return the recovered frame."""
    buf = io.BytesIO()
    _write_table(pa.Table.from_pandas(df), buf, **write_kwargs)

    buf.seek(0)
    return _read_table(buf).to_pandas()
@pytest.mark.parametrize('dtype', [int, float])
def test_single_pylist_column_roundtrip(tempdir, dtype):
    """A single int/float column round-trips with name and data preserved."""
    filename = tempdir / 'single_{}_column.parquet'.format(dtype.__name__)
    data = [pa.array(list(map(dtype, range(5))))]
    table = pa.Table.from_arrays(data, names=['a'])
    _write_table(table, filename)
    table_read = _read_table(filename)
    for i in range(table.num_columns):
        col_written = table[i]
        col_read = table_read[i]
        assert table.field(i).name == table_read.field(i).name
        # Small data comes back as a single chunk
        assert col_read.num_chunks == 1
        data_written = col_written.chunk(0)
        data_read = col_read.chunk(0)
        assert data_written.equals(data_read)
def alltypes_sample(size=10000, seed=0, categorical=False):
    """Build a DataFrame with one column per primitive type exercised by
    the Parquet tests, plus strings, nulls and list-of-null columns.

    Parameters
    ----------
    size : int
        Number of rows.
    seed : int
        Seed for the random bool column, so samples are reproducible.
    categorical : bool
        If True, add a categorical copy of the string column.
    """
    np.random.seed(seed)
    arrays = {
        'uint8': np.arange(size, dtype=np.uint8),
        'uint16': np.arange(size, dtype=np.uint16),
        'uint32': np.arange(size, dtype=np.uint32),
        'uint64': np.arange(size, dtype=np.uint64),
        # BUG fix: this column used dtype=np.int16, so 'int8' never actually
        # exercised int8.  Cast explicitly; silent wrap-around on the cast is
        # fine for test data.
        'int8': np.arange(size, dtype=np.int64).astype(np.int8),
        'int16': np.arange(size, dtype=np.int16),
        'int32': np.arange(size, dtype=np.int32),
        'int64': np.arange(size, dtype=np.int64),
        'float32': np.arange(size, dtype=np.float32),
        'float64': np.arange(size, dtype=np.float64),
        'bool': np.random.randn(size) > 0,
        'datetime': np.arange("2016-01-01T00:00:00.001", size,
                              dtype='datetime64[ms]'),
        'str': pd.Series([str(x) for x in range(size)]),
        'empty_str': [''] * size,
        'str_with_nulls': [None] + [str(x) for x in range(size - 2)] + [None],
        'null': [None] * size,
        'null_list': [None] * 2 + [[None] * (x % 4) for x in range(size - 2)],
    }
    if categorical:
        arrays['str_category'] = arrays['str'].astype('category')
    return pd.DataFrame(arrays)
@pytest.mark.pandas
@pytest.mark.parametrize('chunk_size', [None, 1000])
def test_pandas_parquet_2_0_roundtrip(tempdir, chunk_size):
    """All supported dtypes (incl. categorical) round trip with the
    version 2.0 writer, with and without an explicit chunk size."""
    df = alltypes_sample(size=10000, categorical=True)
    filename = tempdir / 'pandas_roundtrip.parquet'
    arrow_table = pa.Table.from_pandas(df)
    # Conversion from pandas must attach pandas metadata to the schema
    assert arrow_table.schema.pandas_metadata is not None
    _write_table(arrow_table, filename, version="2.0",
                 coerce_timestamps='ms', chunk_size=chunk_size)
    table_read = pq.read_pandas(filename)
    # ...and that metadata must survive the write/read cycle untouched
    assert table_read.schema.pandas_metadata is not None
    assert arrow_table.schema.metadata == table_read.schema.metadata
    df_read = table_read.to_pandas()
    tm.assert_frame_equal(df, df_read)
def test_set_data_page_size():
    """The writer accepts several explicit data page sizes."""
    arr = pa.array([1, 2, 3] * 1000000)
    t = pa.Table.from_arrays([arr], names=['f0'])
    # 128 KiB, 256 KiB and 512 KiB pages
    for target_page_size in (2 << 16, 2 << 17, 2 << 18):
        _check_roundtrip(t, data_page_size=target_page_size)
@pytest.mark.pandas
def test_chunked_table_write():
    """Tables assembled from several record batches round trip."""
    df = alltypes_sample(size=10)
    batch = pa.RecordBatch.from_pandas(df)
    table = pa.Table.from_batches([batch] * 3)
    _check_roundtrip(table, version='2.0')
    # Repeat with list-typed columns
    df, _ = dataframe_with_lists()
    batch = pa.RecordBatch.from_pandas(df)
    table = pa.Table.from_batches([batch] * 3)
    _check_roundtrip(table, version='2.0')
@pytest.mark.pandas
def test_no_memory_map(tempdir):
    """Reading with memory_map=False must behave like the default path,
    both via _check_roundtrip and via read_pandas on a real file."""
    df = alltypes_sample(size=10)
    table = pa.Table.from_pandas(df)
    _check_roundtrip(table, read_table_kwargs={'memory_map': False},
                     version='2.0')
    filename = str(tempdir / 'tmp_file')
    with open(filename, 'wb') as f:
        _write_table(table, f, version='2.0')
    table_read = pq.read_pandas(filename, memory_map=False)
    assert table_read.equals(table)
def test_special_chars_filename(tempdir):
    """Paths containing '#' and spaces must be written and read correctly."""
    table = pa.Table.from_arrays([pa.array([42])], ["ints"])
    filename = "foo # bar"
    path = tempdir / filename
    assert not path.exists()
    _write_table(table, str(path))
    assert path.exists()
    assert _read_table(str(path)).equals(table)
@pytest.mark.pandas
def test_empty_table_roundtrip():
    """Zero-row slices of every column type (incl. null and
    list-of-null columns) must survive a round trip."""
    df = alltypes_sample(size=10)
    # Create a zero-row table by slicing each column to length 0
    table = pa.Table.from_pandas(df)
    table = pa.Table.from_arrays(
        [col.chunk(0)[:0] for col in table.itercolumns()],
        names=table.schema.names)
    assert table.schema.field_by_name('null').type == pa.null()
    assert table.schema.field_by_name('null_list').type == pa.list_(pa.null())
    _check_roundtrip(table, version='2.0')
@pytest.mark.pandas
def test_empty_table_no_columns():
    """A completely empty DataFrame (no columns, no rows) round trips."""
    table = pa.Table.from_pandas(pd.DataFrame(), preserve_index=False)
    _check_roundtrip(table)
def test_empty_lists_table_roundtrip():
    """A list column whose every value is an empty list round trips."""
    empty_lists = pa.array([[], []], type=pa.list_(pa.int32()))
    _check_roundtrip(pa.Table.from_arrays([empty_lists], ["A"]))
@pytest.mark.pandas
def test_pandas_parquet_datetime_tz():
    """Timezone-aware datetimes round trip both as columns and as the
    index, preserving each column's timezone."""
    s = pd.Series([datetime.datetime(2017, 9, 6)])
    s = s.dt.tz_localize('utc')
    s.index = s
    # Both a column and an index to hit both use cases
    df = pd.DataFrame({'tz_aware': s,
                       'tz_eastern': s.dt.tz_convert('US/Eastern')},
                      index=s)
    f = BytesIO()
    arrow_table = pa.Table.from_pandas(df)
    _write_table(arrow_table, f, coerce_timestamps='ms')
    f.seek(0)
    table_read = pq.read_pandas(f)
    df_read = table_read.to_pandas()
    tm.assert_frame_equal(df, df_read)
@pytest.mark.pandas
@pytest.mark.skipif(six.PY2, reason='datetime.timezone is available since '
                                    'python version 3.2')
def test_datetime_timezone_tzinfo():
    """A stdlib tzinfo-aware datetime survives a pandas round trip."""
    ts = datetime.datetime(2018, 1, 1, 1, 23, 45,
                           tzinfo=datetime.timezone.utc)
    _roundtrip_pandas_dataframe(pd.DataFrame({'foo': [ts]}), write_kwargs={})
@pytest.mark.pandas
def test_pandas_parquet_custom_metadata(tempdir):
    """The b'pandas' schema metadata is persisted into the file footer."""
    df = alltypes_sample(size=10000)
    filename = tempdir / 'pandas_roundtrip.parquet'
    arrow_table = pa.Table.from_pandas(df)
    assert b'pandas' in arrow_table.schema.metadata
    _write_table(arrow_table, filename, version='2.0', coerce_timestamps='ms')
    metadata = pq.read_metadata(filename).metadata
    assert b'pandas' in metadata
    js = json.loads(metadata[b'pandas'].decode('utf8'))
    # A default RangeIndex is stored compactly, not as a data column
    assert js['index_columns'] == [{'kind': 'range',
                                    'name': None,
                                    'start': 0, 'stop': 10000,
                                    'step': 1}]
@pytest.mark.pandas
def test_pandas_parquet_column_multiindex(tempdir):
    """A two-level column MultiIndex survives a round trip."""
    df = alltypes_sample(size=10)
    # Pair each column name with the reversed list to get two levels
    df.columns = pd.MultiIndex.from_tuples(
        list(zip(df.columns, df.columns[::-1])),
        names=['level_1', 'level_2']
    )
    filename = tempdir / 'pandas_roundtrip.parquet'
    arrow_table = pa.Table.from_pandas(df)
    assert arrow_table.schema.pandas_metadata is not None
    _write_table(arrow_table, filename, version='2.0', coerce_timestamps='ms')
    table_read = pq.read_pandas(filename)
    df_read = table_read.to_pandas()
    tm.assert_frame_equal(df, df_read)
@pytest.mark.pandas
def test_pandas_parquet_2_0_roundtrip_read_pandas_no_index_written(tempdir):
    """With preserve_index=False no index columns are recorded, but the
    column metadata must still be complete."""
    df = alltypes_sample(size=10000)
    filename = tempdir / 'pandas_roundtrip.parquet'
    arrow_table = pa.Table.from_pandas(df, preserve_index=False)
    js = arrow_table.schema.pandas_metadata
    assert not js['index_columns']
    # ARROW-2170
    # While index_columns should be empty, columns needs to be filled still.
    assert js['columns']
    _write_table(arrow_table, filename, version='2.0', coerce_timestamps='ms')
    table_read = pq.read_pandas(filename)
    js = table_read.schema.pandas_metadata
    assert not js['index_columns']
    assert arrow_table.schema.metadata == table_read.schema.metadata
    df_read = table_read.to_pandas()
    tm.assert_frame_equal(df, df_read)
@pytest.mark.pandas
def test_pandas_parquet_1_0_roundtrip(tempdir):
    """Integer/float/bool/string dtypes round trip with the version 1.0
    writer; uint32 comes back widened to int64."""
    size = 10000
    np.random.seed(0)
    df = pd.DataFrame({
        'uint8': np.arange(size, dtype=np.uint8),
        'uint16': np.arange(size, dtype=np.uint16),
        'uint32': np.arange(size, dtype=np.uint32),
        'uint64': np.arange(size, dtype=np.uint64),
        # BUG fix: this column used dtype=np.int16, so 'int8' never actually
        # exercised int8.  Cast explicitly; wrap-around is fine for test data.
        'int8': np.arange(size, dtype=np.int64).astype(np.int8),
        'int16': np.arange(size, dtype=np.int16),
        'int32': np.arange(size, dtype=np.int32),
        'int64': np.arange(size, dtype=np.int64),
        'float32': np.arange(size, dtype=np.float32),
        'float64': np.arange(size, dtype=np.float64),
        'bool': np.random.randn(size) > 0,
        'str': [str(x) for x in range(size)],
        'str_with_nulls': [None] + [str(x) for x in range(size - 2)] + [None],
        'empty_str': [''] * size
    })
    filename = tempdir / 'pandas_roundtrip.parquet'
    arrow_table = pa.Table.from_pandas(df)
    _write_table(arrow_table, filename, version='1.0')
    table_read = _read_table(filename)
    df_read = table_read.to_pandas()

    # We pass uint32_t as int64_t if we write Parquet version 1.0
    df['uint32'] = df['uint32'].values.astype(np.int64)

    tm.assert_frame_equal(df, df_read)
@pytest.mark.pandas
def test_multiple_path_types(tempdir):
    """Writer and reader accept both PEP 519 path objects and plain
    string paths."""
    # Test compatibility with PEP 519 path-like objects
    path = tempdir / 'zzz.parquet'
    df = pd.DataFrame({'x': np.arange(10, dtype=np.int64)})
    _write_table(df, path)
    table_read = _read_table(path)
    df_read = table_read.to_pandas()
    tm.assert_frame_equal(df, df_read)

    # Test compatibility with plain string paths.
    # BUG fix: the old `str(tempdir) + 'zzz.parquet'` concatenation dropped
    # the path separator and created the file *next to* tempdir rather than
    # inside it.
    path = str(tempdir / 'zzz.parquet')
    df = pd.DataFrame({'x': np.arange(10, dtype=np.int64)})
    _write_table(df, path)
    table_read = _read_table(path)
    df_read = table_read.to_pandas()
    tm.assert_frame_equal(df, df_read)
@pytest.mark.pandas
def test_pandas_column_selection(tempdir):
    """read_table honours the columns= argument and deduplicates it."""
    size = 10000
    np.random.seed(0)
    df = pd.DataFrame({
        'uint8': np.arange(size, dtype=np.uint8),
        'uint16': np.arange(size, dtype=np.uint16)
    })
    filename = tempdir / 'pandas_roundtrip.parquet'
    arrow_table = pa.Table.from_pandas(df)
    _write_table(arrow_table, filename)
    table_read = _read_table(filename, columns=['uint8'])
    df_read = table_read.to_pandas()
    tm.assert_frame_equal(df[['uint8']], df_read)
    # ARROW-4267: Selection of duplicate columns still leads to these columns
    # being read uniquely.
    table_read = _read_table(filename, columns=['uint8', 'uint8'])
    df_read = table_read.to_pandas()
    tm.assert_frame_equal(df[['uint8']], df_read)
def _random_integers(size, dtype):
# We do not generate integers outside the int64 range
platform_int_info = np.iinfo('int_')
iinfo = np.iinfo(dtype)
return np.random.randint(max(iinfo.min, platform_int_info.min),
min(iinfo.max, platform_int_info.max),
size=size).astype(dtype)
def _test_dataframe(size=10000, seed=0):
    """Random DataFrame with one column per integer/float width, plus
    bools, random strings and all-null columns."""
    np.random.seed(seed)
    columns = {
        'uint8': _random_integers(size, np.uint8),
        'uint16': _random_integers(size, np.uint16),
        'uint32': _random_integers(size, np.uint32),
        'uint64': _random_integers(size, np.uint64),
        'int8': _random_integers(size, np.int8),
        'int16': _random_integers(size, np.int16),
        'int32': _random_integers(size, np.int32),
        'int64': _random_integers(size, np.int64),
        'float32': np.random.randn(size).astype(np.float32),
        'float64': np.arange(size, dtype=np.float64),
        'bool': np.random.randn(size) > 0,
        'strings': [tm.rands(10) for i in range(size)],
        'all_none': [None] * size,
        'all_none_category': [None] * size
    }
    # TODO(PARQUET-1015)
    # df['all_none_category'] = df['all_none_category'].astype('category')
    return pd.DataFrame(columns)
@pytest.mark.pandas
def test_pandas_parquet_native_file_roundtrip(tempdir):
    """Round trip through pyarrow's own buffer output stream and reader."""
    df = _test_dataframe(10000)
    sink = pa.BufferOutputStream()
    _write_table(pa.Table.from_pandas(df), sink, version="2.0")
    reader = pa.BufferReader(sink.getvalue())
    tm.assert_frame_equal(df, _read_table(reader).to_pandas())
@pytest.mark.pandas
def test_parquet_incremental_file_build(tempdir):
    """Tables appended one by one via ParquetWriter read back as the
    concatenation of all of them."""
    df = _test_dataframe(100)
    df['unique_id'] = 0
    arrow_table = pa.Table.from_pandas(df, preserve_index=False)
    out = pa.BufferOutputStream()
    writer = pq.ParquetWriter(out, arrow_table.schema, version='2.0')
    frames = []
    for i in range(10):
        # Vary one column per write so each appended chunk is distinct
        df['unique_id'] = i
        arrow_table = pa.Table.from_pandas(df, preserve_index=False)
        writer.write_table(arrow_table)
        frames.append(df.copy())
    writer.close()
    buf = out.getvalue()
    result = _read_table(pa.BufferReader(buf))
    expected = pd.concat(frames, ignore_index=True)
    tm.assert_frame_equal(result.to_pandas(), expected)
@pytest.mark.pandas
def test_read_pandas_column_subset(tempdir):
    """read_pandas honours an explicit column subset."""
    df = _test_dataframe(10000)
    sink = pa.BufferOutputStream()
    _write_table(pa.Table.from_pandas(df), sink, version="2.0")
    reader = pa.BufferReader(sink.getvalue())
    df_read = pq.read_pandas(reader, columns=['strings', 'uint8']).to_pandas()
    tm.assert_frame_equal(df[['strings', 'uint8']], df_read)
@pytest.mark.pandas
def test_pandas_parquet_empty_roundtrip(tempdir):
    """A zero-row DataFrame round trips through an in-memory stream."""
    df = _test_dataframe(0)
    sink = pa.BufferOutputStream()
    _write_table(pa.Table.from_pandas(df), sink, version="2.0")
    reader = pa.BufferReader(sink.getvalue())
    tm.assert_frame_equal(df, _read_table(reader).to_pandas())
@pytest.mark.pandas
def test_pandas_parquet_pyfile_roundtrip(tempdir):
    """Write to a plain Python file object, read back from BytesIO."""
    filename = tempdir / 'pandas_pyfile_roundtrip.parquet'
    size = 5
    df = pd.DataFrame({
        'int64': np.arange(size, dtype=np.int64),
        'float32': np.arange(size, dtype=np.float32),
        'float64': np.arange(size, dtype=np.float64),
        'bool': np.random.randn(size) > 0,
        'strings': ['foo', 'bar', None, 'baz', 'qux']
    })
    arrow_table = pa.Table.from_pandas(df)
    with filename.open('wb') as f:
        _write_table(arrow_table, f, version="1.0")
    data = io.BytesIO(filename.read_bytes())
    table_read = _read_table(data)
    df_read = table_read.to_pandas()
    tm.assert_frame_equal(df, df_read)
@pytest.mark.pandas
def test_pandas_parquet_configuration_options(tempdir):
    """Round trip under various writer options: dictionary encoding
    on/off, statistics on/off, and every supported compression codec."""
    size = 10000
    np.random.seed(0)
    df = pd.DataFrame({
        'uint8': np.arange(size, dtype=np.uint8),
        'uint16': np.arange(size, dtype=np.uint16),
        'uint32': np.arange(size, dtype=np.uint32),
        'uint64': np.arange(size, dtype=np.uint64),
        # BUG fix: this column used dtype=np.int16, so 'int8' never actually
        # exercised int8.  Cast explicitly; wrap-around is fine for test data.
        'int8': np.arange(size, dtype=np.int64).astype(np.int8),
        'int16': np.arange(size, dtype=np.int16),
        'int32': np.arange(size, dtype=np.int32),
        'int64': np.arange(size, dtype=np.int64),
        'float32': np.arange(size, dtype=np.float32),
        'float64': np.arange(size, dtype=np.float64),
        'bool': np.random.randn(size) > 0
    })
    filename = tempdir / 'pandas_roundtrip.parquet'
    arrow_table = pa.Table.from_pandas(df)

    for use_dictionary in [True, False]:
        _write_table(arrow_table, filename, version='2.0',
                     use_dictionary=use_dictionary)
        table_read = _read_table(filename)
        df_read = table_read.to_pandas()
        tm.assert_frame_equal(df, df_read)

    for write_statistics in [True, False]:
        _write_table(arrow_table, filename, version='2.0',
                     write_statistics=write_statistics)
        table_read = _read_table(filename)
        df_read = table_read.to_pandas()
        tm.assert_frame_equal(df, df_read)

    for compression in ['NONE', 'SNAPPY', 'GZIP', 'LZ4', 'ZSTD']:
        _write_table(arrow_table, filename, version='2.0',
                     compression=compression)
        table_read = _read_table(filename)
        df_read = table_read.to_pandas()
        tm.assert_frame_equal(df, df_read)
def make_sample_file(table_or_df):
    """Write *table_or_df* to an in-memory Parquet file and open it.

    Accepts either a pyarrow Table or a pandas DataFrame; returns the
    resulting ``pq.ParquetFile``.
    """
    if isinstance(table_or_df, pa.Table):
        table = table_or_df
    else:
        table = pa.Table.from_pandas(table_or_df)

    buf = io.BytesIO()
    _write_table(table, buf, compression='SNAPPY', version='2.0',
                 coerce_timestamps='ms')
    buf.seek(0)
    return pq.ParquetFile(buf)
@pytest.mark.pandas
def test_parquet_metadata_api():
    """Exercise the FileMetaData / ParquetSchema / row-group accessors."""
    df = alltypes_sample(size=10000)
    df = df.reindex(columns=sorted(df.columns))
    df.index = np.random.randint(0, 1000000, size=len(df))
    fileh = make_sample_file(df)
    ncols = len(df.columns)

    # Series of sniff tests
    meta = fileh.metadata
    repr(meta)
    assert meta.num_rows == len(df)
    assert meta.num_columns == ncols + 1  # +1 for index
    assert meta.num_row_groups == 1
    assert meta.format_version == '2.0'
    assert 'parquet-cpp' in meta.created_by
    assert isinstance(meta.serialized_size, int)
    assert isinstance(meta.metadata, dict)

    # Schema
    schema = fileh.schema
    assert meta.schema is schema
    assert len(schema) == ncols + 1  # +1 for index
    repr(schema)

    # Column 0 is 'bool' (columns were sorted above)
    col = schema[0]
    repr(col)
    assert col.name == df.columns[0]
    assert col.max_definition_level == 1
    # BUG fix: this assertion was accidentally duplicated; assert it once
    assert col.max_repetition_level == 0
    assert col.physical_type == 'BOOLEAN'
    assert col.converted_type == 'NONE'

    with pytest.raises(IndexError):
        schema[ncols + 1]  # +1 for index
    with pytest.raises(IndexError):
        schema[-1]

    # Row group metadata and per-column chunk metadata
    for rg in range(meta.num_row_groups):
        rg_meta = meta.row_group(rg)
        assert isinstance(rg_meta, pq.RowGroupMetaData)
        repr(rg_meta)
        for col in range(rg_meta.num_columns):
            col_meta = rg_meta.column(col)
            assert isinstance(col_meta, pq.ColumnChunkMetaData)
            repr(col_meta)

    with pytest.raises(IndexError):
        meta.row_group(-1)
    with pytest.raises(IndexError):
        meta.row_group(meta.num_row_groups + 1)

    rg_meta = meta.row_group(0)
    assert rg_meta.num_rows == len(df)
    assert rg_meta.num_columns == ncols + 1  # +1 for index
    assert rg_meta.total_byte_size > 0

    with pytest.raises(IndexError):
        col_meta = rg_meta.column(-1)
    with pytest.raises(IndexError):
        col_meta = rg_meta.column(ncols + 2)

    col_meta = rg_meta.column(0)
    assert col_meta.file_offset > 0
    assert col_meta.file_path == ''  # created from BytesIO
    assert col_meta.physical_type == 'BOOLEAN'
    assert col_meta.num_values == 10000
    assert col_meta.path_in_schema == 'bool'
    assert col_meta.is_stats_set is True
    assert isinstance(col_meta.statistics, pq.Statistics)
    assert col_meta.compression == 'SNAPPY'
    assert col_meta.encodings == ('PLAIN', 'RLE')
    assert col_meta.has_dictionary_page is False
    assert col_meta.dictionary_page_offset is None
    assert col_meta.data_page_offset > 0
    assert col_meta.total_compressed_size > 0
    assert col_meta.total_uncompressed_size > 0
    with pytest.raises(NotImplementedError):
        col_meta.has_index_page
    with pytest.raises(NotImplementedError):
        col_meta.index_page_offset
@pytest.mark.pandas
@pytest.mark.parametrize(
    (
        'data',
        'type',
        'physical_type',
        'min_value',
        'max_value',
        'null_count',
        'num_values',
        'distinct_count'
    ),
    [
        ([1, 2, 2, None, 4], pa.uint8(), 'INT32', 1, 4, 1, 4, 0),
        ([1, 2, 2, None, 4], pa.uint16(), 'INT32', 1, 4, 1, 4, 0),
        ([1, 2, 2, None, 4], pa.uint32(), 'INT32', 1, 4, 1, 4, 0),
        ([1, 2, 2, None, 4], pa.uint64(), 'INT64', 1, 4, 1, 4, 0),
        ([-1, 2, 2, None, 4], pa.int8(), 'INT32', -1, 4, 1, 4, 0),
        ([-1, 2, 2, None, 4], pa.int16(), 'INT32', -1, 4, 1, 4, 0),
        ([-1, 2, 2, None, 4], pa.int32(), 'INT32', -1, 4, 1, 4, 0),
        ([-1, 2, 2, None, 4], pa.int64(), 'INT64', -1, 4, 1, 4, 0),
        (
            [-1.1, 2.2, 2.3, None, 4.4], pa.float32(),
            'FLOAT', -1.1, 4.4, 1, 4, 0
        ),
        (
            [-1.1, 2.2, 2.3, None, 4.4], pa.float64(),
            'DOUBLE', -1.1, 4.4, 1, 4, 0
        ),
        (
            [u'', u'b', unichar(1000), None, u'aaa'], pa.binary(),
            'BYTE_ARRAY', b'', unichar(1000).encode('utf-8'), 1, 4, 0
        ),
        (
            [True, False, False, True, True], pa.bool_(),
            'BOOLEAN', False, True, 0, 5, 0
        ),
        (
            [b'\x00', b'b', b'12', None, b'aaa'], pa.binary(),
            'BYTE_ARRAY', b'\x00', b'b', 1, 4, 0
        ),
    ]
)
def test_parquet_column_statistics_api(data, type, physical_type, min_value,
                                       max_value, null_count, num_values,
                                       distinct_count):
    """Column chunk statistics report the expected physical type and
    min/max/null/value counts for each primitive type."""
    df = pd.DataFrame({'data': data})
    schema = pa.schema([pa.field('data', type)])
    table = pa.Table.from_pandas(df, schema=schema, safe=False)
    fileh = make_sample_file(table)
    meta = fileh.metadata
    rg_meta = meta.row_group(0)
    col_meta = rg_meta.column(0)
    stat = col_meta.statistics
    assert stat.has_min_max
    # Floats need an approximate comparison, hence the _close helper
    assert _close(type, stat.min, min_value)
    assert _close(type, stat.max, max_value)
    assert stat.null_count == null_count
    assert stat.num_values == num_values
    # TODO(kszucs) until parquet-cpp API doesn't expose HasDistinctCount
    assert stat.distinct_count == distinct_count
    assert stat.physical_type == physical_type
def _close(type, left, right):
    """Compare two statistics values, with a tolerance for float types."""
    if type == pa.float32():
        return abs(left - right) < 1E-7
    if type == pa.float64():
        return abs(left - right) < 1E-13
    return left == right
def test_statistics_convert_logical_types(tempdir):
    """Min/max statistics come back as logical Python values (ints,
    strings, time/datetime objects), not raw physical representations."""
    # (min, max, type) triples covering the logical-type conversions
    cases = [(10, 11164359321221007157, pa.uint64()),
             (10, 4294967295, pa.uint32()),
             (u"ähnlich", u"öffentlich", pa.utf8()),
             (datetime.time(10, 30, 0, 1000), datetime.time(15, 30, 0, 1000),
              pa.time32('ms')),
             (datetime.time(10, 30, 0, 1000), datetime.time(15, 30, 0, 1000),
              pa.time64('us')),
             (datetime.datetime(2019, 6, 24, 0, 0, 0, 1000),
              datetime.datetime(2019, 6, 25, 0, 0, 0, 1000),
              pa.timestamp('ms')),
             (datetime.datetime(2019, 6, 24, 0, 0, 0, 1000),
              datetime.datetime(2019, 6, 25, 0, 0, 0, 1000),
              pa.timestamp('us'))]
    for i, (min_val, max_val, typ) in enumerate(cases):
        t = pa.Table.from_arrays([pa.array([min_val, max_val], type=typ)],
                                 ['col'])
        path = str(tempdir / ('example{}.parquet'.format(i)))
        pq.write_table(t, path, version='2.0')
        pf = pq.ParquetFile(path)
        stats = pf.metadata.row_group(0).column(0).statistics
        assert stats.min == min_val
        assert stats.max == max_val
def test_parquet_write_disable_statistics(tempdir):
    """write_statistics=False disables stats for all columns; a list of
    column names enables them selectively."""
    table = pa.Table.from_pydict(
        {'a': pa.array([1, 2, 3]), 'b': pa.array(['a', 'b', 'c'])})
    _write_table(table, tempdir / 'data.parquet')
    # Default: both columns carry statistics
    meta = pq.read_metadata(tempdir / 'data.parquet')
    for col in [0, 1]:
        cc = meta.row_group(0).column(col)
        assert cc.is_stats_set is True
        assert cc.statistics is not None
    # Globally disabled: no column carries statistics
    _write_table(table, tempdir / 'data2.parquet', write_statistics=False)
    meta = pq.read_metadata(tempdir / 'data2.parquet')
    for col in [0, 1]:
        cc = meta.row_group(0).column(col)
        assert cc.is_stats_set is False
        assert cc.statistics is None
    # Selective: only column 'a' gets statistics
    _write_table(table, tempdir / 'data3.parquet', write_statistics=['a'])
    meta = pq.read_metadata(tempdir / 'data3.parquet')
    cc_a = meta.row_group(0).column(0)
    assert cc_a.is_stats_set is True
    assert cc_a.statistics is not None
    cc_b = meta.row_group(0).column(1)
    assert cc_b.is_stats_set is False
    assert cc_b.statistics is None
@pytest.mark.pandas
def test_compare_schemas():
    """equals()/==/!= semantics of ParquetSchema and ColumnSchema."""
    df = alltypes_sample(size=10000)
    fileh = make_sample_file(df)
    fileh2 = make_sample_file(df)
    # fileh3 keeps only every other column, so its schema must differ
    fileh3 = make_sample_file(df[df.columns[::2]])
    assert isinstance(fileh.schema, pq.ParquetSchema)
    assert fileh.schema.equals(fileh.schema)
    assert fileh.schema == fileh.schema
    assert fileh.schema.equals(fileh2.schema)
    assert fileh.schema == fileh2.schema
    assert fileh.schema != 'arbitrary object'
    assert not fileh.schema.equals(fileh3.schema)
    assert fileh.schema != fileh3.schema
    assert isinstance(fileh.schema[0], pq.ColumnSchema)
    assert fileh.schema[0].equals(fileh.schema[0])
    assert fileh.schema[0] == fileh.schema[0]
    assert not fileh.schema[0].equals(fileh.schema[1])
    assert fileh.schema[0] != fileh.schema[1]
    assert fileh.schema[0] != 'arbitrary object'
def test_validate_schema_write_table(tempdir):
    """Writing a table whose schema mismatches the writer's must raise."""
    schema = pa.schema([
        pa.field('POS', pa.uint32()),
        pa.field('desc', pa.string())
    ])
    # Inferred int64 for 'POS' deliberately clashes with the uint32 field
    table = pa.Table.from_arrays([pa.array([1]), pa.array(['bla'])],
                                 ['POS', 'desc'])
    path = tempdir / 'simple_validate_schema.parquet'
    with pq.ParquetWriter(path, schema,
                          version='2.0',
                          compression='snappy', flavor='spark') as writer:
        with pytest.raises(ValueError):
            writer.write_table(table)
@pytest.mark.pandas
def test_column_of_arrays(tempdir):
    """Columns containing numpy arrays round trip (version 2.0 writer)."""
    df, schema = dataframe_with_arrays()
    filename = tempdir / 'pandas_roundtrip.parquet'
    table = pa.Table.from_pandas(df, schema=schema)
    _write_table(table, filename, version="2.0", coerce_timestamps='ms')
    df_read = _read_table(filename).to_pandas()
    tm.assert_frame_equal(df, df_read)
@pytest.mark.pandas
def test_coerce_timestamps(tempdir):
    """coerce_timestamps='us' converts ms data nested inside a list
    column; an unknown unit raises ValueError."""
    from collections import OrderedDict
    # The timestamps sit inside a list column to exercise the coercion
    # path for non-flat data
    arrays = OrderedDict()
    fields = [pa.field('datetime64',
                       pa.list_(pa.timestamp('ms')))]
    arrays['datetime64'] = [
        np.array(['2007-07-13T01:23:34.123456789',
                  None,
                  '2010-08-13T05:46:57.437699912'],
                 dtype='datetime64[ms]'),
        None,
        None,
        np.array(['2007-07-13T02',
                  None,
                  '2010-08-13T05:46:57.437699912'],
                 dtype='datetime64[ms]'),
    ]
    df = pd.DataFrame(arrays)
    schema = pa.schema(fields)
    filename = tempdir / 'pandas_roundtrip.parquet'
    arrow_table = pa.Table.from_pandas(df, schema=schema)
    _write_table(arrow_table, filename, version="2.0", coerce_timestamps='us')
    table_read = _read_table(filename)
    df_read = table_read.to_pandas()
    # The data was stored as microseconds, so the expected values are the
    # originals cast to 'M8[us]'
    df_expected = df.copy()
    for i, x in enumerate(df_expected['datetime64']):
        if isinstance(x, np.ndarray):
            df_expected['datetime64'][i] = x.astype('M8[us]')
    tm.assert_frame_equal(df_expected, df_read)
    with pytest.raises(ValueError):
        _write_table(arrow_table, filename, version='2.0',
                     coerce_timestamps='unknown')
@pytest.mark.pandas
def test_coerce_timestamps_truncated(tempdir):
    """allow_truncated_timestamps=True permits lossy us -> ms coercion."""
    # dt_us carries sub-millisecond precision that 'ms' coercion discards
    dt_us = datetime.datetime(year=2017, month=1, day=1, hour=1, minute=1,
                              second=1, microsecond=1)
    dt_ms = datetime.datetime(year=2017, month=1, day=1, hour=1, minute=1,
                              second=1)
    fields_us = [pa.field('datetime64', pa.timestamp('us'))]
    arrays_us = {'datetime64': [dt_us, dt_ms]}
    df_us = pd.DataFrame(arrays_us)
    schema_us = pa.schema(fields_us)
    filename = tempdir / 'pandas_truncated.parquet'
    table_us = pa.Table.from_pandas(df_us, schema=schema_us)
    _write_table(table_us, filename, version="2.0", coerce_timestamps='ms',
                 allow_truncated_timestamps=True)
    table_ms = _read_table(filename)
    df_ms = table_ms.to_pandas()
    # After truncation both rows collapse to the millisecond value
    arrays_expected = {'datetime64': [dt_ms, dt_ms]}
    df_expected = pd.DataFrame(arrays_expected)
    tm.assert_frame_equal(df_expected, df_ms)
@pytest.mark.pandas
def test_column_of_lists(tempdir):
    """List-typed columns round trip with the version 2.0 writer."""
    df, schema = dataframe_with_lists(parquet_compatible=True)
    filename = tempdir / 'pandas_roundtrip.parquet'
    arrow_table = pa.Table.from_pandas(df, schema=schema)
    _write_table(arrow_table, filename, version='2.0')
    table_read = _read_table(filename)
    df_read = table_read.to_pandas()
    if PY2:
        # Normalize the date values in the expected frame to np.datetime64
        # so the comparison matches what comes back under Python 2
        for col in ['date32[day]_list', 'date64[ms]_list']:
            df[col] = df[col].apply(
                lambda x: list(map(np.datetime64, x)) if x else x
            )
    tm.assert_frame_equal(df, df_read)
@pytest.mark.pandas
def test_date_time_types(tempdir):
    """Round trip date/time/timestamp types, including the expected
    upcasts, and check the physical storage (INT64 vs. legacy INT96)."""
    t1 = pa.date32()
    data1 = np.array([17259, 17260, 17261], dtype='int32')
    a1 = pa.array(data1, type=t1)
    t2 = pa.date64()
    data2 = data1.astype('int64') * 86400000
    a2 = pa.array(data2, type=t2)
    t3 = pa.timestamp('us')
    start = pd.Timestamp('2001-01-01').value / 1000
    data3 = np.array([start, start + 1, start + 2], dtype='int64')
    a3 = pa.array(data3, type=t3)
    t4 = pa.time32('ms')
    data4 = np.arange(3, dtype='i4')
    a4 = pa.array(data4, type=t4)
    t5 = pa.time64('us')
    a5 = pa.array(data4.astype('int64'), type=t5)
    t6 = pa.time32('s')
    a6 = pa.array(data4, type=t6)
    # time32[s] comes back as time32[ms] (values multiplied by 1000)
    ex_t6 = pa.time32('ms')
    ex_a6 = pa.array(data4 * 1000, type=ex_t6)
    t7 = pa.timestamp('ns')
    start = pd.Timestamp('2001-01-01').value
    data7 = np.array([start, start + 1000, start + 2000],
                     dtype='int64')
    a7 = pa.array(data7, type=t7)
    table = pa.Table.from_arrays([a1, a2, a3, a4, a5, a6, a7],
                                 ['date32', 'date64', 'timestamp[us]',
                                  'time32[s]', 'time64[us]',
                                  'time32_from64[s]',
                                  'timestamp[ns]'])
    # Note a1 in the 'date64' slot: date64 reads back as date32
    expected = pa.Table.from_arrays([a1, a1, a3, a4, a5, ex_a6, a7],
                                    ['date32', 'date64', 'timestamp[us]',
                                     'time32[s]', 'time64[us]',
                                     'time32_from64[s]',
                                     'timestamp[ns]'])
    _check_roundtrip(table, expected=expected, version='2.0')
    t0 = pa.timestamp('ms')
    data0 = np.arange(4, dtype='int64')
    a0 = pa.array(data0, type=t0)
    t1 = pa.timestamp('us')
    data1 = np.arange(4, dtype='int64')
    a1 = pa.array(data1, type=t1)
    t2 = pa.timestamp('ns')
    data2 = np.arange(4, dtype='int64')
    a2 = pa.array(data2, type=t2)
    table = pa.Table.from_arrays([a0, a1, a2],
                                 ['ts[ms]', 'ts[us]', 'ts[ns]'])
    expected = pa.Table.from_arrays([a0, a1, a2],
                                    ['ts[ms]', 'ts[us]', 'ts[ns]'])
    # Default: all timestamp units stored with the INT64 physical type
    filename = tempdir / 'int64_timestamps.parquet'
    _write_table(table, filename, version='2.0')
    parquet_schema = pq.ParquetFile(filename).schema
    for i in range(3):
        assert parquet_schema.column(i).physical_type == 'INT64'
    read_table = _read_table(filename)
    assert read_table.equals(expected)
    # With the deprecated int96 option, ms/us data reads back as ns
    t0_ns = pa.timestamp('ns')
    data0_ns = np.array(data0 * 1000000, dtype='int64')
    a0_ns = pa.array(data0_ns, type=t0_ns)
    t1_ns = pa.timestamp('ns')
    data1_ns = np.array(data1 * 1000, dtype='int64')
    a1_ns = pa.array(data1_ns, type=t1_ns)
    expected = pa.Table.from_arrays([a0_ns, a1_ns, a2],
                                    ['ts[ms]', 'ts[us]', 'ts[ns]'])
    filename = tempdir / 'explicit_int96_timestamps.parquet'
    _write_table(table, filename, version='2.0',
                 use_deprecated_int96_timestamps=True)
    parquet_schema = pq.ParquetFile(filename).schema
    for i in range(3):
        assert parquet_schema.column(i).physical_type == 'INT96'
    read_table = _read_table(filename)
    assert read_table.equals(expected)
    # flavor='spark' produces the same INT96 storage
    filename = tempdir / 'spark_int96_timestamps.parquet'
    _write_table(table, filename, version='2.0',
                 flavor='spark')
    parquet_schema = pq.ParquetFile(filename).schema
    for i in range(3):
        assert parquet_schema.column(i).physical_type == 'INT96'
    read_table = _read_table(filename)
    assert read_table.equals(expected)
def test_timestamp_restore_timezone():
    """The timezone on a timestamp column survives a round trip."""
    ts_type = pa.timestamp('ms', tz='America/New_York')
    column = pa.array([1, 2, 3], type=ts_type)
    _check_roundtrip(pa.table([column], names=['f0']))
@pytest.mark.pandas
def test_list_of_datetime_time_roundtrip():
    """A single cell holding a list of datetime.time values round trips."""
    times = pd.to_datetime(['09:00', '09:30', '10:00', '10:30', '11:00',
                            '11:30', '12:00'])
    _roundtrip_pandas_dataframe(pd.DataFrame({'time': [times.time]}),
                                write_kwargs={})
@pytest.mark.pandas
def test_parquet_version_timestamp_differences():
    """Timestamp resolution handling differs between the 1.0 and 2.0
    format versions, explicit coercion, and the int96 compatibility mode."""
    i_s = pd.Timestamp('2010-01-01').value / 1000000000
    d_s = np.arange(i_s, i_s + 10, 1, dtype='int64')
    d_ms = d_s * 1000
    d_us = d_ms * 1000
    d_ns = d_us * 1000
    a_s = pa.array(d_s, type=pa.timestamp('s'))
    a_ms = pa.array(d_ms, type=pa.timestamp('ms'))
    a_us = pa.array(d_us, type=pa.timestamp('us'))
    a_ns = pa.array(d_ns, type=pa.timestamp('ns'))
    names = ['ts:s', 'ts:ms', 'ts:us', 'ts:ns']
    table = pa.Table.from_arrays([a_s, a_ms, a_us, a_ns], names)
    # Default (1.0): seconds upcast to ms, ns downcast to us
    expected = pa.Table.from_arrays([a_ms, a_ms, a_us, a_us], names)
    _check_roundtrip(table, expected)
    # Version 2.0: ns is representable, only seconds are upcast
    expected = pa.Table.from_arrays([a_ms, a_ms, a_us, a_ns], names)
    _check_roundtrip(table, expected, version='2.0')
    # Explicit coercion forces every column to the requested unit
    expected = pa.Table.from_arrays([a_ms, a_ms, a_ms, a_ms], names)
    _check_roundtrip(table, expected, coerce_timestamps='ms')
    expected = pa.Table.from_arrays([a_us, a_us, a_us, a_us], names)
    _check_roundtrip(table, expected, version='2.0', coerce_timestamps='us')
    # Deprecated int96 timestamps always read back as nanoseconds
    expected = pa.Table.from_arrays([a_ns, a_ns, a_ns, a_ns], names)
    _check_roundtrip(table, expected,
                     use_deprecated_int96_timestamps=True)
    _check_roundtrip(table, expected, version='2.0',
                     use_deprecated_int96_timestamps=True)
def test_large_list_records():
    """Long, ragged list columns (with empty lists and nulls) round trip."""
    lengths = np.random.randint(0, 500, size=50)
    lengths[::10] = 0  # sprinkle in empty lists
    # Every 8th entry is a null list instead of a value
    values = [list(map(int, np.random.randint(0, 100, size=n)))
              if i % 8 else None
              for i, n in enumerate(lengths)]
    table = pa.Table.from_arrays([pa.array(values)], ['int_lists'])
    _check_roundtrip(table)
def test_sanitized_spark_field_names():
    """flavor='spark' replaces characters Spark cannot handle in names."""
    column = pa.array([0, 1, 2, 3, 4])
    table = pa.Table.from_arrays([column], ['prohib; ,\t{}'])
    result = _roundtrip_table(table, write_table_kwargs={'flavor': 'spark'})
    assert result.schema[0].name == 'prohib______'
@pytest.mark.pandas
def test_spark_flavor_preserves_pandas_metadata():
    """flavor='spark' must keep the pandas index metadata intact."""
    df = _test_dataframe(size=100)
    df.index = np.arange(0, 10 * len(df), 10)
    df.index.name = 'foo'
    roundtripped = _roundtrip_pandas_dataframe(df, {'version': '2.0',
                                                    'flavor': 'spark'})
    tm.assert_frame_equal(roundtripped, df)
def test_fixed_size_binary():
    """Fixed-width binary values (including a null) round trip."""
    values = [b'fooooooooo', None, b'barooooooo', b'quxooooooo']
    column = pa.array(values, type=pa.binary(10))
    _check_roundtrip(pa.Table.from_arrays([column], ['binary[10]']))
@pytest.mark.pandas
def test_multithreaded_read():
    """Threaded and single-threaded reads must yield identical tables."""
    table = pa.Table.from_pandas(alltypes_sample(size=10000))
    buf = io.BytesIO()
    _write_table(table, buf, compression='SNAPPY', version='2.0')
    buf.seek(0)
    threaded = _read_table(buf, use_threads=True)
    buf.seek(0)
    serial = _read_table(buf, use_threads=False)
    assert threaded.equals(serial)
@pytest.mark.pandas
def test_min_chunksize():
    """chunk_size=-1 means 'single chunk'; chunk_size=0 must raise."""
    data = pd.DataFrame([np.arange(4)], columns=['A', 'B', 'C', 'D'])
    table = pa.Table.from_pandas(data.reset_index())
    buf = io.BytesIO()
    _write_table(table, buf, chunk_size=-1)
    buf.seek(0)
    assert _read_table(buf).equals(table)
    with pytest.raises(ValueError):
        _write_table(table, buf, chunk_size=0)
@pytest.mark.pandas
def test_pass_separate_metadata():
    """ParquetFile accepts externally supplied FileMetaData."""
    df = alltypes_sample(size=10000)
    buf = io.BytesIO()
    _write_table(pa.Table.from_pandas(df), buf,
                 compression='snappy', version='2.0')
    # Read the footer separately, then hand it to ParquetFile
    buf.seek(0)
    metadata = pq.read_metadata(buf)
    buf.seek(0)
    fileh = pq.ParquetFile(buf, metadata=metadata)
    tm.assert_frame_equal(df, fileh.read().to_pandas())
@pytest.mark.pandas
def test_read_single_row_group():
    """Each row group can be read individually and reassembled."""
    N, K = 10000, 4
    df = alltypes_sample(size=N)
    a_table = pa.Table.from_pandas(df)
    buf = io.BytesIO()
    # BUG fix: `N / K` is a float under Python 3; row_group_size should be
    # an integer row count
    _write_table(a_table, buf, row_group_size=N // K,
                 compression='snappy', version='2.0')
    buf.seek(0)
    pf = pq.ParquetFile(buf)
    assert pf.num_row_groups == K
    row_groups = [pf.read_row_group(i) for i in range(K)]
    result = pa.concat_tables(row_groups)
    tm.assert_frame_equal(df, result.to_pandas())
@pytest.mark.pandas
def test_read_single_row_group_with_column_subset():
    """Per-row-group reads honour and deduplicate the column list."""
    N, K = 10000, 4
    df = alltypes_sample(size=N)
    a_table = pa.Table.from_pandas(df)
    buf = io.BytesIO()
    # BUG fix: `N / K` is a float under Python 3; row_group_size should be
    # an integer row count
    _write_table(a_table, buf, row_group_size=N // K,
                 compression='snappy', version='2.0')
    buf.seek(0)
    pf = pq.ParquetFile(buf)
    cols = list(df.columns[:2])
    row_groups = [pf.read_row_group(i, columns=cols) for i in range(K)]
    result = pa.concat_tables(row_groups)
    tm.assert_frame_equal(df[cols], result.to_pandas())
    # Duplicate columns in the selection must still be read uniquely
    row_groups = [pf.read_row_group(i, columns=cols + cols) for i in range(K)]
    result = pa.concat_tables(row_groups)
    tm.assert_frame_equal(df[cols], result.to_pandas())
@pytest.mark.pandas
def test_scan_contents():
    """ParquetFile.scan_contents counts all rows, optionally restricted
    to a column subset."""
    N, K = 10000, 4
    df = alltypes_sample(size=N)
    a_table = pa.Table.from_pandas(df)
    buf = io.BytesIO()
    # BUG fix: `N / K` is a float under Python 3; row_group_size should be
    # an integer row count
    _write_table(a_table, buf, row_group_size=N // K,
                 compression='snappy', version='2.0')
    buf.seek(0)
    pf = pq.ParquetFile(buf)
    assert pf.scan_contents() == 10000
    assert pf.scan_contents(df.columns[:4]) == 10000
@pytest.mark.pandas
def test_parquet_piece_read(tempdir):
    """A ParquetDatasetPiece over a single file reads back the whole table."""
    frame = _test_dataframe(1000)
    expected = pa.Table.from_pandas(frame)

    target = tempdir / 'parquet_piece_read.parquet'
    _write_table(expected, target, version='2.0')

    piece = pq.ParquetDatasetPiece(target)
    assert piece.read().equals(expected)
@pytest.mark.pandas
def test_parquet_piece_open_and_get_metadata(tempdir):
    """Piece.read yields a Table and get_metadata yields FileMetaData."""
    frame = _test_dataframe(100)
    expected = pa.Table.from_pandas(frame)

    target = tempdir / 'parquet_piece_read.parquet'
    _write_table(expected, target, version='2.0')

    piece = pq.ParquetDatasetPiece(target)
    observed = piece.read()
    assert isinstance(observed, pa.Table)

    meta = piece.get_metadata()
    assert isinstance(meta, pq.FileMetaData)

    assert expected == observed
def test_parquet_piece_basics():
    """String formatting and equality semantics of ParquetDatasetPiece."""
    path = '/baz.parq'

    plain = pq.ParquetDatasetPiece(path)
    with_group = pq.ParquetDatasetPiece(path, row_group=1)
    with_keys = pq.ParquetDatasetPiece(
        path, row_group=1, partition_keys=[('foo', 0), ('bar', 1)])

    assert str(plain) == path
    assert str(with_group) == '/baz.parq | row_group=1'
    assert str(with_keys) == 'partition[foo=0, bar=1] /baz.parq | row_group=1'

    # pieces compare equal to themselves; partition keys break equality
    assert plain == plain
    assert with_group == with_group
    assert with_keys == with_keys
    assert plain != with_keys
def test_partition_set_dictionary_type():
    """PartitionSet builds string/integer dictionaries; other key types raise."""
    strings = pq.PartitionSet('key1', [u('foo'), u('bar'), u('baz')])
    integers = pq.PartitionSet('key2', [2007, 2008, 2009])

    assert isinstance(strings.dictionary, pa.StringArray)
    assert isinstance(integers.dictionary, pa.IntegerArray)

    # datetime keys are unsupported dictionary types
    dates = pq.PartitionSet('key2', [datetime.datetime(2007, 1, 1)])
    with pytest.raises(TypeError):
        dates.dictionary
@pytest.mark.pandas
def test_read_partitioned_directory(tempdir):
    """Partitioned-directory round trip on the local filesystem."""
    _partition_test_for_filesystem(LocalFileSystem.get_instance(), tempdir)
@pytest.mark.pandas
def test_create_parquet_dataset_multi_threaded(tempdir):
    """Multi-threaded metadata discovery matches single-threaded discovery."""
    fs = LocalFileSystem.get_instance()
    _partition_test_for_filesystem(fs, tempdir)

    reference = pq.ParquetManifest(tempdir, filesystem=fs,
                                   metadata_nthreads=1)
    dataset = pq.ParquetDataset(tempdir, filesystem=fs, metadata_nthreads=16)
    assert len(dataset.pieces) > 0

    partitions = dataset.partitions
    assert len(partitions.partition_names) > 0
    # partition layout must be identical regardless of thread count
    assert partitions.partition_names == reference.partitions.partition_names
    assert len(partitions.levels) == len(reference.partitions.levels)
@pytest.mark.pandas
def test_equivalency(tempdir):
    """Filter predicates: a flat list is one conjunction; a list of lists is a
    disjunction (OR) of conjunctions. Also checks that embedded NUL characters
    in string filter values raise NotImplementedError.
    """
    fs = LocalFileSystem.get_instance()
    base_path = tempdir
    integer_keys = [0, 1]
    string_keys = ['a', 'b', 'c']
    boolean_keys = [True, False]
    partition_spec = [
        ['integer', integer_keys],
        ['string', string_keys],
        ['boolean', boolean_keys]
    ]
    df = pd.DataFrame({
        'integer': np.array(integer_keys, dtype='i4').repeat(15),
        'string': np.tile(np.tile(np.array(string_keys, dtype=object), 5), 2),
        'boolean': np.tile(np.tile(np.array(boolean_keys, dtype='bool'), 5),
                           3),
    }, columns=['integer', 'string', 'boolean'])
    _generate_partition_directories(fs, base_path, partition_spec, df)
    # flat filter list: all three predicates AND-ed together
    dataset = pq.ParquetDataset(
        base_path, filesystem=fs,
        filters=[('integer', '=', 1), ('string', '!=', 'b'),
                 ('boolean', '==', True)]
    )
    table = dataset.read()
    result_df = (table.to_pandas().reset_index(drop=True))
    assert 0 not in result_df['integer'].values
    assert 'b' not in result_df['string'].values
    assert False not in result_df['boolean'].values
    # list of lists: each inner list is a conjunction; results are OR-ed
    filters = [
        [
            ('integer', '=', 1),
            ('string', '!=', 'b'),
            ('boolean', '==', 'True')
        ],
        [('integer', '=', 0), ('boolean', '==', 'False')]
    ]
    dataset = pq.ParquetDataset(base_path, filesystem=fs, filters=filters)
    table = dataset.read()
    result_df = table.to_pandas().reset_index(drop=True)
    # rebuild the two conjunctions row-wise; the result must be exactly
    # their union (counts add up to the total row count)
    df_filter_1 = (np.array(result_df['integer']) == 1) \
        & (np.array(result_df['string']) != 'b') \
        & (np.array(result_df['boolean']) == 'True')
    df_filter_2 = (np.array(result_df['integer']) == 0) \
        & (np.array(result_df['boolean']) == 'False')
    assert df_filter_1.sum() > 0
    assert df_filter_2.sum() > 0
    assert result_df.shape[0] == (df_filter_1.sum() + df_filter_2.sum())
    # embedded NUL bytes / characters in filter values are unsupported
    with pytest.raises(NotImplementedError):
        filters = [[('string', '==', b'1\0a')]]
        pq.ParquetDataset(base_path, filesystem=fs, filters=filters)
    with pytest.raises(NotImplementedError):
        filters = [[('string', '==', u'1\0a')]]
        pq.ParquetDataset(base_path, filesystem=fs, filters=filters)
@pytest.mark.pandas
def test_cutoff_exclusive_integer(tempdir):
    """Strict < and > filters exclude the cutoff values themselves."""
    fs = LocalFileSystem.get_instance()

    keys = [0, 1, 2, 3, 4]
    spec = [
        ['integers', keys],
    ]
    frame = pd.DataFrame({
        'index': np.arange(5),
        'integers': np.array(keys, dtype='i4'),
    }, columns=['index', 'integers'])

    _generate_partition_directories(fs, tempdir, spec, frame)

    dataset = pq.ParquetDataset(
        tempdir, filesystem=fs,
        filters=[
            ('integers', '<', 4),
            ('integers', '>', 1),
        ]
    )
    result = (dataset.read().to_pandas()
              .sort_values(by='index')
              .reset_index(drop=True))

    # only the strictly-interior keys survive
    assert [int(v) for v in result['integers'].values] == [2, 3]
@pytest.mark.pandas
@pytest.mark.xfail(
    raises=TypeError,
    reason='Loss of type information in creation of categoricals.'
)
def test_cutoff_exclusive_datetime(tempdir):
    """Strict </> filters over date partition keys (known xfail, see reason)."""
    fs = LocalFileSystem.get_instance()
    base_path = tempdir
    date_keys = [
        datetime.date(2018, 4, 9),
        datetime.date(2018, 4, 10),
        datetime.date(2018, 4, 11),
        datetime.date(2018, 4, 12),
        datetime.date(2018, 4, 13)
    ]
    partition_spec = [
        ['dates', date_keys]
    ]
    N = 5
    df = pd.DataFrame({
        'index': np.arange(N),
        'dates': np.array(date_keys, dtype='datetime64'),
    }, columns=['index', 'dates'])
    _generate_partition_directories(fs, base_path, partition_spec, df)
    # cutoff values are given as strings; only 2018-04-11 lies strictly inside
    dataset = pq.ParquetDataset(
        base_path, filesystem=fs,
        filters=[
            ('dates', '<', "2018-04-12"),
            ('dates', '>', "2018-04-10")
        ]
    )
    table = dataset.read()
    result_df = (table.to_pandas()
                 .sort_values(by='index')
                 .reset_index(drop=True))
    expected = pd.Categorical(
        np.array([datetime.date(2018, 4, 11)], dtype='datetime64'),
        categories=np.array(date_keys, dtype='datetime64'))
    assert result_df['dates'].values == expected
@pytest.mark.pandas
def test_inclusive_integer(tempdir):
    """Inclusive <= / >= filters keep rows equal to the cutoff values.

    Fix: the original converted each value twice
    (``[int(x) for x in map(int, ...)]``); one conversion suffices.
    """
    fs = LocalFileSystem.get_instance()
    base_path = tempdir

    integer_keys = [0, 1, 2, 3, 4]
    partition_spec = [
        ['integers', integer_keys],
    ]
    N = 5

    df = pd.DataFrame({
        'index': np.arange(N),
        'integers': np.array(integer_keys, dtype='i4'),
    }, columns=['index', 'integers'])

    _generate_partition_directories(fs, base_path, partition_spec, df)

    dataset = pq.ParquetDataset(
        base_path, filesystem=fs,
        filters=[
            ('integers', '<=', 3),
            ('integers', '>=', 2),
        ]
    )
    table = dataset.read()
    result_df = (table.to_pandas()
                 .sort_values(by='index')
                 .reset_index(drop=True))

    # boundary keys 2 and 3 are both retained
    result_list = [int(x) for x in result_df['integers'].values]
    assert result_list == [2, 3]
@pytest.mark.pandas
def test_inclusive_set(tempdir):
    """'in' filters keep exactly the rows whose key is a member of the set."""
    fs = LocalFileSystem.get_instance()
    base_path = tempdir
    integer_keys = [0, 1]
    string_keys = ['a', 'b', 'c']
    boolean_keys = [True, False]
    partition_spec = [
        ['integer', integer_keys],
        ['string', string_keys],
        ['boolean', boolean_keys]
    ]
    df = pd.DataFrame({
        'integer': np.array(integer_keys, dtype='i4').repeat(15),
        'string': np.tile(np.tile(np.array(string_keys, dtype=object), 5), 2),
        'boolean': np.tile(np.tile(np.array(boolean_keys, dtype='bool'), 5),
                           3),
    }, columns=['integer', 'string', 'boolean'])
    _generate_partition_directories(fs, base_path, partition_spec, df)
    dataset = pq.ParquetDataset(
        base_path, filesystem=fs,
        filters=[('integer', 'in', {1}), ('string', 'in', {'a', 'b'}),
                 ('boolean', 'in', {True})]
    )
    table = dataset.read()
    result_df = (table.to_pandas().reset_index(drop=True))
    # values outside the given sets must have been filtered away
    assert 0 not in result_df['integer'].values
    assert 'c' not in result_df['string'].values
    assert False not in result_df['boolean'].values
@pytest.mark.pandas
def test_invalid_pred_op(tempdir):
    """Invalid filter operators / operand types must raise ValueError."""
    fs = LocalFileSystem.get_instance()
    base_path = tempdir
    integer_keys = [0, 1, 2, 3, 4]
    partition_spec = [
        ['integers', integer_keys],
    ]
    N = 5
    df = pd.DataFrame({
        'index': np.arange(N),
        'integers': np.array(integer_keys, dtype='i4'),
    }, columns=['index', 'integers'])
    _generate_partition_directories(fs, base_path, partition_spec, df)
    # '=<' is not a recognized comparison operator
    with pytest.raises(ValueError):
        pq.ParquetDataset(base_path,
                          filesystem=fs,
                          filters=[
                              ('integers', '=<', 3),
                          ])
    # 'in' with an empty set is rejected
    with pytest.raises(ValueError):
        pq.ParquetDataset(base_path,
                          filesystem=fs,
                          filters=[
                              ('integers', 'in', set()),
                          ])
    # a set operand is only valid with the 'in' operator
    with pytest.raises(ValueError):
        pq.ParquetDataset(base_path,
                          filesystem=fs,
                          filters=[
                              ('integers', '!=', {3}),
                          ])
@pytest.mark.pandas
def test_filters_read_table(tempdir):
    """read_table / read_pandas accept filters in both flat and DNF forms."""
    fs = LocalFileSystem.get_instance()

    keys = [0, 1, 2, 3, 4]
    spec = [
        ['integers', keys],
    ]
    frame = pd.DataFrame({
        'index': np.arange(5),
        'integers': np.array(keys, dtype='i4'),
    }, columns=['index', 'integers'])

    _generate_partition_directories(fs, tempdir, spec, frame)

    # flat list of predicates
    flat = pq.read_table(
        tempdir, filesystem=fs, filters=[('integers', '<', 3)])
    assert flat.num_rows == 3

    # DNF form: list of conjunction lists
    dnf = pq.read_table(
        tempdir, filesystem=fs, filters=[[('integers', '<', 3)]])
    assert dnf.num_rows == 3

    via_pandas = pq.read_pandas(
        tempdir, filters=[('integers', '<', 3)])
    assert via_pandas.num_rows == 3
@pytest.yield_fixture
def s3_example():
    """Yield (s3fs filesystem, fresh bucket URI); remove the URI on teardown."""
    access_key = os.environ['PYARROW_TEST_S3_ACCESS_KEY']
    secret_key = os.environ['PYARROW_TEST_S3_SECRET_KEY']
    bucket_name = os.environ['PYARROW_TEST_S3_BUCKET']

    import s3fs
    fs = s3fs.S3FileSystem(key=access_key, secret=secret_key)

    # unique per-test directory inside the shared test bucket
    bucket_uri = 's3://{0}/{1}'.format(bucket_name, guid())
    fs.mkdir(bucket_uri)

    yield fs, bucket_uri
    fs.rm(bucket_uri, recursive=True)
@pytest.mark.pandas
@pytest.mark.s3
def test_read_partitioned_directory_s3fs(s3_example):
    """Partitioned datasets are readable through wrapped and raw s3fs handles."""
    from pyarrow.filesystem import S3FSWrapper

    fs, bucket_uri = s3_example
    _partition_test_for_filesystem(S3FSWrapper(fs), bucket_uri)

    # the raw s3fs object should also be accepted directly
    dataset = pq.ParquetDataset(bucket_uri, filesystem=fs)
    dataset.read()
def _partition_test_for_filesystem(fs, base_path):
    """Write a two-level (foo, bar) partitioned dataset via *fs* and read it
    back, checking that partition columns return as categoricals appended
    after the data columns.
    """
    foo_keys = [0, 1]
    bar_keys = ['a', 'b', 'c']
    partition_spec = [
        ['foo', foo_keys],
        ['bar', bar_keys]
    ]
    N = 30
    df = pd.DataFrame({
        'index': np.arange(N),
        'foo': np.array(foo_keys, dtype='i4').repeat(15),
        'bar': np.tile(np.tile(np.array(bar_keys, dtype=object), 5), 2),
        'values': np.random.randn(N)
    }, columns=['index', 'foo', 'bar', 'values'])
    _generate_partition_directories(fs, base_path, partition_spec, df)
    dataset = pq.ParquetDataset(base_path, filesystem=fs)
    table = dataset.read()
    result_df = (table.to_pandas()
                 .sort_values(by='index')
                 .reset_index(drop=True))
    expected_df = (df.sort_values(by='index')
                   .reset_index(drop=True)
                   .reindex(columns=result_df.columns))
    # partition keys are reconstructed as categorical columns
    expected_df['foo'] = pd.Categorical(df['foo'], categories=foo_keys)
    expected_df['bar'] = pd.Categorical(df['bar'], categories=bar_keys)
    # partition columns come after the data columns
    assert (result_df.columns == ['index', 'values', 'foo', 'bar']).all()
    tm.assert_frame_equal(result_df, expected_df)
def _generate_partition_directories(fs, base_dir, partition_spec, df):
    """Materialize *df* under *base_dir* as a hive-style partition layout.

    partition_spec is e.g. [['foo', [0, 1]], ['bar', ['a', 'b', 'c']]]; one
    parquet file is written per leaf directory, and a _SUCCESS marker is
    touched at every level.
    """
    # depth of the partition hierarchy
    DEPTH = len(partition_spec)
    def _visit_level(base_dir, level, part_keys):
        name, values = partition_spec[level]
        for value in values:
            this_part_keys = part_keys + [(name, value)]
            level_dir = base_dir / '{0}={1}'.format(name, value)
            fs.mkdir(level_dir)
            if level == DEPTH - 1:
                # leaf: write only the rows matching this key path
                file_path = level_dir / guid()
                filtered_df = _filter_partition(df, this_part_keys)
                part_table = pa.Table.from_pandas(filtered_df)
                with fs.open(file_path, 'wb') as f:
                    _write_table(part_table, f)
                assert fs.exists(file_path)
                (level_dir / '_SUCCESS').touch()
            else:
                _visit_level(level_dir, level + 1, this_part_keys)
                (level_dir / '_SUCCESS').touch()
    _visit_level(base_dir, 0, [])
def _test_read_common_metadata_files(fs, base_path):
    """Check that a _common_metadata sidecar is discovered and exposes the
    dataset schema, for both a string path and a one-element path list."""
    N = 100
    df = pd.DataFrame({
        'index': np.arange(N),
        'values': np.random.randn(N)
    }, columns=['index', 'values'])
    base_path = str(base_path)
    data_path = os.path.join(base_path, 'data.parquet')
    table = pa.Table.from_pandas(df)
    with fs.open(data_path, 'wb') as f:
        _write_table(table, f)
    metadata_path = os.path.join(base_path, '_common_metadata')
    with fs.open(metadata_path, 'wb') as f:
        pq.write_metadata(table.schema, f)
    dataset = pq.ParquetDataset(base_path, filesystem=fs)
    assert dataset.common_metadata_path == str(metadata_path)
    with fs.open(data_path) as f:
        common_schema = pq.read_metadata(f).schema
    assert dataset.schema.equals(common_schema)
    # a single-element list of paths behaves the same as the bare path
    dataset2 = pq.ParquetDataset([base_path], filesystem=fs)
    assert dataset2.schema.equals(dataset.schema)
@pytest.mark.pandas
def test_read_common_metadata_files(tempdir):
    """_common_metadata discovery works against the local filesystem."""
    _test_read_common_metadata_files(LocalFileSystem.get_instance(), tempdir)
@pytest.mark.pandas
def test_read_metadata_files(tempdir):
    """A _metadata sidecar is discovered and matches the data file's schema."""
    fs = LocalFileSystem.get_instance()

    frame = pd.DataFrame({
        'index': np.arange(100),
        'values': np.random.randn(100)
    }, columns=['index', 'values'])

    data_path = tempdir / 'data.parquet'
    table = pa.Table.from_pandas(frame)
    with fs.open(data_path, 'wb') as f:
        _write_table(table, f)

    metadata_path = tempdir / '_metadata'
    with fs.open(metadata_path, 'wb') as f:
        pq.write_metadata(table.schema, f)

    dataset = pq.ParquetDataset(tempdir, filesystem=fs)
    assert dataset.metadata_path == str(metadata_path)

    with fs.open(data_path) as f:
        file_schema = pq.read_metadata(f).schema
    assert dataset.schema.equals(file_schema)
@pytest.mark.pandas
def test_read_schema(tempdir):
    """pq.read_schema (with and without memory map) matches the written schema."""
    frame = pd.DataFrame({
        'index': np.arange(100),
        'values': np.random.randn(100)
    }, columns=['index', 'values'])

    target = tempdir / 'test.parquet'
    table = pa.Table.from_pandas(frame)
    _write_table(table, target)

    plain = pq.read_schema(target)
    mapped = pq.read_schema(target, memory_map=True)

    assert table.schema.equals(plain, check_metadata=False)
    assert table.schema.equals(mapped, check_metadata=False)
    # the pandas metadata blob must survive the round trip as well
    assert table.schema.metadata[b'pandas'] == plain.metadata[b'pandas']
def _filter_partition(df, part_keys):
    """Return the rows of *df* matching every (name, value) pair in
    *part_keys*, with those partition columns dropped from the result."""
    mask = np.ones(len(df), dtype=bool)

    partition_cols = []
    for name, value in part_keys:
        partition_cols.append(name)
        # date/datetime keys must be compared as pandas Timestamps
        if isinstance(value, (datetime.date, datetime.datetime)):
            value = pd.Timestamp(value)
        mask &= df[name] == value

    return df[mask].drop(partition_cols, axis=1)
@pytest.mark.pandas
def test_read_multiple_files(tempdir):
    """Read a directory of uniform parquet files by explicit path list; mixing
    in a file with different columns must raise ValueError."""
    nfiles = 10
    size = 5
    dirpath = tempdir / guid()
    dirpath.mkdir()
    test_data = []
    paths = []
    for i in range(nfiles):
        df = _test_dataframe(size, seed=i)
        # uint32 widened to int64 — presumably to sidestep unsigned handling
        # in the writer; NOTE(review): confirm intent
        df['uint32'] = df['uint32'].astype(np.int64)
        path = dirpath / '{}.parquet'.format(i)
        table = pa.Table.from_pandas(df)
        _write_table(table, path)
        test_data.append(table)
        paths.append(path)
    # Write a _SUCCESS.crc file (must be ignored by dataset discovery)
    (dirpath / '_SUCCESS.crc').touch()
    def read_multiple_files(paths, columns=None, use_threads=True, **kwargs):
        dataset = pq.ParquetDataset(paths, **kwargs)
        return dataset.read(columns=columns, use_threads=use_threads)
    result = read_multiple_files(paths)
    expected = pa.concat_tables(test_data)
    assert result.equals(expected)
    # Read with provided metadata
    metadata = pq.read_metadata(paths[0])
    result2 = read_multiple_files(paths, metadata=metadata)
    assert result2.equals(expected)
    result3 = pa.localfs.read_parquet(dirpath, schema=metadata.schema)
    assert result3.equals(expected)
    # Read column subset
    to_read = [0, 2, 6, result.num_columns - 1]
    col_names = [result.field(i).name for i in to_read]
    out = pa.localfs.read_parquet(dirpath, columns=col_names)
    expected = pa.Table.from_arrays([result.column(i) for i in to_read],
                                    names=col_names,
                                    metadata=result.schema.metadata)
    assert out.equals(expected)
    # Read with multiple threads
    pa.localfs.read_parquet(dirpath, use_threads=True)
    # Test failure modes with non-uniform metadata
    bad_apple = _test_dataframe(size, seed=i).iloc[:, :4]
    bad_apple_path = tempdir / '{}.parquet'.format(guid())
    t = pa.Table.from_pandas(bad_apple)
    _write_table(t, bad_apple_path)
    bad_meta = pq.read_metadata(bad_apple_path)
    with pytest.raises(ValueError):
        read_multiple_files(paths + [bad_apple_path])
    with pytest.raises(ValueError):
        read_multiple_files(paths, metadata=bad_meta)
    mixed_paths = [bad_apple_path, paths[0]]
    with pytest.raises(ValueError):
        read_multiple_files(mixed_paths, schema=bad_meta.schema)
    with pytest.raises(ValueError):
        read_multiple_files(mixed_paths)
@pytest.mark.pandas
def test_dataset_read_pandas(tempdir):
    """read_pandas on a directory dataset restores a column subset with index."""
    nfiles, size = 5, 5

    dirpath = tempdir / guid()
    dirpath.mkdir()

    frames = []
    for i in range(nfiles):
        frame = _test_dataframe(size, seed=i)
        # give each file a disjoint, named integer index
        frame.index = np.arange(i * size, (i + 1) * size)
        frame.index.name = 'index'

        _write_table(pa.Table.from_pandas(frame),
                     dirpath / '{}.parquet'.format(i))
        frames.append(frame)

    columns = ['uint8', 'strings']
    dataset = pq.ParquetDataset(dirpath)
    result = dataset.read_pandas(columns=columns).to_pandas()
    expected = pd.concat([f[columns] for f in frames])

    tm.assert_frame_equal(result, expected)
@pytest.mark.pandas
def test_dataset_no_memory_map(tempdir):
    """ARROW-2627: ParquetDataset works with memory mapping disabled."""
    dirpath = tempdir / guid()
    dirpath.mkdir()

    frame = _test_dataframe(10, seed=0)
    table = pa.Table.from_pandas(frame)
    _write_table(table, dirpath / '0.parquet', version='2.0')

    # TODO(wesm): Not sure how to easily check that memory mapping is _not_
    # used. Mocking is not especially easy for pa.memory_map
    dataset = pq.ParquetDataset(dirpath, memory_map=False)
    assert dataset.pieces[0].read().equals(table)
@pytest.mark.pandas
@pytest.mark.parametrize('preserve_index', [True, False, None])
def test_dataset_read_pandas_common_metadata(tempdir, preserve_index):
    """ARROW-1103: when per-file pandas metadata has been stripped, the index
    information is recovered from the _metadata common file."""
    nfiles = 5
    size = 5
    dirpath = tempdir / guid()
    dirpath.mkdir()
    test_data = []
    frames = []
    paths = []
    for i in range(nfiles):
        df = _test_dataframe(size, seed=i)
        df.index = pd.Index(np.arange(i * size, (i + 1) * size), name='index')
        path = dirpath / '{}.parquet'.format(i)
        table = pa.Table.from_pandas(df, preserve_index=preserve_index)
        # Obliterate metadata
        table = table.replace_schema_metadata(None)
        assert table.schema.metadata is None
        _write_table(table, path)
        test_data.append(table)
        frames.append(df)
        paths.append(path)
    # Write _metadata common file
    table_for_metadata = pa.Table.from_pandas(
        df, preserve_index=preserve_index
    )
    pq.write_metadata(table_for_metadata.schema, dirpath / '_metadata')
    dataset = pq.ParquetDataset(dirpath)
    columns = ['uint8', 'strings']
    result = dataset.read_pandas(columns=columns).to_pandas()
    expected = pd.concat([x[columns] for x in frames])
    # with preserve_index=False the index name is not recorded at all
    expected.index.name = (
        df.index.name if preserve_index is not False else None)
    tm.assert_frame_equal(result, expected)
def _make_example_multifile_dataset(base_path, nfiles=10, file_nrows=5):
    """Write *nfiles* parquet files of *file_nrows* rows each under
    *base_path* and return the list of file paths."""
    paths = []
    for i in range(nfiles):
        target = base_path / '{}.parquet'.format(i)
        _write_table(_test_dataframe(file_nrows, seed=i), target)
        paths.append(target)
    return paths
@pytest.mark.pandas
def test_ignore_private_directories(tempdir):
    """Underscore-prefixed directories are skipped during dataset discovery."""
    dirpath = tempdir / guid()
    dirpath.mkdir()

    written = _make_example_multifile_dataset(dirpath, nfiles=10,
                                              file_nrows=5)

    # private directory
    (dirpath / '_impala_staging').mkdir()

    discovered = set(piece.path for piece in pq.ParquetDataset(dirpath).pieces)
    assert discovered == set(map(str, written))
@pytest.mark.pandas
def test_ignore_hidden_files_dot(tempdir):
    """Files whose names begin with '.' are ignored by dataset discovery."""
    dirpath = tempdir / guid()
    dirpath.mkdir()

    written = _make_example_multifile_dataset(dirpath, nfiles=10,
                                              file_nrows=5)

    with (dirpath / '.DS_Store').open('wb') as f:
        f.write(b'gibberish')

    with (dirpath / '.private').open('wb') as f:
        f.write(b'gibberish')

    discovered = set(piece.path for piece in pq.ParquetDataset(dirpath).pieces)
    assert discovered == set(map(str, written))
@pytest.mark.pandas
def test_ignore_hidden_files_underscore(tempdir):
    """Files whose names begin with '_' are ignored by dataset discovery."""
    dirpath = tempdir / guid()
    dirpath.mkdir()

    written = _make_example_multifile_dataset(dirpath, nfiles=10,
                                              file_nrows=5)

    with (dirpath / '_committed_123').open('wb') as f:
        f.write(b'abcd')

    with (dirpath / '_started_321').open('wb') as f:
        f.write(b'abcd')

    discovered = set(piece.path for piece in pq.ParquetDataset(dirpath).pieces)
    assert discovered == set(map(str, written))
@pytest.mark.pandas
def test_multiindex_duplicate_values(tempdir):
    """A MultiIndex containing repeated level values survives a round trip."""
    numbers = [0, 1, 2]
    index = pd.MultiIndex.from_arrays(
        [['foo', 'foo', 'bar'], numbers],
        names=['foobar', 'some_numbers'],
    )
    frame = pd.DataFrame({'numbers': numbers}, index=index)
    expected = pa.Table.from_pandas(frame)

    target = tempdir / 'dup_multi_index_levels.parquet'
    _write_table(expected, target)

    observed = _read_table(target)
    assert expected.equals(observed)

    tm.assert_frame_equal(observed.to_pandas(), frame)
@pytest.mark.pandas
def test_write_error_deletes_incomplete_file(tempdir):
    """ARROW-1285: a failed write must not leave a partial file on disk."""
    df = pd.DataFrame({'a': list('abc'),
                       'b': list(range(1, 4)),
                       'c': np.arange(3, 6).astype('u1'),
                       'd': np.arange(4.0, 7.0, dtype='float64'),
                       'e': [True, False, True],
                       'f': pd.Categorical(list('abc')),
                       'g': pd.date_range('20130101', periods=3),
                       'h': pd.date_range('20130101', periods=3,
                                          tz='US/Eastern'),
                       'i': pd.date_range('20130101', periods=3, freq='ns')})
    pdf = pa.Table.from_pandas(df)
    filename = tempdir / 'tmp_file'
    try:
        # presumably the 'ns'-frequency column makes the default writer
        # raise — NOTE(review): confirm which column triggers the failure
        _write_table(pdf, filename)
    except pa.ArrowException:
        pass
    # the incomplete output must have been cleaned up
    assert not filename.exists()
@pytest.mark.pandas
def test_noncoerced_nanoseconds_written_without_exception(tempdir):
    """ARROW-1957: the Parquet version 2.0 writer preserves Arrow nanosecond
    timestamps by default; lossy coercion still raises.

    Fix: ``pd.DatetimeIndex(start=..., freq=..., periods=...)`` was deprecated
    and later removed from pandas; ``pd.date_range`` builds the same index.
    """
    n = 9
    df = pd.DataFrame({'x': range(n)},
                      index=pd.date_range(start='2017-01-01',
                                          freq='1n',
                                          periods=n))
    tb = pa.Table.from_pandas(df)

    filename = tempdir / 'written.parquet'
    try:
        pq.write_table(tb, filename, version='2.0')
    except Exception:
        pass
    assert filename.exists()

    recovered_table = pq.read_table(filename)
    assert tb.equals(recovered_table)

    # Loss of data thru coercion (without explicit override) still an error
    filename = tempdir / 'not_written.parquet'
    with pytest.raises(ValueError):
        pq.write_table(tb, filename, coerce_timestamps='ms', version='2.0')
def test_read_non_existent_file(tempdir):
    """Reading a missing file raises an error that names the offending path.

    Fix: the original try/except pattern passed vacuously when read_table
    raised nothing at all; pytest.raises makes the expectation explicit.
    """
    path = 'non-existent-file.parquet'
    with pytest.raises(Exception) as exc_info:
        pq.read_table(path)
    # the error message must mention the path so the user can act on it
    assert path in str(exc_info.value)
def test_read_table_doesnt_warn(datadir):
    """Reading a v0.7.1-era file must not emit any warnings."""
    with pytest.warns(None) as captured:
        pq.read_table(datadir / 'v0.7.1.parquet')

    assert len(captured) == 0
def _test_write_to_dataset_with_partitions(base_path,
                                           filesystem=None,
                                           schema=None,
                                           index_name=None):
    """ARROW-1400: partitioned write_to_dataset round trip.

    Fix: ``pd.np.nan`` — the ``pd.np`` alias is deprecated and removed in
    pandas 2.0 — replaced with ``np.nan`` (numpy is already imported here).
    """
    output_df = pd.DataFrame({'group1': list('aaabbbbccc'),
                              'group2': list('eefeffgeee'),
                              'num': list(range(10)),
                              'nan': [np.nan] * 10,
                              'date': np.arange('2017-01-01', '2017-01-11',
                                                dtype='datetime64[D]')})
    cols = output_df.columns.tolist()
    partition_by = ['group1', 'group2']
    output_table = pa.Table.from_pandas(output_df, schema=schema, safe=False,
                                        preserve_index=False)
    pq.write_to_dataset(output_table, base_path, partition_by,
                        filesystem=filesystem)

    metadata_path = os.path.join(base_path, '_common_metadata')
    if filesystem is not None:
        with filesystem.open(metadata_path, 'wb') as f:
            pq.write_metadata(output_table.schema, f)
    else:
        pq.write_metadata(output_table.schema, metadata_path)

    # ARROW-2891: Ensure the output_schema is preserved when writing a
    # partitioned dataset
    dataset = pq.ParquetDataset(base_path,
                                filesystem=filesystem,
                                validate_schema=True)
    # ARROW-2209: Ensure the dataset schema also includes the partition columns
    dataset_cols = set(dataset.schema.to_arrow_schema().names)
    assert dataset_cols == set(output_table.schema.names)

    input_table = dataset.read()
    input_df = input_table.to_pandas()

    # Read data back in and compare with original DataFrame
    # Partitioned columns added to the end of the DataFrame when read
    input_df_cols = input_df.columns.tolist()
    assert partition_by == input_df_cols[-1 * len(partition_by):]

    # Partitioned columns become 'categorical' dtypes
    input_df = input_df[cols]
    for col in partition_by:
        output_df[col] = output_df[col].astype('category')
    assert output_df.equals(input_df)
def _test_write_to_dataset_no_partitions(base_path, filesystem=None):
    """ARROW-1400: without partition columns, write_to_dataset appends one
    new file to the root path per call."""
    output_df = pd.DataFrame({'group1': list('aaabbbbccc'),
                              'group2': list('eefeffgeee'),
                              'num': list(range(10)),
                              'date': np.arange('2017-01-01', '2017-01-11',
                                                dtype='datetime64[D]')})
    cols = output_df.columns.tolist()
    output_table = pa.Table.from_pandas(output_df)
    if filesystem is None:
        filesystem = LocalFileSystem.get_instance()
    # Without partitions, append files to root_path
    n = 5
    for i in range(n):
        pq.write_to_dataset(output_table, base_path,
                            filesystem=filesystem)
    output_files = [file for file in filesystem.ls(base_path)
                    if file.endswith(".parquet")]
    assert len(output_files) == n
    # Deduplicated incoming DataFrame should match
    # original outgoing Dataframe
    input_table = pq.ParquetDataset(base_path,
                                    filesystem=filesystem).read()
    input_df = input_table.to_pandas()
    input_df = input_df.drop_duplicates()
    input_df = input_df[cols]
    assert output_df.equals(input_df)
@pytest.mark.pandas
def test_write_to_dataset_with_partitions(tempdir):
    """Partitioned write_to_dataset round trip on the local filesystem."""
    _test_write_to_dataset_with_partitions(str(tempdir))
@pytest.mark.pandas
def test_write_to_dataset_with_partitions_and_schema(tempdir):
    """Partitioned round trip with an explicitly supplied Arrow schema."""
    explicit_schema = pa.schema([
        pa.field('group1', type=pa.string()),
        pa.field('group2', type=pa.string()),
        pa.field('num', type=pa.int64()),
        pa.field('nan', type=pa.int32()),
        pa.field('date', type=pa.timestamp(unit='us')),
    ])
    _test_write_to_dataset_with_partitions(str(tempdir),
                                           schema=explicit_schema)
@pytest.mark.pandas
def test_write_to_dataset_with_partitions_and_index_name(tempdir):
    """Partitioned round trip with a named index."""
    _test_write_to_dataset_with_partitions(str(tempdir),
                                           index_name='index_name')
@pytest.mark.pandas
def test_write_to_dataset_no_partitions(tempdir):
    """Unpartitioned write_to_dataset round trip on the local filesystem."""
    _test_write_to_dataset_no_partitions(str(tempdir))
@pytest.mark.pandas
def test_write_to_dataset_with_partitions_and_custom_filenames(tempdir):
    """ARROW-3538: partition filenames follow the user-supplied callback.

    Fix: ``pd.np.nan`` — the ``pd.np`` alias is deprecated and removed in
    pandas 2.0 — replaced with ``np.nan``.
    """
    output_df = pd.DataFrame({'group1': list('aaabbbbccc'),
                              'group2': list('eefeffgeee'),
                              'num': list(range(10)),
                              'nan': [np.nan] * 10,
                              'date': np.arange('2017-01-01', '2017-01-11',
                                                dtype='datetime64[D]')})
    partition_by = ['group1', 'group2']
    output_table = pa.Table.from_pandas(output_df)
    path = str(tempdir)

    def partition_filename_callback(keys):
        # one file per (group1, group2) combination, e.g. 'a-e.parquet'
        return "{0}-{1}.parquet".format(*keys)

    pq.write_to_dataset(output_table, path,
                        partition_by, partition_filename_callback)

    dataset = pq.ParquetDataset(path)

    # ARROW-3538: Ensure partition filenames match the given pattern
    # defined in the local function partition_filename_callback
    expected_basenames = [
        'a-e.parquet', 'a-f.parquet',
        'b-e.parquet', 'b-f.parquet',
        'b-g.parquet', 'c-e.parquet'
    ]
    output_basenames = [os.path.basename(p.path) for p in dataset.pieces]

    assert sorted(expected_basenames) == sorted(output_basenames)
@pytest.mark.large_memory
def test_large_table_int32_overflow():
    """Writing a table longer than INT32_MAX rows must not overflow."""
    size = np.iinfo('int32').max + 1

    ones = pa.array(np.ones(size, dtype='uint8'), type=pa.uint8())
    table = pa.Table.from_arrays([ones], names=['one'])

    _write_table(table, io.BytesIO())
def _simple_table_roundtrip(table):
    """Write *table* to an in-memory buffer and read it straight back."""
    sink = pa.BufferOutputStream()
    _write_table(table, sink)
    return _read_table(sink.getvalue())
@pytest.mark.pandas
@pytest.mark.large_memory
def test_binary_array_overflow_to_chunked():
    """ARROW-3762: >2GB of binary data must come back as a ChunkedArray."""
    # 2^31 + 1 bytes
    values = [b'x'] + [
        b'x' * (1 << 20)
    ] * 2 * (1 << 10)
    df = pd.DataFrame({'byte_col': values})
    tbl = pa.Table.from_pandas(df, preserve_index=False)
    read_tbl = _simple_table_roundtrip(tbl)
    col0_data = read_tbl[0]
    assert isinstance(col0_data, pa.ChunkedArray)
    # Split up into 2GB chunks
    assert col0_data.num_chunks == 2
    assert tbl.equals(read_tbl)
@pytest.mark.pandas
@pytest.mark.large_memory
def test_list_of_binary_large_cell():
    """ARROW-4688: list-of-binary cells totalling near 2GB round-trip."""
    data = []

    # TODO(wesm): handle chunked children
    # 2^31 - 1 bytes in a single cell
    # data.append([b'x' * (1 << 20)] * 2047 + [b'x' * ((1 << 20) - 1)])

    # A little under 2GB in cell each containing approximately 10MB each
    data.extend([[b'x' * 1000000] * 10] * 214)

    arr = pa.array(data)
    table = pa.Table.from_arrays([arr], ['chunky_cells'])
    read_table = _simple_table_roundtrip(table)
    assert table.equals(read_table)
@pytest.mark.pandas
def test_index_column_name_duplicate(tempdir):
    """An index sharing its name with a retained column survives a round trip."""
    data = {
        'close': {
            pd.Timestamp('2017-06-30 01:31:00'): 154.99958999999998,
            pd.Timestamp('2017-06-30 01:32:00'): 154.99958999999998,
        },
        'time': {
            pd.Timestamp('2017-06-30 01:31:00'): pd.Timestamp(
                '2017-06-30 01:31:00'
            ),
            pd.Timestamp('2017-06-30 01:32:00'): pd.Timestamp(
                '2017-06-30 01:32:00'
            ),
        }
    }
    # 'time' is both the index and an ordinary column (drop=False)
    expected = pd.DataFrame(data).set_index('time', drop=False)
    target = str(tempdir / 'data.parquet')

    _write_table(pa.Table.from_pandas(expected), target)

    observed = _read_table(target).to_pandas()
    tm.assert_frame_equal(observed, expected)
@pytest.mark.pandas
def test_parquet_nested_convenience(tempdir):
    """ARROW-1684: nested (list) columns read back, selected alone or together."""
    frame = pd.DataFrame({
        'a': [[1, 2, 3], None, [4, 5], []],
        'b': [[1.], None, None, [6., 7.]],
    })

    target = str(tempdir / 'nested_convenience.parquet')
    _write_table(pa.Table.from_pandas(frame, preserve_index=False), target)

    only_a = pq.read_table(target, columns=['a'])
    tm.assert_frame_equal(only_a.to_pandas(), frame[['a']])

    both = pq.read_table(target, columns=['a', 'b'])
    tm.assert_frame_equal(both.to_pandas(), frame)
@pytest.mark.pandas
def test_backwards_compatible_index_naming(datadir):
    """A file written by pyarrow 0.7.1 (unnamed RangeIndex) reads back as
    expected."""
    expected_string = b"""\
carat        cut  color  clarity  depth  table  price     x     y     z
 0.23      Ideal      E      SI2   61.5   55.0    326  3.95  3.98  2.43
 0.21    Premium      E      SI1   59.8   61.0    326  3.89  3.84  2.31
 0.23       Good      E      VS1   56.9   65.0    327  4.05  4.07  2.31
 0.29    Premium      I      VS2   62.4   58.0    334  4.20  4.23  2.63
 0.31       Good      J      SI2   63.3   58.0    335  4.34  4.35  2.75
 0.24  Very Good      J     VVS2   62.8   57.0    336  3.94  3.96  2.48
 0.24  Very Good      I     VVS1   62.3   57.0    336  3.95  3.98  2.47
 0.26  Very Good      H      SI1   61.9   55.0    337  4.07  4.11  2.53
 0.22       Fair      E      VS2   65.1   61.0    337  3.87  3.78  2.49
 0.23  Very Good      H      VS1   59.4   61.0    338  4.00  4.05  2.39"""
    # columns in the fixture text are separated by runs of >= 2 spaces
    expected = pd.read_csv(io.BytesIO(expected_string), sep=r'\s{2,}',
                           index_col=None, header=0, engine='python')
    table = _read_table(datadir / 'v0.7.1.parquet')
    result = table.to_pandas()
    tm.assert_frame_equal(result, expected)
@pytest.mark.pandas
def test_backwards_compatible_index_multi_level_named(datadir):
    """A pyarrow 0.7.1 file with a fully-named MultiIndex reads back as
    expected."""
    expected_string = b"""\
carat        cut  color  clarity  depth  table  price     x     y     z
 0.23      Ideal      E      SI2   61.5   55.0    326  3.95  3.98  2.43
 0.21    Premium      E      SI1   59.8   61.0    326  3.89  3.84  2.31
 0.23       Good      E      VS1   56.9   65.0    327  4.05  4.07  2.31
 0.29    Premium      I      VS2   62.4   58.0    334  4.20  4.23  2.63
 0.31       Good      J      SI2   63.3   58.0    335  4.34  4.35  2.75
 0.24  Very Good      J     VVS2   62.8   57.0    336  3.94  3.96  2.48
 0.24  Very Good      I     VVS1   62.3   57.0    336  3.95  3.98  2.47
 0.26  Very Good      H      SI1   61.9   55.0    337  4.07  4.11  2.53
 0.22       Fair      E      VS2   65.1   61.0    337  3.87  3.78  2.49
 0.23  Very Good      H      VS1   59.4   61.0    338  4.00  4.05  2.39"""
    # three of the columns form the (named) MultiIndex in the fixture file
    expected = pd.read_csv(
        io.BytesIO(expected_string), sep=r'\s{2,}',
        index_col=['cut', 'color', 'clarity'],
        header=0, engine='python'
    ).sort_index()
    table = _read_table(datadir / 'v0.7.1.all-named-index.parquet')
    result = table.to_pandas()
    tm.assert_frame_equal(result, expected)
@pytest.mark.pandas
def test_backwards_compatible_index_multi_level_some_named(datadir):
    """A pyarrow 0.7.1 file with a partially-named MultiIndex reads back as
    expected (the middle level is unnamed)."""
    expected_string = b"""\
carat        cut  color  clarity  depth  table  price     x     y     z
 0.23      Ideal      E      SI2   61.5   55.0    326  3.95  3.98  2.43
 0.21    Premium      E      SI1   59.8   61.0    326  3.89  3.84  2.31
 0.23       Good      E      VS1   56.9   65.0    327  4.05  4.07  2.31
 0.29    Premium      I      VS2   62.4   58.0    334  4.20  4.23  2.63
 0.31       Good      J      SI2   63.3   58.0    335  4.34  4.35  2.75
 0.24  Very Good      J     VVS2   62.8   57.0    336  3.94  3.96  2.48
 0.24  Very Good      I     VVS1   62.3   57.0    336  3.95  3.98  2.47
 0.26  Very Good      H      SI1   61.9   55.0    337  4.07  4.11  2.53
 0.22       Fair      E      VS2   65.1   61.0    337  3.87  3.78  2.49
 0.23  Very Good      H      VS1   59.4   61.0    338  4.00  4.05  2.39"""
    expected = pd.read_csv(
        io.BytesIO(expected_string),
        sep=r'\s{2,}', index_col=['cut', 'color', 'clarity'],
        header=0, engine='python'
    ).sort_index()
    # the fixture file leaves the middle index level unnamed
    expected.index = expected.index.set_names(['cut', None, 'clarity'])
    table = _read_table(datadir / 'v0.7.1.some-named-index.parquet')
    result = table.to_pandas()
    tm.assert_frame_equal(result, expected)
@pytest.mark.pandas
def test_backwards_compatible_column_metadata_handling(datadir):
    """Column metadata written by pyarrow 0.7.1 round-trips, including when
    only a subset of columns is selected."""
    expected = pd.DataFrame(
        {'a': [1, 2, 3], 'b': [.1, .2, .3],
         'c': pd.date_range("2017-01-01", periods=3, tz='Europe/Brussels')})
    expected.index = pd.MultiIndex.from_arrays(
        [['a', 'b', 'c'],
         pd.date_range("2017-01-01", periods=3, tz='Europe/Brussels')],
        names=['index', None])
    path = datadir / 'v0.7.1.column-metadata-handling.parquet'
    table = _read_table(path)
    result = table.to_pandas()
    tm.assert_frame_equal(result, expected)
    # selecting a subset drops the (unrelated) MultiIndex entirely
    table = _read_table(path, columns=['a'])
    result = table.to_pandas()
    tm.assert_frame_equal(result, expected[['a']].reset_index(drop=True))
def _make_dataset_for_pickling(tempdir, N=100):
    """Build a small on-disk ParquetDataset: one data file holding three
    identical row groups plus a schema-only ``_metadata`` sidecar.
    """
    data_path = tempdir / 'data.parquet'
    filesystem = LocalFileSystem.get_instance()

    frame = pd.DataFrame({
        'index': np.arange(N),
        'values': np.random.randn(N)
    }, columns=['index', 'values'])
    table = pa.Table.from_pandas(frame)

    n_groups = 3
    with pq.ParquetWriter(data_path, table.schema) as writer:
        for _ in range(n_groups):
            writer.write_table(table)
    assert pq.ParquetFile(data_path).metadata.num_row_groups == n_groups

    # Schema-only _metadata file so the dataset discovers it.
    metadata_path = tempdir / '_metadata'
    with filesystem.open(metadata_path, 'wb') as sink:
        pq.write_metadata(table.schema, sink)

    dataset = pq.ParquetDataset(tempdir, filesystem=filesystem)
    assert dataset.metadata_path == str(metadata_path)
    return dataset
@pytest.mark.pandas
@pytest.mark.parametrize('pickler', [
    pytest.param(pickle, id='builtin'),
    pytest.param(pytest.importorskip('cloudpickle'), id='cloudpickle')
])
def test_pickle_dataset(tempdir, datadir, pickler):
    """ParquetDataset, its metadata, schema, columns, pieces and row-group
    metadata must all round-trip through builtin pickle and cloudpickle."""
    def is_pickleable(obj):
        # Round-trip equality under the parametrized pickler.
        return obj == pickler.loads(pickler.dumps(obj))

    dataset = _make_dataset_for_pickling(tempdir)
    assert is_pickleable(dataset)
    assert is_pickleable(dataset.metadata)
    assert is_pickleable(dataset.metadata.schema)
    assert len(dataset.metadata.schema)
    for column in dataset.metadata.schema:
        assert is_pickleable(column)

    for piece in dataset.pieces:
        assert is_pickleable(piece)
        metadata = piece.get_metadata()
        assert metadata.num_row_groups
        for i in range(metadata.num_row_groups):
            assert is_pickleable(metadata.row_group(i))
@pytest.mark.pandas
def test_decimal_roundtrip(tempdir):
    """Round-trip decimal columns for every (precision, scale) combination
    supported by Parquet (precision 1..38)."""
    num_values = 10
    columns = {}
    for precision in range(1, 39):
        for scale in range(0, precision + 1):
            # Re-seed per column so each column's values are reproducible.
            with util.random_seed(0):
                values = [
                    util.randdecimal(precision, scale)
                    for _ in range(num_values)
                ]
            name = 'dec_precision_{:d}_scale_{:d}'.format(precision, scale)
            columns[name] = values

    expected = pd.DataFrame(columns)
    string_filename = str(tempdir / 'decimals.parquet')
    _write_table(pa.Table.from_pandas(expected), string_filename)
    result = _read_table(string_filename).to_pandas()
    tm.assert_frame_equal(result, expected)
@pytest.mark.pandas
@pytest.mark.xfail(
    raises=pa.ArrowException, reason='Parquet does not support negative scale'
)
def test_decimal_roundtrip_negative_scale(tempdir):
    """A negative-scale decimal (1.23E4) is expected to fail on write."""
    expected = pd.DataFrame({'decimal_num': [decimal.Decimal('1.23E4')]})
    string_filename = str(tempdir / 'decimals.parquet')
    _write_table(pa.Table.from_pandas(expected), string_filename)
    result = _read_table(string_filename).to_pandas()
    tm.assert_frame_equal(result, expected)
@pytest.mark.pandas
def test_parquet_writer_context_obj(tempdir):
    """ParquetWriter as a context manager: ten appended tables read back
    as one concatenated frame once the with-block closes the writer."""
    df = _test_dataframe(100)
    df['unique_id'] = 0
    arrow_table = pa.Table.from_pandas(df, preserve_index=False)
    out = pa.BufferOutputStream()

    with pq.ParquetWriter(out, arrow_table.schema, version='2.0') as writer:
        frames = []
        for i in range(10):
            df['unique_id'] = i
            arrow_table = pa.Table.from_pandas(df, preserve_index=False)
            writer.write_table(arrow_table)
            frames.append(df.copy())

    # Exiting the with-block finalized the file; it must now be readable.
    buf = out.getvalue()
    result = _read_table(pa.BufferReader(buf))
    expected = pd.concat(frames, ignore_index=True)
    tm.assert_frame_equal(result.to_pandas(), expected)
@pytest.mark.pandas
def test_parquet_writer_context_obj_with_exception(tempdir):
    """ParquetWriter used as a context manager must still finalize the file
    when the body raises: tables written before the error stay readable.
    """
    df = _test_dataframe(100)
    df['unique_id'] = 0
    arrow_table = pa.Table.from_pandas(df, preserve_index=False)
    out = pa.BufferOutputStream()
    error_text = 'Artificial Error'

    frames = []
    # BUG FIX: the original wrapped the with-block in a broad
    # try/except Exception and asserted on str(e); if no exception had been
    # raised at all the test would silently pass. pytest.raises guarantees
    # the ValueError actually propagates out of the writer context.
    with pytest.raises(ValueError, match=error_text):
        with pq.ParquetWriter(out,
                              arrow_table.schema,
                              version='2.0') as writer:
            for i in range(10):
                df['unique_id'] = i
                arrow_table = pa.Table.from_pandas(df, preserve_index=False)
                writer.write_table(arrow_table)
                frames.append(df.copy())
                if i == 5:
                    raise ValueError(error_text)

    # The six tables written before the exception must be readable.
    buf = out.getvalue()
    result = _read_table(pa.BufferReader(buf))
    expected = pd.concat(frames, ignore_index=True)
    tm.assert_frame_equal(result.to_pandas(), expected)
@pytest.mark.pandas
def test_zlib_compression_bug():
    # ARROW-3514: "zlib deflate failed, output buffer too small"
    table = pa.Table.from_arrays([pa.array(['abc', 'def'])], ['some_col'])
    sink = io.BytesIO()
    pq.write_table(table, sink, compression='gzip')

    sink.seek(0)
    roundtrip = pq.read_table(sink)
    tm.assert_frame_equal(roundtrip.to_pandas(), table.to_pandas())
@pytest.mark.pandas
def test_merging_parquet_tables_with_different_pandas_metadata(tempdir):
    # ARROW-3728: Merging Parquet Files - Pandas Meta in Schema Mismatch
    """Two tables whose schemas differ only in pandas metadata can be
    appended to the same ParquetWriter."""
    schema = pa.schema([
        pa.field('int', pa.int16()),
        pa.field('float', pa.float32()),
        pa.field('string', pa.string())
    ])
    df1 = pd.DataFrame({
        'int': np.arange(3, dtype=np.uint8),
        'float': np.arange(3, dtype=np.float32),
        'string': ['ABBA', 'EDDA', 'ACDC']
    })
    df2 = pd.DataFrame({
        'int': [4, 5],
        'float': [1.1, None],
        'string': [None, None]
    })
    table1 = pa.Table.from_pandas(df1, schema=schema, preserve_index=False)
    table2 = pa.Table.from_pandas(df2, schema=schema, preserve_index=False)

    # Schemas differ only by metadata (source pandas dtypes differ).
    assert not table1.schema.equals(table2.schema)
    assert table1.schema.equals(table2.schema, check_metadata=False)

    # BUG FIX: the writer was never closed, so the parquet footer was never
    # written and the file handle leaked; close via the context manager.
    with pq.ParquetWriter(tempdir / 'merged.parquet', schema=schema) as writer:
        writer.write_table(table1)
        writer.write_table(table2)
def test_empty_row_groups(tempdir):
    # ARROW-3020: zero-row row groups must round-trip.
    table = pa.Table.from_arrays([pa.array([], type='int32')], ['f0'])
    path = tempdir / 'empty_row_groups.parquet'

    num_groups = 3
    with pq.ParquetWriter(path, table.schema) as writer:
        for _ in range(num_groups):
            writer.write_table(table)

    reader = pq.ParquetFile(path)
    assert reader.metadata.num_row_groups == num_groups
    for group_index in range(num_groups):
        assert reader.read_row_group(group_index).equals(table)
@pytest.mark.pandas
def test_parquet_writer_with_caller_provided_filesystem():
    """A caller-provided filesystem is used to open the destination path,
    and is rejected when the destination is already a file-like object."""
    out = pa.BufferOutputStream()

    class CustomFS(FileSystem):
        def __init__(self):
            self.path = None
            self.mode = None

        def open(self, path, mode='rb'):
            # Record how we were asked to open, then hand back the buffer.
            self.path = path
            self.mode = mode
            return out

    fs = CustomFS()
    fname = 'expected_fname.parquet'
    df = _test_dataframe(100)
    table = pa.Table.from_pandas(df, preserve_index=False)

    with pq.ParquetWriter(fname, table.schema, filesystem=fs, version='2.0') \
            as writer:
        writer.write_table(table)

    assert fs.path == fname
    assert fs.mode == 'wb'
    assert out.closed

    buf = out.getvalue()
    table_read = _read_table(pa.BufferReader(buf))
    df_read = table_read.to_pandas()
    tm.assert_frame_equal(df_read, df)

    # Should raise ValueError when filesystem is passed with file-like object
    with pytest.raises(ValueError) as err_info:
        pq.ParquetWriter(pa.BufferOutputStream(), table.schema, filesystem=fs)
    expected_msg = ("filesystem passed but where is file-like, so"
                    " there is nothing to open with filesystem.")
    # BUG FIX: str(err_info) stringifies pytest's ExceptionInfo wrapper
    # (location + repr), which can never equal the bare message; compare
    # against the raised exception itself via err_info.value.
    assert str(err_info.value) == expected_msg
def test_writing_empty_lists():
    # ARROW-2591: [Python] Segmentation fault issue in pq.write_table
    empty_lists = pa.array([[], []], pa.list_(pa.int32()))
    table = pa.Table.from_arrays([empty_lists], ['list(int32)'])
    _check_roundtrip(table)
def test_write_nested_zero_length_array_chunk_failure():
    # Bug report in ARROW-3792
    """Round-trip a table whose first chunk of a nested list column is
    zero-length (this previously crashed the writer)."""
    cols = OrderedDict(
        int32=pa.int32(),
        list_string=pa.list_(pa.string())
    )
    data = [[], [OrderedDict(int32=1, list_string=('G',)), ]]

    # This produces a table with a column like
    # <Column name='list_string' type=ListType(list<item: string>)>
    # [
    #   [],
    #   [
    #     [
    #       "G"
    #     ]
    #   ]
    # ]
    #
    # Each column is a ChunkedArray with 2 elements
    my_arrays = [pa.array(batch, type=pa.struct(cols)).flatten()
                 for batch in data]
    my_batches = [pa.RecordBatch.from_arrays(batch, pa.schema(cols))
                  for batch in my_arrays]
    tbl = pa.Table.from_batches(my_batches, pa.schema(cols))
    _check_roundtrip(tbl)
@pytest.mark.pandas
def test_partitioned_dataset(tempdir):
    # ARROW-3208: Segmentation fault when reading a Parquet partitioned
    # dataset and re-writing it to a single Parquet file
    path = tempdir / "ARROW-3208"
    frame = pd.DataFrame({
        'one': [-1, 10, 2.5, 100, 1000, 1, 29.2],
        'two': [-1, 10, 2, 100, 1000, 1, 11],
        'three': [0, 0, 0, 0, 0, 0, 0]
    })
    pq.write_to_dataset(pa.Table.from_pandas(frame), root_path=str(path),
                        partition_cols=['one', 'two'])
    combined = pq.ParquetDataset(path).read()
    pq.write_table(combined, path / "output.parquet")
def test_read_column_invalid_index():
    """read_column must reject out-of-range column indices."""
    table = pa.table([pa.array([4, 5]), pa.array(["foo", "bar"])],
                     names=['ints', 'strs'])
    sink = pa.BufferOutputStream()
    pq.write_table(table, sink)

    parquet_file = pq.ParquetFile(sink.getvalue())
    assert parquet_file.reader.read_column(0).to_pylist() == [4, 5]
    assert parquet_file.reader.read_column(1).to_pylist() == ["foo", "bar"]
    for bad_index in (-1, 2):
        with pytest.raises((ValueError, IndexError)):
            parquet_file.reader.read_column(bad_index)
def test_direct_read_dictionary():
    # ARROW-3325: read_dictionary yields dictionary-encoded columns.
    repeats = 10
    nunique = 5
    values = [tm.rands(10) for _ in range(nunique)] * repeats
    table = pa.table([values], names=['f0'])

    sink = pa.BufferOutputStream()
    pq.write_table(table, sink)
    result = pq.read_table(pa.BufferReader(sink.getvalue()),
                           read_dictionary=['f0'])

    # Expected: the same column, dictionary-encoded.
    expected = pa.table([table[0].dictionary_encode()], names=['f0'])
    assert result.equals(expected)
def test_dataset_read_dictionary(tempdir):
    """read_dictionary applies to every piece of a multi-file dataset."""
    path = tempdir / "ARROW-3325-dataset"
    t1 = pa.table([[tm.rands(10) for i in range(5)] * 10], names=['f0'])
    t2 = pa.table([[tm.rands(10) for i in range(5)] * 10], names=['f0'])
    pq.write_to_dataset(t1, root_path=str(path))
    pq.write_to_dataset(t2, root_path=str(path))
    result = pq.ParquetDataset(path, read_dictionary=['f0']).read()

    # The order of the chunks is non-deterministic
    ex_chunks = [t1[0].chunk(0).dictionary_encode(),
                 t2[0].chunk(0).dictionary_encode()]

    assert result[0].num_chunks == 2
    c0, c1 = result[0].chunk(0), result[0].chunk(1)
    if c0.equals(ex_chunks[0]):
        assert c1.equals(ex_chunks[1])
    else:
        # Pieces were read in the opposite order; both must still match.
        assert c0.equals(ex_chunks[1])
        assert c1.equals(ex_chunks[0])
def test_direct_read_dictionary_subfield():
    """read_dictionary on a nested path ('f0.list.item') must
    dictionary-encode only the list column's value child."""
    repeats = 10
    nunique = 5
    data = [
        [[tm.rands(10)] for i in range(nunique)] * repeats,
    ]
    table = pa.table(data, names=['f0'])

    bio = pa.BufferOutputStream()
    pq.write_table(table, bio)
    contents = bio.getvalue()
    result = pq.read_table(pa.BufferReader(contents),
                           read_dictionary=['f0.list.item'])

    # Build the expected list<dictionary> array by hand: encode the flat
    # values, then re-attach the original offsets (50 lists + end offset).
    arr = pa.array(data[0])
    values_as_dict = arr.values.dictionary_encode()
    inner_indices = values_as_dict.indices.cast('int32')
    new_values = pa.DictionaryArray.from_arrays(inner_indices,
                                                values_as_dict.dictionary)
    offsets = pa.array(range(51), type='int32')
    expected_arr = pa.ListArray.from_arrays(offsets, new_values)
    expected = pa.table([expected_arr], names=['f0'])

    assert result.equals(expected)
    assert result[0].num_chunks == 1
@pytest.mark.pandas
def test_dataset_metadata(tempdir):
    """metadata_collector entries from write_to_dataset must match the
    metadata read back from the dataset pieces (modulo serialized_size)."""
    path = tempdir / "ARROW-1983-dataset"

    # create and write a test dataset
    df = pd.DataFrame({
        'one': [1, 2, 3],
        'two': [-1, -2, -3],
        'three': [[1, 2], [2, 3], [3, 4]],
    })
    table = pa.Table.from_pandas(df)

    metadata_list = []
    pq.write_to_dataset(table, root_path=str(path),
                        partition_cols=['one', 'two'],
                        metadata_collector=metadata_list)

    # open the dataset and collect metadata from pieces:
    dataset = pq.ParquetDataset(path)
    metadata_list2 = [p.get_metadata() for p in dataset.pieces]

    # compare metadata list content:
    assert len(metadata_list) == len(metadata_list2)
    for md, md2 in zip(metadata_list, metadata_list2):
        d = md.to_dict()
        d2 = md2.to_dict()
        # serialized_size is initialized in the reader:
        assert d.pop('serialized_size') == 0
        assert d2.pop('serialized_size') > 0
        assert d == d2
def test_parquet_file_too_small(tempdir):
    """Files shorter than a valid parquet footer raise a clear IO error."""
    path = str(tempdir / "test.parquet")
    for payload, message in ((b'', 'size is 0 bytes'),
                             (b'ffff', 'size is 4 bytes')):
        with pytest.raises(pa.ArrowIOError, match=message):
            with open(path, 'wb') as f:
                f.write(payload)
            pq.read_table(path)
@pytest.mark.pandas
def test_categorical_index_survives_roundtrip():
    # ARROW-3652, addressed by ARROW-3246
    frame = pd.DataFrame([['a', 'b'], ['c', 'd']], columns=['c1', 'c2'])
    frame['c1'] = frame['c1'].astype('category')
    frame = frame.set_index(['c1'])

    sink = pa.BufferOutputStream()
    pq.write_table(pa.Table.from_pandas(frame), sink)
    restored = pq.read_pandas(sink.getvalue()).to_pandas()

    assert isinstance(restored.index, pd.CategoricalIndex)
    assert restored.index.equals(frame.index)
def test_dictionary_array_automatically_read():
    # ARROW-3246: large dictionaries are still decoded transparently.
    # Make a large dictionary, a little over 4MB of data
    dict_length = 4000
    dict_values = pa.array(['x' * 1000 + '_{}'.format(i)
                            for i in range(dict_length)])

    num_chunks = 10
    chunk_size = 100
    chunks = []
    for _ in range(num_chunks):
        indices = np.random.randint(0, dict_length,
                                    size=chunk_size).astype(np.int32)
        chunks.append(pa.DictionaryArray.from_arrays(pa.array(indices),
                                                     dict_values))

    table = pa.table([pa.chunked_array(chunks)], names=['f0'])
    sink = pa.BufferOutputStream()
    pq.write_table(table, sink)
    result = pq.read_table(pa.BufferReader(sink.getvalue()))

    assert result.equals(table)
    # The only key in the metadata was the Arrow schema key
    assert result.schema.metadata is None
@pytest.mark.pandas
def test_pandas_categorical_na_type_row_groups():
    # ARROW-5085: all-null categorical columns across many row groups
    df = pd.DataFrame({"col": [None] * 100, "int": [1.0] * 100})
    df_category = df.astype({"col": "category", "int": "category"})
    plain_table = pa.Table.from_pandas(df)
    category_table = pa.Table.from_pandas(df_category)
    sink = pa.BufferOutputStream()

    # it works
    pq.write_table(category_table, sink, version="2.0", chunk_size=10)
    result = pq.read_table(sink.getvalue())

    # Result is non-categorical
    assert result[0].equals(plain_table[0])
    assert result[1].equals(plain_table[1])
@pytest.mark.pandas
def test_pandas_categorical_roundtrip():
    # ARROW-5480, this was enabled by ARROW-3246
    # One category is unobserved and a null entry (code -1) is included.
    codes = np.array([2, 0, 0, 2, 0, -1, 2], dtype='int32')
    categories = ['foo', 'bar', 'baz']
    df = pd.DataFrame({'x': pd.Categorical.from_codes(
        codes, categories=categories)})

    sink = pa.BufferOutputStream()
    pq.write_table(pa.table(df), sink)
    result = pq.read_table(sink.getvalue()).to_pandas()

    assert result.x.dtype == 'category'
    assert (result.x.cat.categories == categories).all()
    tm.assert_frame_equal(result, df)
@pytest.mark.pandas
def test_multi_dataset_metadata(tempdir):
    """Merge per-file metadata from several writes into one _metadata file
    via set_file_path + append_row_groups, then read it back."""
    filenames = ["ARROW-1983-dataset.0", "ARROW-1983-dataset.1"]
    metapath = str(tempdir / "_metadata")

    # create a test dataset
    df = pd.DataFrame({
        'one': [1, 2, 3],
        'two': [-1, -2, -3],
        'three': [[1, 2], [2, 3], [3, 4]],
    })
    table = pa.Table.from_pandas(df)

    # write dataset twice and collect/merge metadata
    _meta = None
    for filename in filenames:
        meta = []
        pq.write_table(table, str(tempdir / filename),
                       metadata_collector=meta)
        meta[0].set_file_path(filename)
        if _meta is None:
            _meta = meta[0]
        else:
            _meta.append_row_groups(meta[0])

    # Write merged metadata-only file
    with open(metapath, "wb") as f:
        _meta.write_metadata_file(f)

    # Read back the metadata
    meta = pq.read_metadata(metapath)
    md = meta.to_dict()
    _md = _meta.to_dict()
    for key in _md:
        if key != 'serialized_size':
            assert _md[key] == md[key]
    # Two files x one row group of three rows each.
    assert _md['num_columns'] == 3
    assert _md['num_rows'] == 6
    assert _md['num_row_groups'] == 2
    # serialized_size is only populated by the reader.
    assert _md['serialized_size'] == 0
    assert md['serialized_size'] > 0
@pytest.mark.pandas
def test_filter_before_validate_schema(tempdir):
    # ARROW-4076: the partition filter must be applied before schema
    # validation; the two partitions below deliberately disagree on the
    # dtype of column B, so validating all schemas first would raise.
    for partition, values in (('A=0', [1, 2, 3]), ('A=1', ['a', 'b', 'c'])):
        part_dir = tempdir / partition
        part_dir.mkdir()
        part_table = pa.Table.from_pandas(pd.DataFrame({'B': values}))
        pq.write_table(part_table, part_dir / 'data.parquet')

    # Reading with a filter only touches the A=0 piece.
    table = pq.read_table(tempdir, filters=[[('A', '==', 0)]])
    assert table.column('B').equals(pa.chunked_array([[1, 2, 3]]))
| true | true |
f7175c38c11862b31ec72a419e525c555b34bcf3 | 16,570 | py | Python | batch/processor_BondLedger_JP.py | BoostryJP/ibet-Issuer | efc599f8784be06588cf3ad8f239d36f24fdf3fa | [
"Apache-2.0"
] | 1 | 2021-06-16T03:38:07.000Z | 2021-06-16T03:38:07.000Z | batch/processor_BondLedger_JP.py | BoostryJP/ibet-Issuer | efc599f8784be06588cf3ad8f239d36f24fdf3fa | [
"Apache-2.0"
] | 17 | 2021-04-26T03:28:40.000Z | 2021-11-24T07:15:55.000Z | batch/processor_BondLedger_JP.py | BoostryJP/ibet-Issuer | efc599f8784be06588cf3ad8f239d36f24fdf3fa | [
"Apache-2.0"
] | 1 | 2021-05-30T14:09:11.000Z | 2021-05-30T14:09:11.000Z | """
Copyright BOOSTRY Co., Ltd.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
SPDX-License-Identifier: Apache-2.0
"""
import base64
from Crypto.Cipher import PKCS1_OAEP
from Crypto.PublicKey import RSA
from datetime import (
datetime,
timezone,
timedelta
)
import json
import os
import sys
import time
from eth_utils import to_checksum_address
from sqlalchemy import (
create_engine,
func
)
from sqlalchemy.orm import (
sessionmaker,
scoped_session
)
from web3 import Web3
from web3.middleware import geth_poa_middleware
path = os.path.join(os.path.dirname(__file__), '../')
sys.path.append(path)
from app.utils import ContractUtils
from app.models import (
Token,
UTXO,
BondLedger,
BondLedgerBlockNumber,
Issuer,
CorporateBondLedgerTemplate,
PersonalInfo as PersonalInfoModel
)
from config import Config
import log
process_name = "PROCESSOR-BondLedger"
LOG = log.get_logger(process_name=process_name)

# Web3 client for the configured node; the PoA middleware is injected
# because the target chain uses a proof-of-authority consensus header.
web3 = Web3(Web3.HTTPProvider(Config.WEB3_HTTP_PROVIDER))
web3.middleware_onion.inject(geth_poa_middleware, layer=0)

# Process-wide SQLAlchemy session bound to the issuer database.
engine = create_engine(Config.SQLALCHEMY_DATABASE_URI, echo=False)
db_session = scoped_session(sessionmaker())
db_session.configure(bind=engine)

# Japan Standard Time (UTC+9) — ledger dates are recorded in JST.
JST = timezone(timedelta(hours=+9), "JST")
class Sinks:
    """Fan-out dispatcher: forwards each event to every registered sink."""

    def __init__(self):
        self.sinks = []

    def register(self, sink):
        """Add a sink that will receive all subsequent events."""
        self.sinks.append(sink)

    def on_utxo(self, *args, **kwargs):
        for registered in self.sinks:
            registered.on_utxo(*args, **kwargs)

    def on_bond_ledger(self, *args, **kwargs):
        for registered in self.sinks:
            registered.on_bond_ledger(*args, **kwargs)

    def flush(self, *args, **kwargs):
        for registered in self.sinks:
            registered.flush(*args, **kwargs)
class DBSink:
    """Persistence sink: maintains the UTXO table from Transfer events and
    stores generated JP corporate bond ledger snapshots."""

    def __init__(self, db):
        self.db = db

    def on_utxo(self, spent: bool, transaction_hash: str,
                account_address: str, token_address: str, amount: int,
                block_timestamp: datetime, transaction_date_jst: str):
        """Apply one side of a Transfer event to the UTXO table.

        :param spent: False = append a new UTXO (receiver side);
                      True = consume existing UTXOs (sender side)
        :param transaction_hash: hash of the triggering transaction
        :param account_address: account whose holdings changed
        :param token_address: token contract address
        :param amount: transferred amount
        :param block_timestamp: block timestamp (UTC, naive)
        :param transaction_date_jst: block date in JST, "%Y/%m/%d"
        """
        if spent is False:
            LOG.debug(f"Append UTXO: account_address={account_address}, token_address={token_address}, amount={amount}")
            # Only insert when this transaction hash is unseen, so that
            # re-processing a block range stays idempotent.
            utxo = self.db.query(UTXO). \
                filter(UTXO.transaction_hash == transaction_hash). \
                first()
            if utxo is None:
                utxo = UTXO()
                utxo.transaction_hash = transaction_hash
                utxo.account_address = account_address
                utxo.token_address = token_address
                utxo.amount = amount
                utxo.block_timestamp = block_timestamp
                utxo.transaction_date_jst = transaction_date_jst
                self.db.add(utxo)
        else:
            LOG.debug(f"Spend UTXO: account_address={account_address}, token_address={token_address}, amount={amount}")
            # Consume the sender's UTXOs oldest-first (FIFO by block time).
            utxo_list = self.db.query(UTXO). \
                filter(UTXO.account_address == account_address). \
                filter(UTXO.token_address == token_address). \
                filter(UTXO.amount > 0). \
                order_by(UTXO.block_timestamp). \
                all()
            spend_amount = amount
            for utxo in utxo_list:
                utxo_amount = utxo.amount
                if spend_amount <= 0:
                    # Fully spent already; remaining UTXOs stay intact.
                    pass
                elif utxo.amount <= spend_amount:
                    # This UTXO is consumed entirely.
                    utxo.amount = 0
                    spend_amount = spend_amount - utxo_amount
                    self.db.merge(utxo)
                else:
                    # Partially consume this UTXO and finish.
                    utxo.amount = utxo_amount - spend_amount
                    spend_amount = 0
                    self.db.merge(utxo)

    def on_bond_ledger(self, token):
        """Build a JP-format corporate bond ledger snapshot for *token* from
        the ledger template, the current UTXO state and the holders'
        personal information, and store it as a BondLedger row.

        The ledger JSON keys are intentionally Japanese (regulatory format).

        :param token: web3 contract object of the bond token
        """
        #########################################
        # Ledger creation date (JST)
        #########################################
        created_date = datetime.utcnow().replace(tzinfo=timezone.utc).astimezone(JST).strftime("%Y/%m/%d")

        #########################################
        # Bond details (from the issuer-maintained ledger template)
        #########################################
        ledger_template = self.db.query(CorporateBondLedgerTemplate). \
            filter(CorporateBondLedgerTemplate.token_address == token.address). \
            first()
        if ledger_template is not None:
            bond_description = {
                "社債名称": ledger_template.bond_name,
                "社債の説明": ledger_template.bond_description,
                "社債の総額": ledger_template.total_amount,
                "各社債の金額": ledger_template.face_value,
                "払込情報": {
                    "払込金額": ledger_template.payment_amount,
                    "払込日": ledger_template.payment_date,
                    "払込状況": ledger_template.payment_status
                },
                "社債の種類": ledger_template.bond_type
            }
        else:
            # No template registered: emit an empty-value skeleton.
            bond_description = {
                "社債名称": "",
                "社債の説明": "",
                "社債の総額": None,
                "各社債の金額": None,
                "払込情報": {
                    "払込金額": None,
                    "払込日": "",
                    "払込状況": None
                },
                "社債の種類": ""
            }

        #########################################
        # Ledger administrator
        #########################################
        if ledger_template is not None:
            ledger_admin = {
                "氏名または名称": ledger_template.ledger_admin_name,
                "住所": ledger_template.ledger_admin_address,
                "事務取扱場所": ledger_template.ledger_admin_location
            }
        else:
            ledger_admin = {
                "氏名または名称": "",
                "住所": "",
                "事務取扱場所": ""
            }

        #########################################
        # Creditors (bondholders)
        #########################################
        issuer_address = token.functions.owner().call()
        face_value = token.functions.faceValue().call()
        # Aggregate live UTXOs per (holder, token, acquisition date).
        utxo_list = self.db.query(UTXO.account_address, UTXO.token_address, func.sum(UTXO.amount),
                                  UTXO.transaction_date_jst). \
            filter(UTXO.token_address == token.address). \
            filter(UTXO.amount > 0). \
            group_by(UTXO.account_address, UTXO.token_address, UTXO.transaction_date_jst). \
            all()
        creditors = []
        for utxo in utxo_list:
            account_address = utxo[0]
            amount = utxo[2]
            transaction_date_jst = utxo[3]
            # Default entry; the identity fields are filled in below.
            details = {
                "アカウントアドレス": account_address,
                "氏名または名称": "",
                "住所": "",
                "社債金額": face_value * amount,
                "取得日": transaction_date_jst,
                "金銭以外の財産給付情報": {
                    "財産の価格": "-",
                    "給付日": "-"
                },
                "債権相殺情報": {
                    "相殺する債権額": "-",
                    "相殺日": "-"
                },
                "質権情報": {
                    "質権者の氏名または名称": "-",
                    "質権者の住所": "-",
                    "質権の目的である債券": "-"
                },
                "備考": "-"
            }
            # Personal information: prefer the local DB cache.
            personal_info_json = self.__get_personalinfo_from_db(
                account_address=account_address,
                issuer_address=issuer_address
            )
            if personal_info_json is None:  # not registered in DB: fall back to the on-chain contract
                personal_info_contract_address = token.functions.personalInfoAddress().call()
                personal_info_json = self.__get_personalinfo_from_contract(
                    account_address=account_address,
                    issuer_address=issuer_address,
                    personal_info_contract_address=personal_info_contract_address
                )
            if personal_info_json is not None:
                name = personal_info_json.get("name", "")  # holder name
                address = personal_info_json.get("address", "")  # holder address
            else:
                name = ""
                address = ""
            # Fill in the holder identity
            details["氏名または名称"] = name
            details["住所"] = address
            creditors.append(details)

        # Persist the assembled ledger as UTF-8 JSON (non-ASCII kept as-is).
        ledger = {
            "社債原簿作成日": created_date,
            "社債情報": bond_description,
            "社債原簿管理人": ledger_admin,
            "社債権者": creditors
        }
        bond_ledger = BondLedger(
            token_address=token.address,
            ledger=json.dumps(ledger, ensure_ascii=False).encode()
        )
        self.db.add(bond_ledger)

    def __get_personalinfo_from_db(self, account_address: str, issuer_address: str):
        """Fetch personal information cached in the local DB.

        :param account_address: holder account address
        :param issuer_address: issuer account address
        :return: personal-info dict, or None if not registered
        """
        personal_info_record = self.db.query(PersonalInfoModel). \
            filter(PersonalInfoModel.account_address == to_checksum_address(account_address)). \
            filter(PersonalInfoModel.issuer_address == to_checksum_address(issuer_address)). \
            first()
        if personal_info_record is not None:
            personal_info_json = personal_info_record.personal_info
        else:
            personal_info_json = None
        return personal_info_json

    def __get_personalinfo_from_contract(self, account_address: str, issuer_address: str,
                                         personal_info_contract_address: str):
        """Fetch and decrypt personal information from the on-chain
        PersonalInfo contract using the issuer's RSA private key.

        :param account_address: holder account address
        :param issuer_address: issuer account address
        :param personal_info_contract_address: PersonalInfo contract address
        :return: personal-info dict, or None if missing/undecryptable
        """
        personal_info_json = None
        try:
            issuer = self.db.query(Issuer).filter(Issuer.eth_account == issuer_address).first()
            personal_info_contract = ContractUtils.get_contract('PersonalInfo', personal_info_contract_address)
            cipher = None
            try:
                key = RSA.importKey(issuer.encrypted_rsa_private_key, Config.RSA_PASSWORD)
                cipher = PKCS1_OAEP.new(key)
            except Exception as err:
                LOG.error(f"Cannot open the private key: {err}")
            # Fetch the RSA-encrypted personal information
            personal_info = personal_info_contract.functions. \
                personal_info(account_address, issuer_address). \
                call()
            encrypted_personal_info = personal_info[2]
            if encrypted_personal_info != '' and cipher is not None:  # if empty, the caller's defaults are used
                # Decrypt
                ciphertext = base64.decodebytes(encrypted_personal_info.encode('utf-8'))
                # NOTE:
                # When the data was RSA-encrypted in JavaScript, a leading
                # 0x00 byte is stripped before it reaches us. Decrypting
                # as-is raises ValueError (Ciphertext with incorrect
                # length), so re-prepend the 00 byte before decrypting.
                if len(ciphertext) == 1279:
                    hex_fixed = "00" + ciphertext.hex()
                    ciphertext = base64.b16decode(hex_fixed.upper())
                message = cipher.decrypt(ciphertext)
                personal_info_json = json.loads(message)
        except Exception as err:
            LOG.error(f"Failed to decrypt: {err} : account_address = {account_address}")
        return personal_info_json

    def flush(self):
        # Commit everything accumulated since the last sync pass.
        self.db.commit()
class Processor:
    """Synchronizes on-chain Transfer events into UTXO records and triggers
    bond-ledger regeneration for each affected token."""

    def __init__(self, db, sink):
        self.sink = sink
        self.db = db
        self.token_list = []

    def process(self):
        """Run one sync pass over the block range since the last run."""
        self.__refresh_token_list()
        ledger_block_number = self.__get_ledger_blocknumber()
        latest_block = web3.eth.blockNumber
        if ledger_block_number >= latest_block:
            LOG.debug("skip process")
            pass
        else:
            LOG.debug("syncing from={}, to={}".format(ledger_block_number + 1, latest_block))
            for token in self.token_list:
                event_triggered = self.__create_utxo(token, ledger_block_number + 1, latest_block)
                if event_triggered:  # a UTXO-updating event occurred for this token
                    self.__create_ledger(token)
            self.__set_ledger_blocknumber(latest_block)
            self.sink.flush()

    def __refresh_token_list(self):
        """Refresh the list of issued straight-bond token contracts.

        :return: None
        """
        self.token_list = []
        issued_tokens = self.db.query(Token). \
            filter(Token.template_id == Config.TEMPLATE_ID_SB). \
            all()
        for issued_token in issued_tokens:
            if issued_token.token_address is not None:
                # The ABI is stored as a Python-repr string; normalize it
                # to valid JSON before parsing.
                abi = json.loads(issued_token.abi.replace("'", '"').replace('True', 'true').replace('False', 'false'))
                token_contract = web3.eth.contract(
                    address=issued_token.token_address,
                    abi=abi
                )
                self.token_list.append(token_contract)

    def __get_ledger_blocknumber(self):
        # Last block number processed; 0 if this is the first run.
        block_number = self.db.query(BondLedgerBlockNumber).first()
        if block_number is None:
            return 0
        else:
            return block_number.latest_block_number

    def __set_ledger_blocknumber(self, block_number: int):
        """Save the latest processed block number.

        :param block_number: block number to record
        :return: None
        """
        ledger_block = self.db.query(BondLedgerBlockNumber).first()
        if ledger_block is None:
            ledger_block = BondLedgerBlockNumber()
            ledger_block.latest_block_number = block_number
        else:
            ledger_block.latest_block_number = block_number
        self.db.merge(ledger_block)

    def __create_utxo(self, token, from_block: int, to_block: int) -> bool:
        """Create/spend UTXOs from the Transfer events in a block range.

        :param token: token contract
        :param from_block: from block number
        :param to_block: to block number
        :return: True if any Transfer event was processed
        """
        event_triggered = False
        events = token.events.Transfer.getLogs(
            fromBlock=from_block,
            toBlock=to_block
        )
        for event in events:
            event_triggered = True
            transaction_hash = event["transactionHash"].hex()
            args = event["args"]
            from_account = args.get("from", Config.ZERO_ADDRESS)
            to_account = args.get("to", Config.ZERO_ADDRESS)
            amount = args.get("value")
            block_timestamp = datetime.fromtimestamp(
                web3.eth.getBlock(event['blockNumber'])['timestamp']
            )
            block_timestamp_jst = block_timestamp.replace(tzinfo=timezone.utc). \
                astimezone(JST)
            transaction_date_jst = block_timestamp_jst.strftime("%Y/%m/%d")
            # Guard against absurd values that would overflow DB columns.
            if amount is not None and amount <= sys.maxsize:
                # Spend the sender's UTXOs
                self.sink.on_utxo(
                    spent=True,
                    transaction_hash=transaction_hash,
                    token_address=token.address,
                    account_address=from_account,
                    amount=amount,
                    block_timestamp=block_timestamp,
                    transaction_date_jst=transaction_date_jst
                )
                # Append a UTXO for the receiver
                self.sink.on_utxo(
                    spent=False,
                    transaction_hash=transaction_hash,
                    token_address=token.address,
                    account_address=to_account,
                    amount=amount,
                    block_timestamp=block_timestamp,
                    transaction_date_jst=transaction_date_jst
                )
        return event_triggered

    def __create_ledger(self, token):
        """Generate a bond ledger snapshot for *token*.

        :param token: token contract
        """
        self.sink.on_bond_ledger(token=token)
# Wire up: one DB-backed sink behind the fan-out dispatcher.
sinks = Sinks()
sinks.register(DBSink(db_session))
processor = Processor(db=db_session, sink=sinks)
LOG.info("Service started successfully")

# Poll forever; any error in a pass is logged and the next pass retried.
while True:
    try:
        processor.process()
        LOG.debug("processed")
    except Exception as ex:
        LOG.exception(ex)
    # Sleep between passes (configured interval, 1 minute by default)
    time.sleep(Config.INTERVAL_PROCESSOR_BOND_LEDGER_JP)
| 34.957806 | 120 | 0.559626 | import base64
from Crypto.Cipher import PKCS1_OAEP
from Crypto.PublicKey import RSA
from datetime import (
datetime,
timezone,
timedelta
)
import json
import os
import sys
import time
from eth_utils import to_checksum_address
from sqlalchemy import (
create_engine,
func
)
from sqlalchemy.orm import (
sessionmaker,
scoped_session
)
from web3 import Web3
from web3.middleware import geth_poa_middleware
path = os.path.join(os.path.dirname(__file__), '../')
sys.path.append(path)
from app.utils import ContractUtils
from app.models import (
Token,
UTXO,
BondLedger,
BondLedgerBlockNumber,
Issuer,
CorporateBondLedgerTemplate,
PersonalInfo as PersonalInfoModel
)
from config import Config
import log
process_name = "PROCESSOR-BondLedger"
LOG = log.get_logger(process_name=process_name)
web3 = Web3(Web3.HTTPProvider(Config.WEB3_HTTP_PROVIDER))
web3.middleware_onion.inject(geth_poa_middleware, layer=0)
engine = create_engine(Config.SQLALCHEMY_DATABASE_URI, echo=False)
db_session = scoped_session(sessionmaker())
db_session.configure(bind=engine)
JST = timezone(timedelta(hours=+9), "JST")
class Sinks:
def __init__(self):
self.sinks = []
def register(self, sink):
self.sinks.append(sink)
def on_utxo(self, *args, **kwargs):
for sink in self.sinks:
sink.on_utxo(*args, **kwargs)
def on_bond_ledger(self, *args, **kwargs):
for sink in self.sinks:
sink.on_bond_ledger(*args, **kwargs)
def flush(self, *args, **kwargs):
for sink in self.sinks:
sink.flush(*args, **kwargs)
class DBSink:
def __init__(self, db):
self.db = db
def on_utxo(self, spent: bool, transaction_hash: str,
account_address: str, token_address: str, amount: int,
block_timestamp: datetime, transaction_date_jst: str):
if spent is False:
LOG.debug(f"Append UTXO: account_address={account_address}, token_address={token_address}, amount={amount}")
utxo = self.db.query(UTXO). \
filter(UTXO.transaction_hash == transaction_hash). \
first()
if utxo is None:
utxo = UTXO()
utxo.transaction_hash = transaction_hash
utxo.account_address = account_address
utxo.token_address = token_address
utxo.amount = amount
utxo.block_timestamp = block_timestamp
utxo.transaction_date_jst = transaction_date_jst
self.db.add(utxo)
else:
LOG.debug(f"Spend UTXO: account_address={account_address}, token_address={token_address}, amount={amount}")
utxo_list = self.db.query(UTXO). \
filter(UTXO.account_address == account_address). \
filter(UTXO.token_address == token_address). \
filter(UTXO.amount > 0). \
order_by(UTXO.block_timestamp). \
all()
spend_amount = amount
for utxo in utxo_list:
utxo_amount = utxo.amount
if spend_amount <= 0:
pass
elif utxo.amount <= spend_amount:
utxo.amount = 0
spend_amount = spend_amount - utxo_amount
self.db.merge(utxo)
else:
utxo.amount = utxo_amount - spend_amount
spend_amount = 0
self.db.merge(utxo)
def on_bond_ledger(self, token):
info_json
def flush(self):
self.db.commit()
class Processor:
    """Syncs on-chain bond-token ``Transfer`` events into UTXO records via
    the sink, and triggers ledger creation for tokens that saw new events.
    """
    def __init__(self, db, sink):
        self.sink = sink
        self.db = db
        self.token_list = []  # web3 contract objects for issued bond tokens
    def process(self):
        """Run one sync pass from the last processed block to chain head."""
        self.__refresh_token_list()
        ledger_block_number = self.__get_ledger_blocknumber()
        latest_block = web3.eth.blockNumber
        if ledger_block_number >= latest_block:
            # Nothing new on chain since the last pass.
            LOG.debug("skip process")
            pass
        else:
            LOG.debug("syncing from={}, to={}".format(ledger_block_number + 1, latest_block))
            for token in self.token_list:
                event_triggered = self.__create_utxo(token, ledger_block_number + 1, latest_block)
                if event_triggered:
                    # Only rebuild the ledger for tokens with new transfers.
                    self.__create_ledger(token)
            self.__set_ledger_blocknumber(latest_block)
            self.sink.flush()
    def __refresh_token_list(self):
        """Rebuild the contract list from issued straight-bond tokens."""
        self.token_list = []
        issued_tokens = self.db.query(Token). \
            filter(Token.template_id == Config.TEMPLATE_ID_SB). \
            all()
        for issued_token in issued_tokens:
            if issued_token.token_address is not None:
                # Stored ABI uses Python literal quoting; normalize to JSON
                # before parsing.
                abi = json.loads(issued_token.abi.replace("'", '"').replace('True', 'true').replace('False', 'false'))
                token_contract = web3.eth.contract(
                    address=issued_token.token_address,
                    abi=abi
                )
                self.token_list.append(token_contract)
    def __get_ledger_blocknumber(self):
        """Return the last synced block number (0 when never synced)."""
        block_number = self.db.query(BondLedgerBlockNumber).first()
        if block_number is None:
            return 0
        else:
            return block_number.latest_block_number
    def __set_ledger_blocknumber(self, block_number: int):
        """Persist ``block_number`` as the latest processed block."""
        ledger_block = self.db.query(BondLedgerBlockNumber).first()
        if ledger_block is None:
            ledger_block = BondLedgerBlockNumber()
            ledger_block.latest_block_number = block_number
        else:
            ledger_block.latest_block_number = block_number
        self.db.merge(ledger_block)
    def __create_utxo(self, token, from_block: int, to_block: int) -> bool:
        """Translate Transfer events of ``token`` into sink UTXO updates.

        :returns: True when at least one Transfer event was found in the
            block range ``from_block``..``to_block``.
        """
        event_triggered = False
        events = token.events.Transfer.getLogs(
            fromBlock=from_block,
            toBlock=to_block
        )
        for event in events:
            event_triggered = True
            transaction_hash = event["transactionHash"].hex()
            args = event["args"]
            from_account = args.get("from", Config.ZERO_ADDRESS)
            to_account = args.get("to", Config.ZERO_ADDRESS)
            amount = args.get("value")
            # NOTE(review): datetime.fromtimestamp() yields local time;
            # labeling it UTC below is only correct when the host runs in
            # UTC -- TODO confirm deployment timezone.
            block_timestamp = datetime.fromtimestamp(
                web3.eth.getBlock(event['blockNumber'])['timestamp']
            )
            block_timestamp_jst = block_timestamp.replace(tzinfo=timezone.utc). \
                astimezone(JST)
            transaction_date_jst = block_timestamp_jst.strftime("%Y/%m/%d")
            # Skip missing or over-sized amounts that cannot be stored.
            if amount is not None and amount <= sys.maxsize:
                # Update UTXO (from account)
                self.sink.on_utxo(
                    spent=True,
                    transaction_hash=transaction_hash,
                    token_address=token.address,
                    account_address=from_account,
                    amount=amount,
                    block_timestamp=block_timestamp,
                    transaction_date_jst=transaction_date_jst
                )
                # Update UTXO (to account)
                self.sink.on_utxo(
                    spent=False,
                    transaction_hash=transaction_hash,
                    token_address=token.address,
                    account_address=to_account,
                    amount=amount,
                    block_timestamp=block_timestamp,
                    transaction_date_jst=transaction_date_jst
                )
        return event_triggered
    def __create_ledger(self, token):
        """Forward ledger (re)creation for ``token`` to the sink."""
        self.sink.on_bond_ledger(token=token)
# Entry point: wire the DB sink into the processor and poll forever.
sinks = Sinks()
sinks.register(DBSink(db_session))
processor = Processor(db=db_session, sink=sinks)
LOG.info("Service started successfully")
while True:
    try:
        processor.process()
        LOG.debug("processed")
    except Exception as ex:
        # Keep the service alive on any error; log the full traceback.
        LOG.exception(ex)
    # Run at 1-minute intervals
    time.sleep(Config.INTERVAL_PROCESSOR_BOND_LEDGER_JP)
| true | true |
f7175d22337c8b28777d73b05a950345e75f3ce4 | 1,993 | py | Python | app/forms.py | TimothyBenger/top_lists | b3b5895a3a3c525e81fe167eb7d7ba46cfcbd785 | [
"MIT"
] | null | null | null | app/forms.py | TimothyBenger/top_lists | b3b5895a3a3c525e81fe167eb7d7ba46cfcbd785 | [
"MIT"
] | null | null | null | app/forms.py | TimothyBenger/top_lists | b3b5895a3a3c525e81fe167eb7d7ba46cfcbd785 | [
"MIT"
] | null | null | null | from flask_wtf import FlaskForm
from wtforms import StringField, PasswordField, BooleanField, SubmitField, IntegerField
from wtforms.validators import ValidationError, DataRequired, Email, EqualTo
from app.models import User
class LoginForm(FlaskForm):
    """Sign-in form: username/password plus a "remember me" checkbox."""
    username = StringField('Username', validators=[DataRequired()])
    password = PasswordField('Password', validators=[DataRequired()])
    remember_me = BooleanField('Remember Me')
    submit = SubmitField('Sign In')
class RegistrationForm(FlaskForm):
    """New-user sign-up form with DB-backed uniqueness checks."""
    username = StringField('Username', validators=[DataRequired()])
    email = StringField('Email', validators=[DataRequired(), Email()])
    password = PasswordField('Password', validators=[DataRequired()])
    password2 = PasswordField(
        'Repeat Password', validators=[DataRequired(), EqualTo('password')])
    submit = SubmitField('Register')
    def validate_username(self, username):
        """WTForms inline validator: reject usernames already registered."""
        user = User.query.filter_by(username=username.data).first()
        if user is not None:
            raise ValidationError('Please use a different username.')
    def validate_email(self, email):
        """WTForms inline validator: reject e-mail addresses already in use."""
        user = User.query.filter_by(email=email.data).first()
        if user is not None:
            raise ValidationError('Please use a different email address.')
class EditForm(FlaskForm):
    """Form for editing a five-entry title/author list.

    The numbered field pairs are declared individually on purpose:
    WTForms renders fields in declaration order.
    """
    title1 = StringField('Title', validators=[DataRequired()])
    author1 = StringField('Author', validators=[DataRequired()])
    title2 = StringField('Title', validators=[DataRequired()])
    author2 = StringField('Author', validators=[DataRequired()])
    title3 = StringField('Title', validators=[DataRequired()])
    author3 = StringField('Author', validators=[DataRequired()])
    title4 = StringField('Title', validators=[DataRequired()])
    author4 = StringField('Author', validators=[DataRequired()])
    title5 = StringField('Title', validators=[DataRequired()])
    author5 = StringField('Author', validators=[DataRequired()])
    submit = SubmitField('Submit changes')
| 44.288889 | 87 | 0.714501 | from flask_wtf import FlaskForm
from wtforms import StringField, PasswordField, BooleanField, SubmitField, IntegerField
from wtforms.validators import ValidationError, DataRequired, Email, EqualTo
from app.models import User
class LoginForm(FlaskForm):
username = StringField('Username', validators=[DataRequired()])
password = PasswordField('Password', validators=[DataRequired()])
remember_me = BooleanField('Remember Me')
submit = SubmitField('Sign In')
class RegistrationForm(FlaskForm):
username = StringField('Username', validators=[DataRequired()])
email = StringField('Email', validators=[DataRequired(), Email()])
password = PasswordField('Password', validators=[DataRequired()])
password2 = PasswordField(
'Repeat Password', validators=[DataRequired(), EqualTo('password')])
submit = SubmitField('Register')
def validate_username(self, username):
user = User.query.filter_by(username=username.data).first()
if user is not None:
raise ValidationError('Please use a different username.')
def validate_email(self, email):
user = User.query.filter_by(email=email.data).first()
if user is not None:
raise ValidationError('Please use a different email address.')
class EditForm(FlaskForm):
title1 = StringField('Title', validators=[DataRequired()])
author1 = StringField('Author', validators=[DataRequired()])
title2 = StringField('Title', validators=[DataRequired()])
author2 = StringField('Author', validators=[DataRequired()])
title3 = StringField('Title', validators=[DataRequired()])
author3 = StringField('Author', validators=[DataRequired()])
title4 = StringField('Title', validators=[DataRequired()])
author4 = StringField('Author', validators=[DataRequired()])
title5 = StringField('Title', validators=[DataRequired()])
author5 = StringField('Author', validators=[DataRequired()])
submit = SubmitField('Submit changes')
| true | true |
f7175d5c72e50e87ef42937e0544adccadf5efa8 | 452 | py | Python | lidi/signup/migrations/0002_user_conf_link.py | campovski/lidi | 9699e62e70e679970816e29ca7618c9ed0146c7e | [
"Apache-2.0"
] | null | null | null | lidi/signup/migrations/0002_user_conf_link.py | campovski/lidi | 9699e62e70e679970816e29ca7618c9ed0146c7e | [
"Apache-2.0"
] | 21 | 2017-06-03T14:16:14.000Z | 2018-05-29T07:28:27.000Z | lidi/signup/migrations/0002_user_conf_link.py | campovski/lidi | 9699e62e70e679970816e29ca7618c9ed0146c7e | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.11.1 on 2017-06-02 18:17
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: add ``conf_link`` to the ``signup.User`` model."""
    dependencies = [
        ('signup', '0001_initial'),
    ]
    operations = [
        migrations.AddField(
            model_name='user',
            name='conf_link',
            # NOTE(review): default=b'' is a bytes default (Python 2
            # artifact); left as-is because applied migrations are
            # historical and must not change.
            field=models.CharField(default=b'', max_length=200),
        ),
    ]
| 21.52381 | 64 | 0.606195 |
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('signup', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='user',
name='conf_link',
field=models.CharField(default=b'', max_length=200),
),
]
| true | true |
f7175d75f291571b61c64c017b95e8592d28ea76 | 424 | py | Python | app/moviestore/migrations/0007_alter_movie_user_charge.py | GeorgiosDolias/Movie-Store-REST-API | 3a07301e4574071d6edb00d1a8b2c266c1fc8ff1 | [
"MIT"
] | null | null | null | app/moviestore/migrations/0007_alter_movie_user_charge.py | GeorgiosDolias/Movie-Store-REST-API | 3a07301e4574071d6edb00d1a8b2c266c1fc8ff1 | [
"MIT"
] | null | null | null | app/moviestore/migrations/0007_alter_movie_user_charge.py | GeorgiosDolias/Movie-Store-REST-API | 3a07301e4574071d6edb00d1a8b2c266c1fc8ff1 | [
"MIT"
] | null | null | null | # Generated by Django 3.2.8 on 2021-11-05 00:25
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: allow blank ``user_charge`` on ``Movie``."""
    dependencies = [
        ('moviestore', '0006_movie_user_charge'),
    ]
    operations = [
        migrations.AlterField(
            model_name='movie',
            name='user_charge',
            field=models.DecimalField(blank=True, decimal_places=2, max_digits=5),
        ),
    ]
| 22.315789 | 82 | 0.620283 |
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('moviestore', '0006_movie_user_charge'),
]
operations = [
migrations.AlterField(
model_name='movie',
name='user_charge',
field=models.DecimalField(blank=True, decimal_places=2, max_digits=5),
),
]
| true | true |
f7175f33e2db2534a03ad6bfb8c47d7e1b04f568 | 486 | py | Python | main/strings/slice/words.py | catalinprescure/python-pages | 93df3b22df2cfa269127e803a1b6c6a34bae6745 | [
"MIT"
] | null | null | null | main/strings/slice/words.py | catalinprescure/python-pages | 93df3b22df2cfa269127e803a1b6c6a34bae6745 | [
"MIT"
] | null | null | null | main/strings/slice/words.py | catalinprescure/python-pages | 93df3b22df2cfa269127e803a1b6c6a34bae6745 | [
"MIT"
] | 1 | 2021-12-24T15:58:32.000Z | 2021-12-24T15:58:32.000Z | # Open file and search through words list
# Return number of words with no e in them
import os
file = os.path.dirname(__file__) + "/words.txt"
rows = open(file)
def has_no_e(word):
    """Return True when ``word`` contains no lowercase letter "e".

    The check is case-sensitive, matching the original character loop:
    an uppercase "E" does not count as an "e".
    """
    # Idiomatic membership test replaces the explicit character loop.
    return "e" not in word
W = [] # all words read from the file
E = [] # words containing no "e"
for row in rows:
    word = row.strip()
    W.append(word)
    if (has_no_e(word)):
        E.append(word)
# Report the total word count and the count of words without "e".
print("W: " + repr(len(W)))
print("E: " + repr(len(E)))
import os
file = os.path.dirname(__file__) + "/words.txt"
rows = open(file)
def has_no_e(word):
for letter in word:
if letter == "e":
return False
return True
W = []
E = []
for row in rows:
word = row.strip()
W.append(word)
if (has_no_e(word)):
E.append(word)
print("W: " + repr(len(W)))
print("E: " + repr(len(E))) | true | true |
f717606f6302397f6adbcd4ea5ca8a1e1e665802 | 374 | py | Python | setup.py | adamjoshuagray/PredictItPy | 4006cb00f38c256765b556b3476c286470555533 | [
"MIT"
] | null | null | null | setup.py | adamjoshuagray/PredictItPy | 4006cb00f38c256765b556b3476c286470555533 | [
"MIT"
] | 1 | 2016-11-08T10:02:31.000Z | 2016-11-08T10:02:31.000Z | setup.py | adamjoshuagray/PredictItPy | 4006cb00f38c256765b556b3476c286470555533 | [
"MIT"
] | 1 | 2019-07-04T10:53:41.000Z | 2019-07-04T10:53:41.000Z | from setuptools import setup
setup(name='predictitpy',
version='0.2',
py_modules=['predictitpy'],
description='A very light wrapper around the PredictIt.org market data api.',
url='https://github.com/adamjoshuagray/predictitpy',
author='Adam J. Gray',
author_email='adam.joshua.gray@gmail.com',
license='MIT',
zip_safe=False) | 34 | 83 | 0.671123 | from setuptools import setup
setup(name='predictitpy',
version='0.2',
py_modules=['predictitpy'],
description='A very light wrapper around the PredictIt.org market data api.',
url='https://github.com/adamjoshuagray/predictitpy',
author='Adam J. Gray',
author_email='adam.joshua.gray@gmail.com',
license='MIT',
zip_safe=False) | true | true |
f71763968b7433700606cc9fe3afcf0b874429db | 129,676 | py | Python | salt/ext/tornado/web.py | yuriks/salt | d2a5bd8adddb98ec1718d79384aa13b4f37e8028 | [
"Apache-2.0",
"MIT"
] | 1 | 2020-03-31T22:51:16.000Z | 2020-03-31T22:51:16.000Z | salt/ext/tornado/web.py | yuriks/salt | d2a5bd8adddb98ec1718d79384aa13b4f37e8028 | [
"Apache-2.0",
"MIT"
] | null | null | null | salt/ext/tornado/web.py | yuriks/salt | d2a5bd8adddb98ec1718d79384aa13b4f37e8028 | [
"Apache-2.0",
"MIT"
] | 1 | 2021-09-30T07:00:01.000Z | 2021-09-30T07:00:01.000Z | #!/usr/bin/env python
#
# Copyright 2009 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""``tornado.web`` provides a simple web framework with asynchronous
features that allow it to scale to large numbers of open connections,
making it ideal for `long polling
<http://en.wikipedia.org/wiki/Push_technology#Long_polling>`_.
Here is a simple "Hello, world" example app:
.. testcode::
import tornado.ioloop
import tornado.web
class MainHandler(tornado.web.RequestHandler):
def get(self):
self.write("Hello, world")
if __name__ == "__main__":
application = tornado.web.Application([
(r"/", MainHandler),
])
application.listen(8888)
tornado.ioloop.IOLoop.current().start()
.. testoutput::
:hide:
See the :doc:`guide` for additional information.
Thread-safety notes
-------------------
In general, methods on `RequestHandler` and elsewhere in Tornado are
not thread-safe. In particular, methods such as
`~RequestHandler.write()`, `~RequestHandler.finish()`, and
`~RequestHandler.flush()` must only be called from the main thread. If
you use multiple threads it is important to use `.IOLoop.add_callback`
to transfer control back to the main thread before finishing the
request.
"""
# pylint: skip-file
from __future__ import absolute_import, division, print_function
import base64
import binascii
import datetime
import email.utils
import functools
import gzip
import hashlib
import hmac
import mimetypes
import numbers
import os.path
import re
import stat
import sys
import threading
import time
import salt.ext.tornado as tornado
import traceback
import types
from inspect import isclass
from io import BytesIO
from salt.ext.tornado.concurrent import Future
from salt.ext.tornado import escape
from salt.ext.tornado import gen
from salt.ext.tornado import httputil
from salt.ext.tornado import iostream
from salt.ext.tornado import locale
from salt.ext.tornado.log import access_log, app_log, gen_log
from salt.ext.tornado import stack_context
from salt.ext.tornado import template
from salt.ext.tornado.escape import utf8, _unicode
from salt.ext.tornado.routing import (AnyMatches, DefaultHostMatches, HostMatches,
ReversibleRouter, Rule, ReversibleRuleRouter,
URLSpec)
from salt.ext.tornado.util import (ObjectDict, raise_exc_info,
unicode_type, _websocket_mask, PY3)
url = URLSpec
if PY3:
import http.cookies as Cookie
import urllib.parse as urlparse
from urllib.parse import urlencode
else:
import Cookie
import urlparse
from urllib import urlencode
try:
import typing # noqa
# The following types are accepted by RequestHandler.set_header
# and related methods.
_HeaderTypes = typing.Union[bytes, unicode_type,
numbers.Integral, datetime.datetime]
except ImportError:
pass
MIN_SUPPORTED_SIGNED_VALUE_VERSION = 1
"""The oldest signed value version supported by this version of Tornado.
Signed values older than this version cannot be decoded.
.. versionadded:: 3.2.1
"""
MAX_SUPPORTED_SIGNED_VALUE_VERSION = 2
"""The newest signed value version supported by this version of Tornado.
Signed values newer than this version cannot be decoded.
.. versionadded:: 3.2.1
"""
DEFAULT_SIGNED_VALUE_VERSION = 2
"""The signed value version produced by `.RequestHandler.create_signed_value`.
May be overridden by passing a ``version`` keyword argument.
.. versionadded:: 3.2.1
"""
DEFAULT_SIGNED_VALUE_MIN_VERSION = 1
"""The oldest signed value accepted by `.RequestHandler.get_secure_cookie`.
May be overridden by passing a ``min_version`` keyword argument.
.. versionadded:: 3.2.1
"""
class RequestHandler(object):
"""Base class for HTTP request handlers.
Subclasses must define at least one of the methods defined in the
"Entry points" section below.
"""
SUPPORTED_METHODS = ("GET", "HEAD", "POST", "DELETE", "PATCH", "PUT",
"OPTIONS")
_template_loaders = {} # type: typing.Dict[str, template.BaseLoader]
_template_loader_lock = threading.Lock()
_remove_control_chars_regex = re.compile(r"[\x00-\x08\x0e-\x1f]")
    def __init__(self, application, request, **kwargs):
        """Bind this handler instance to a single request.

        Resets per-response state via ``clear()``, builds the template UI
        namespace, registers the connection-close callback, and finally
        invokes the subclass ``initialize(**kwargs)`` hook.
        """
        super(RequestHandler, self).__init__()
        self.application = application
        self.request = request
        self._headers_written = False
        self._finished = False
        self._auto_finish = True
        self._transforms = None  # will be set in _execute
        self._prepared_future = None
        self._headers = None  # type: httputil.HTTPHeaders
        self.path_args = None
        self.path_kwargs = None
        self.ui = ObjectDict((n, self._ui_method(m)) for n, m in
                             application.ui_methods.items())
        # UIModules are available as both `modules` and `_tt_modules` in the
        # template namespace.  Historically only `modules` was available
        # but could be clobbered by user additions to the namespace.
        # The template {% module %} directive looks in `_tt_modules` to avoid
        # possible conflicts.
        self.ui["_tt_modules"] = _UIModuleNamespace(self,
                                                    application.ui_modules)
        self.ui["modules"] = self.ui["_tt_modules"]
        # clear() installs the default headers/status before any verb runs.
        self.clear()
        self.request.connection.set_close_callback(self.on_connection_close)
        self.initialize(**kwargs)
def initialize(self):
"""Hook for subclass initialization. Called for each request.
A dictionary passed as the third argument of a url spec will be
supplied as keyword arguments to initialize().
Example::
class ProfileHandler(RequestHandler):
def initialize(self, database):
self.database = database
def get(self, username):
...
app = Application([
(r'/user/(.*)', ProfileHandler, dict(database=database)),
])
"""
pass
@property
def settings(self):
"""An alias for `self.application.settings <Application.settings>`."""
return self.application.settings
def head(self, *args, **kwargs):
raise HTTPError(405)
def get(self, *args, **kwargs):
raise HTTPError(405)
def post(self, *args, **kwargs):
raise HTTPError(405)
def delete(self, *args, **kwargs):
raise HTTPError(405)
def patch(self, *args, **kwargs):
raise HTTPError(405)
def put(self, *args, **kwargs):
raise HTTPError(405)
def options(self, *args, **kwargs):
raise HTTPError(405)
def prepare(self):
"""Called at the beginning of a request before `get`/`post`/etc.
Override this method to perform common initialization regardless
of the request method.
Asynchronous support: Decorate this method with `.gen.coroutine`
or `.return_future` to make it asynchronous (the
`asynchronous` decorator cannot be used on `prepare`).
If this method returns a `.Future` execution will not proceed
until the `.Future` is done.
.. versionadded:: 3.1
Asynchronous support.
"""
pass
def on_finish(self):
"""Called after the end of a request.
Override this method to perform cleanup, logging, etc.
This method is a counterpart to `prepare`. ``on_finish`` may
not produce any output, as it is called after the response
has been sent to the client.
"""
pass
def on_connection_close(self):
"""Called in async handlers if the client closed the connection.
Override this to clean up resources associated with
long-lived connections. Note that this method is called only if
the connection was closed during asynchronous processing; if you
need to do cleanup after every request override `on_finish`
instead.
Proxies may keep a connection open for a time (perhaps
indefinitely) after the client has gone away, so this method
may not be called promptly after the end user closes their
connection.
"""
if _has_stream_request_body(self.__class__):
if not self.request.body.done():
self.request.body.set_exception(iostream.StreamClosedError())
self.request.body.exception()
def clear(self):
"""Resets all headers and content for this response."""
self._headers = httputil.HTTPHeaders({
"Server": "TornadoServer/%s" % tornado.version,
"Content-Type": "text/html; charset=UTF-8",
"Date": httputil.format_timestamp(time.time()),
})
self.set_default_headers()
self._write_buffer = []
self._status_code = 200
self._reason = httputil.responses[200]
def set_default_headers(self):
"""Override this to set HTTP headers at the beginning of the request.
For example, this is the place to set a custom ``Server`` header.
Note that setting such headers in the normal flow of request
processing may not do what you want, since headers may be reset
during error handling.
"""
pass
def set_status(self, status_code, reason=None):
"""Sets the status code for our response.
:arg int status_code: Response status code. If ``reason`` is ``None``,
it must be present in `httplib.responses <http.client.responses>`.
:arg string reason: Human-readable reason phrase describing the status
code. If ``None``, it will be filled in from
`httplib.responses <http.client.responses>`.
"""
self._status_code = status_code
if reason is not None:
self._reason = escape.native_str(reason)
else:
try:
self._reason = httputil.responses[status_code]
except KeyError:
raise ValueError("unknown status code %d" % status_code)
def get_status(self):
"""Returns the status code for our response."""
return self._status_code
def set_header(self, name, value):
# type: (str, _HeaderTypes) -> None
"""Sets the given response header name and value.
If a datetime is given, we automatically format it according to the
HTTP specification. If the value is not a string, we convert it to
a string. All header values are then encoded as UTF-8.
"""
self._headers[name] = self._convert_header_value(value)
def add_header(self, name, value):
# type: (str, _HeaderTypes) -> None
"""Adds the given response header and value.
Unlike `set_header`, `add_header` may be called multiple times
to return multiple values for the same header.
"""
self._headers.add(name, self._convert_header_value(value))
def clear_header(self, name):
"""Clears an outgoing header, undoing a previous `set_header` call.
Note that this method does not apply to multi-valued headers
set by `add_header`.
"""
if name in self._headers:
del self._headers[name]
_INVALID_HEADER_CHAR_RE = re.compile(r"[\x00-\x1f]")
def _convert_header_value(self, value):
# type: (_HeaderTypes) -> str
# Convert the input value to a str. This type check is a bit
# subtle: The bytes case only executes on python 3, and the
# unicode case only executes on python 2, because the other
# cases are covered by the first match for str.
if isinstance(value, str):
retval = value
elif isinstance(value, bytes): # py3
# Non-ascii characters in headers are not well supported,
# but if you pass bytes, use latin1 so they pass through as-is.
retval = value.decode('latin1')
elif isinstance(value, unicode_type): # py2
# TODO: This is inconsistent with the use of latin1 above,
# but it's been that way for a long time. Should it change?
retval = escape.utf8(value)
elif isinstance(value, numbers.Integral):
# return immediately since we know the converted value will be safe
return str(value)
elif isinstance(value, datetime.datetime):
return httputil.format_timestamp(value)
else:
raise TypeError("Unsupported header value %r" % value)
# If \n is allowed into the header, it is possible to inject
# additional headers or split the request.
if RequestHandler._INVALID_HEADER_CHAR_RE.search(retval):
raise ValueError("Unsafe header value %r", retval)
return retval
_ARG_DEFAULT = object()
def get_argument(self, name, default=_ARG_DEFAULT, strip=True):
"""Returns the value of the argument with the given name.
If default is not provided, the argument is considered to be
required, and we raise a `MissingArgumentError` if it is missing.
If the argument appears in the url more than once, we return the
last value.
The returned value is always unicode.
"""
return self._get_argument(name, default, self.request.arguments, strip)
def get_arguments(self, name, strip=True):
"""Returns a list of the arguments with the given name.
If the argument is not present, returns an empty list.
The returned values are always unicode.
"""
# Make sure `get_arguments` isn't accidentally being called with a
# positional argument that's assumed to be a default (like in
# `get_argument`.)
assert isinstance(strip, bool)
return self._get_arguments(name, self.request.arguments, strip)
def get_body_argument(self, name, default=_ARG_DEFAULT, strip=True):
"""Returns the value of the argument with the given name
from the request body.
If default is not provided, the argument is considered to be
required, and we raise a `MissingArgumentError` if it is missing.
If the argument appears in the url more than once, we return the
last value.
The returned value is always unicode.
.. versionadded:: 3.2
"""
return self._get_argument(name, default, self.request.body_arguments,
strip)
def get_body_arguments(self, name, strip=True):
"""Returns a list of the body arguments with the given name.
If the argument is not present, returns an empty list.
The returned values are always unicode.
.. versionadded:: 3.2
"""
return self._get_arguments(name, self.request.body_arguments, strip)
def get_query_argument(self, name, default=_ARG_DEFAULT, strip=True):
"""Returns the value of the argument with the given name
from the request query string.
If default is not provided, the argument is considered to be
required, and we raise a `MissingArgumentError` if it is missing.
If the argument appears in the url more than once, we return the
last value.
The returned value is always unicode.
.. versionadded:: 3.2
"""
return self._get_argument(name, default,
self.request.query_arguments, strip)
def get_query_arguments(self, name, strip=True):
"""Returns a list of the query arguments with the given name.
If the argument is not present, returns an empty list.
The returned values are always unicode.
.. versionadded:: 3.2
"""
return self._get_arguments(name, self.request.query_arguments, strip)
def _get_argument(self, name, default, source, strip=True):
args = self._get_arguments(name, source, strip=strip)
if not args:
if default is self._ARG_DEFAULT:
raise MissingArgumentError(name)
return default
return args[-1]
def _get_arguments(self, name, source, strip=True):
values = []
for v in source.get(name, []):
v = self.decode_argument(v, name=name)
if isinstance(v, unicode_type):
# Get rid of any weird control chars (unless decoding gave
# us bytes, in which case leave it alone)
v = RequestHandler._remove_control_chars_regex.sub(" ", v)
if strip:
v = v.strip()
values.append(v)
return values
def decode_argument(self, value, name=None):
"""Decodes an argument from the request.
The argument has been percent-decoded and is now a byte string.
By default, this method decodes the argument as utf-8 and returns
a unicode string, but this may be overridden in subclasses.
This method is used as a filter for both `get_argument()` and for
values extracted from the url and passed to `get()`/`post()`/etc.
The name of the argument is provided if known, but may be None
(e.g. for unnamed groups in the url regex).
"""
try:
return _unicode(value)
except UnicodeDecodeError:
raise HTTPError(400, "Invalid unicode in %s: %r" %
(name or "url", value[:40]))
@property
def cookies(self):
"""An alias for
`self.request.cookies <.httputil.HTTPServerRequest.cookies>`."""
return self.request.cookies
def get_cookie(self, name, default=None):
"""Gets the value of the cookie with the given name, else default."""
if self.request.cookies is not None and name in self.request.cookies:
return self.request.cookies[name].value
return default
def set_cookie(self, name, value, domain=None, expires=None, path="/",
expires_days=None, **kwargs):
"""Sets the given cookie name/value with the given options.
Additional keyword arguments are set on the Cookie.Morsel
directly.
See https://docs.python.org/2/library/cookie.html#Cookie.Morsel
for available attributes.
"""
# The cookie library only accepts type str, in both python 2 and 3
name = escape.native_str(name)
value = escape.native_str(value)
if re.search(r"[\x00-\x20]", name + value):
# Don't let us accidentally inject bad stuff
raise ValueError("Invalid cookie %r: %r" % (name, value))
if not hasattr(self, "_new_cookie"):
self._new_cookie = Cookie.SimpleCookie()
if name in self._new_cookie:
del self._new_cookie[name]
self._new_cookie[name] = value
morsel = self._new_cookie[name]
if domain:
morsel["domain"] = domain
if expires_days is not None and not expires:
expires = datetime.datetime.utcnow() + datetime.timedelta(
days=expires_days)
if expires:
morsel["expires"] = httputil.format_timestamp(expires)
if path:
morsel["path"] = path
for k, v in kwargs.items():
if k == 'max_age':
k = 'max-age'
# skip falsy values for httponly and secure flags because
# SimpleCookie sets them regardless
if k in ['httponly', 'secure'] and not v:
continue
morsel[k] = v
def clear_cookie(self, name, path="/", domain=None):
"""Deletes the cookie with the given name.
Due to limitations of the cookie protocol, you must pass the same
path and domain to clear a cookie as were used when that cookie
was set (but there is no way to find out on the server side
which values were used for a given cookie).
"""
expires = datetime.datetime.utcnow() - datetime.timedelta(days=365)
self.set_cookie(name, value="", path=path, expires=expires,
domain=domain)
def clear_all_cookies(self, path="/", domain=None):
"""Deletes all the cookies the user sent with this request.
See `clear_cookie` for more information on the path and domain
parameters.
.. versionchanged:: 3.2
Added the ``path`` and ``domain`` parameters.
"""
for name in self.request.cookies:
self.clear_cookie(name, path=path, domain=domain)
def set_secure_cookie(self, name, value, expires_days=30, version=None,
**kwargs):
"""Signs and timestamps a cookie so it cannot be forged.
You must specify the ``cookie_secret`` setting in your Application
to use this method. It should be a long, random sequence of bytes
to be used as the HMAC secret for the signature.
To read a cookie set with this method, use `get_secure_cookie()`.
Note that the ``expires_days`` parameter sets the lifetime of the
cookie in the browser, but is independent of the ``max_age_days``
parameter to `get_secure_cookie`.
Secure cookies may contain arbitrary byte values, not just unicode
strings (unlike regular cookies)
.. versionchanged:: 3.2.1
Added the ``version`` argument. Introduced cookie version 2
and made it the default.
"""
self.set_cookie(name, self.create_signed_value(name, value,
version=version),
expires_days=expires_days, **kwargs)
def create_signed_value(self, name, value, version=None):
"""Signs and timestamps a string so it cannot be forged.
Normally used via set_secure_cookie, but provided as a separate
method for non-cookie uses. To decode a value not stored
as a cookie use the optional value argument to get_secure_cookie.
.. versionchanged:: 3.2.1
Added the ``version`` argument. Introduced cookie version 2
and made it the default.
"""
self.require_setting("cookie_secret", "secure cookies")
secret = self.application.settings["cookie_secret"]
key_version = None
if isinstance(secret, dict):
if self.application.settings.get("key_version") is None:
raise Exception("key_version setting must be used for secret_key dicts")
key_version = self.application.settings["key_version"]
return create_signed_value(secret, name, value, version=version,
key_version=key_version)
def get_secure_cookie(self, name, value=None, max_age_days=31,
min_version=None):
"""Returns the given signed cookie if it validates, or None.
The decoded cookie value is returned as a byte string (unlike
`get_cookie`).
.. versionchanged:: 3.2.1
Added the ``min_version`` argument. Introduced cookie version 2;
both versions 1 and 2 are accepted by default.
"""
self.require_setting("cookie_secret", "secure cookies")
if value is None:
value = self.get_cookie(name)
return decode_signed_value(self.application.settings["cookie_secret"],
name, value, max_age_days=max_age_days,
min_version=min_version)
def get_secure_cookie_key_version(self, name, value=None):
    """Returns the signing key version of the secure cookie.
    The version is returned as int.
    """
    self.require_setting("cookie_secret", "secure cookies")
    # Fall back to the cookie jar only when no value was supplied.
    raw = value if value is not None else self.get_cookie(name)
    return get_signature_key_version(raw)
def redirect(self, url, permanent=False, status=None):
    """Sends a redirect to the given (optionally relative) URL.
    If ``status`` is given it is used as the HTTP status code;
    otherwise 301 (permanent) or 302 (temporary) is chosen based on
    the ``permanent`` argument.  The default is 302 (temporary).
    """
    # Redirects are expressed entirely in headers, so it is too late
    # once the headers have already gone out.
    if self._headers_written:
        raise Exception("Cannot redirect after headers have been written")
    if status is not None:
        # Only a 3xx code makes sense for a redirect.
        assert isinstance(status, int) and 300 <= status <= 399
    else:
        status = 301 if permanent else 302
    self.set_status(status)
    self.set_header("Location", utf8(url))
    self.finish()
def write(self, chunk):
    """Writes the given chunk to the output buffer.
    To push the buffered output to the network, use the `flush` method.
    A dict chunk is serialized as JSON and the response ``Content-Type``
    is set to ``application/json`` (to send JSON under a different
    ``Content-Type``, call `set_header` *after* calling write()).
    Lists are rejected because a top-level JSON array is vulnerable to
    cross-site attacks; wrap all JSON output in a dictionary.  See
    http://haacked.com/archive/2009/06/25/json-hijacking.aspx/ and
    https://github.com/facebook/tornado/issues/1009
    """
    if self._finished:
        raise RuntimeError("Cannot write() after finish()")
    if not isinstance(chunk, (bytes, unicode_type, dict)):
        message = "write() only accepts bytes, unicode, and dict objects"
        if isinstance(chunk, list):
            message += ". Lists not accepted for security reasons; see http://www.tornadoweb.org/en/stable/web.html#tornado.web.RequestHandler.write"
        raise TypeError(message)
    if isinstance(chunk, dict):
        chunk = escape.json_encode(chunk)
        self.set_header("Content-Type", "application/json; charset=UTF-8")
    self._write_buffer.append(utf8(chunk))
def render(self, template_name, **kwargs):
    """Renders the template with the given arguments as the response.
    Generates the page body via `render_string`, splices in any assets
    contributed by active UI modules, and finishes the request with
    the resulting HTML.
    """
    if self._finished:
        raise RuntimeError("Cannot render() after finish()")
    html = self.render_string(template_name, **kwargs)
    # Insert the additional JS and CSS added by the modules on the page
    js_embed = []
    js_files = []
    css_embed = []
    css_files = []
    html_heads = []
    html_bodies = []
    # Collect asset contributions from every UI module rendered on this
    # page.  File lists may be returned as a single string or a sequence.
    for module in getattr(self, "_active_modules", {}).values():
        embed_part = module.embedded_javascript()
        if embed_part:
            js_embed.append(utf8(embed_part))
        file_part = module.javascript_files()
        if file_part:
            if isinstance(file_part, (unicode_type, bytes)):
                js_files.append(file_part)
            else:
                js_files.extend(file_part)
        embed_part = module.embedded_css()
        if embed_part:
            css_embed.append(utf8(embed_part))
        file_part = module.css_files()
        if file_part:
            if isinstance(file_part, (unicode_type, bytes)):
                css_files.append(file_part)
            else:
                css_files.extend(file_part)
        head_part = module.html_head()
        if head_part:
            html_heads.append(utf8(head_part))
        body_part = module.html_body()
        if body_part:
            html_bodies.append(utf8(body_part))
    # Splice scripts in just before </body> and styles/head extras just
    # before </head>.  index/rindex raise ValueError if the rendered
    # template lacks those tags.
    if js_files:
        # Maintain order of JavaScript files given by modules
        js = self.render_linked_js(js_files)
        sloc = html.rindex(b'</body>')
        html = html[:sloc] + utf8(js) + b'\n' + html[sloc:]
    if js_embed:
        js = self.render_embed_js(js_embed)
        sloc = html.rindex(b'</body>')
        html = html[:sloc] + js + b'\n' + html[sloc:]
    if css_files:
        css = self.render_linked_css(css_files)
        hloc = html.index(b'</head>')
        html = html[:hloc] + utf8(css) + b'\n' + html[hloc:]
    if css_embed:
        css = self.render_embed_css(css_embed)
        hloc = html.index(b'</head>')
        html = html[:hloc] + css + b'\n' + html[hloc:]
    if html_heads:
        hloc = html.index(b'</head>')
        html = html[:hloc] + b''.join(html_heads) + b'\n' + html[hloc:]
    if html_bodies:
        hloc = html.index(b'</body>')
        html = html[:hloc] + b''.join(html_bodies) + b'\n' + html[hloc:]
    self.finish(html)
def render_linked_js(self, js_files):
    """Default method used to render the final js links for the
    rendered webpage.
    Override this method in a sub-classed controller to change the output.
    """
    # De-duplicate while preserving the order the modules supplied.
    seen = set()
    ordered = []
    for path in js_files:
        url = path if is_absolute(path) else self.static_url(path)
        if url in seen:
            continue
        seen.add(url)
        ordered.append(url)
    return ''.join('<script src="' + escape.xhtml_escape(url) +
                   '" type="text/javascript"></script>'
                   for url in ordered)
def render_embed_js(self, js_embed):
    """Default method used to render the final embedded js for the
    rendered webpage.
    Override this method in a sub-classed controller to change the output.
    """
    # Wrap the concatenated snippets in a CDATA section so the markup
    # stays valid XHTML.
    parts = [b'<script type="text/javascript">\n//<![CDATA[\n',
             b'\n'.join(js_embed),
             b'\n//]]>\n</script>']
    return b''.join(parts)
def render_linked_css(self, css_files):
    """Default method used to render the final css links for the
    rendered webpage.
    Override this method in a sub-classed controller to change the output.
    """
    # De-duplicate while preserving the order the modules supplied.
    seen = set()
    ordered = []
    for path in css_files:
        url = path if is_absolute(path) else self.static_url(path)
        if url in seen:
            continue
        seen.add(url)
        ordered.append(url)
    return ''.join('<link href="' + escape.xhtml_escape(url) + '" '
                   'type="text/css" rel="stylesheet"/>'
                   for url in ordered)
def render_embed_css(self, css_embed):
    """Default method used to render the final embedded css for the
    rendered webpage.
    Override this method in a sub-classed controller to change the output.
    """
    body = b'\n'.join(css_embed)
    return b''.join([b'<style type="text/css">\n', body, b'\n</style>'])
def render_string(self, template_name, **kwargs):
    """Generate the given template with the given arguments.
    We return the generated byte string (in utf8). To generate and
    write a template as a response, use render() above.
    """
    # If no template_path is specified, use the path of the calling file
    template_path = self.get_template_path()
    if not template_path:
        # Walk up the stack past frames belonging to this module to
        # find the file that invoked render()/render_string().
        frame = sys._getframe(0)
        web_file = frame.f_code.co_filename
        while frame.f_code.co_filename == web_file:
            frame = frame.f_back
        template_path = os.path.dirname(frame.f_code.co_filename)
    # Template loaders are cached per directory on the class; guard the
    # cache with a lock since handlers may run on multiple threads.
    with RequestHandler._template_loader_lock:
        if template_path not in RequestHandler._template_loaders:
            loader = self.create_template_loader(template_path)
            RequestHandler._template_loaders[template_path] = loader
        else:
            loader = RequestHandler._template_loaders[template_path]
    t = loader.load(template_name)
    namespace = self.get_template_namespace()
    namespace.update(kwargs)
    return t.generate(**namespace)
def get_template_namespace(self):
    """Returns a dictionary to be used as the default template namespace.
    May be overridden by subclasses to add or modify values.
    The results of this method will be combined with additional
    defaults in the `tornado.template` module and keyword arguments
    to `render` or `render_string`.
    """
    namespace = {
        "handler": self,
        "request": self.request,
        "current_user": self.current_user,
        "locale": self.locale,
        "_": self.locale.translate,
        "pgettext": self.locale.pgettext,
        "static_url": self.static_url,
        "xsrf_form_html": self.xsrf_form_html,
        "reverse_url": self.reverse_url,
    }
    # UI methods/modules registered on the application are layered on
    # top of (and may override) the defaults above.
    namespace.update(self.ui)
    return namespace
def create_template_loader(self, template_path):
    """Returns a new template loader for the given path.
    May be overridden by subclasses. By default returns a
    directory-based loader on the given path, using the
    ``autoescape`` and ``template_whitespace`` application
    settings. If a ``template_loader`` application setting is
    supplied, uses that instead.
    """
    settings = self.application.settings
    if "template_loader" in settings:
        return settings["template_loader"]
    loader_kwargs = {}
    # autoescape=None means "no escaping", so only forward the setting
    # when the user actually supplied one.
    if "autoescape" in settings:
        loader_kwargs["autoescape"] = settings["autoescape"]
    if "template_whitespace" in settings:
        loader_kwargs["whitespace"] = settings["template_whitespace"]
    return template.Loader(template_path, **loader_kwargs)
def flush(self, include_footers=False, callback=None):
    """Flushes the current output buffer to the network.
    The ``callback`` argument, if given, can be used for flow control:
    it will be run when all flushed data has been written to the socket.
    Note that only one flush callback can be outstanding at a time;
    if another flush occurs before the previous flush's callback
    has been run, the previous callback will be discarded.
    .. versionchanged:: 4.0
       Now returns a `.Future` if no callback is given.
    """
    chunk = b"".join(self._write_buffer)
    self._write_buffer = []
    if not self._headers_written:
        self._headers_written = True
        # The first flush also emits the status line and headers, so
        # give each output transform a chance to rewrite all three.
        for transform in self._transforms:
            self._status_code, self._headers, chunk = \
                transform.transform_first_chunk(
                    self._status_code, self._headers,
                    chunk, include_footers)
        # Ignore the chunk and only write the headers for HEAD requests
        if self.request.method == "HEAD":
            chunk = None
        # Finalize the cookie headers (which have been stored in a side
        # object so an outgoing cookie could be overwritten before it
        # is sent).
        if hasattr(self, "_new_cookie"):
            for cookie in self._new_cookie.values():
                self.add_header("Set-Cookie", cookie.OutputString(None))
        start_line = httputil.ResponseStartLine('',
                                                self._status_code,
                                                self._reason)
        return self.request.connection.write_headers(
            start_line, self._headers, chunk, callback=callback)
    else:
        # Subsequent flushes only transform and write the body chunk.
        for transform in self._transforms:
            chunk = transform.transform_chunk(chunk, include_footers)
        # Ignore the chunk and only write the headers for HEAD requests
        if self.request.method != "HEAD":
            return self.request.connection.write(chunk, callback=callback)
        else:
            # Nothing to write; return an already-resolved Future so
            # callers can still yield on the result.
            future = Future()
            future.set_result(None)
            return future
def finish(self, chunk=None):
    """Finishes this response, ending the HTTP request.
    ``chunk``, if given, is passed to `write` before the response is
    finalized.  May only be called once per request.
    """
    if self._finished:
        raise RuntimeError("finish() called twice")
    if chunk is not None:
        self.write(chunk)
    # Automatically support ETags and add the Content-Length header if
    # we have not flushed any content yet.
    if not self._headers_written:
        if (self._status_code == 200 and
                self.request.method in ("GET", "HEAD") and
                "Etag" not in self._headers):
            self.set_etag_header()
            if self.check_etag_header():
                # The client already has this content: discard the
                # body and respond 304 Not Modified instead.
                self._write_buffer = []
                self.set_status(304)
        if (self._status_code in (204, 304) or
                (self._status_code >= 100 and self._status_code < 200)):
            # These status codes must not carry a message body.
            assert not self._write_buffer, "Cannot send body with %s" % self._status_code
            self._clear_headers_for_304()
        elif "Content-Length" not in self._headers:
            content_length = sum(len(part) for part in self._write_buffer)
            self.set_header("Content-Length", content_length)
    if hasattr(self.request, "connection"):
        # Now that the request is finished, clear the callback we
        # set on the HTTPConnection (which would otherwise prevent the
        # garbage collection of the RequestHandler when there
        # are keepalive connections)
        self.request.connection.set_close_callback(None)
    self.flush(include_footers=True)
    self.request.finish()
    self._log()
    self._finished = True
    self.on_finish()
    self._break_cycles()
def _break_cycles(self):
# Break up a reference cycle between this handler and the
# _ui_module closures to allow for faster GC on CPython.
self.ui = None
def send_error(self, status_code=500, **kwargs):
    """Sends the given HTTP error code to the browser.
    If `flush()` has already been called, it is not possible to send
    an error, so this method will simply terminate the response.
    If output has been written but not yet flushed, it will be discarded
    and replaced with the error page.
    Override `write_error()` to customize the error page that is returned.
    Additional keyword arguments are passed through to `write_error`.
    """
    if self._headers_written:
        gen_log.error("Cannot send error response after headers written")
        if not self._finished:
            # If we get an error between writing headers and finishing,
            # we are unlikely to be able to finish due to a
            # Content-Length mismatch. Try anyway to release the
            # socket.
            try:
                self.finish()
            except Exception:
                gen_log.error("Failed to flush partial response",
                              exc_info=True)
        return
    # Discard any buffered output and already-set headers before
    # rendering the error page.
    self.clear()
    reason = kwargs.get('reason')
    if 'exc_info' in kwargs:
        exception = kwargs['exc_info'][1]
        # An HTTPError carries its own reason phrase; prefer it over
        # the default for the status code.
        if isinstance(exception, HTTPError) and exception.reason:
            reason = exception.reason
    self.set_status(status_code, reason=reason)
    try:
        self.write_error(status_code, **kwargs)
    except Exception:
        app_log.error("Uncaught exception in write_error", exc_info=True)
    if not self._finished:
        self.finish()
def write_error(self, status_code, **kwargs):
    """Override to implement custom error pages.
    ``write_error`` may call `write`, `render`, `set_header`, etc
    to produce output as usual.
    If this error was caused by an uncaught exception (including
    HTTPError), an ``exc_info`` triple will be available as
    ``kwargs["exc_info"]``. Note that this exception may not be
    the "current" exception for purposes of methods like
    ``sys.exc_info()`` or ``traceback.format_exc``.
    """
    if self.settings.get("serve_traceback") and "exc_info" in kwargs:
        # Debug mode: emit a plain-text traceback of the failure.
        self.set_header('Content-Type', 'text/plain')
        for line in traceback.format_exception(*kwargs["exc_info"]):
            self.write(line)
        self.finish()
        return
    # Production mode: a minimal HTML page with code and reason.
    self.finish("<html><title>%(code)d: %(message)s</title>"
                "<body>%(code)d: %(message)s</body></html>" % {
                    "code": status_code,
                    "message": self._reason,
                })
@property
def locale(self):
    """The locale for the current session.
    Determined by either `get_user_locale`, which you can override to
    set the locale based on, e.g., a user preference stored in a
    database, or `get_browser_locale`, which uses the ``Accept-Language``
    header.
    .. versionchanged: 4.1
       Added a property setter.
    """
    # Resolved lazily and cached on the instance: the user-supplied
    # hook wins; the Accept-Language header is the fallback.
    if not hasattr(self, "_locale"):
        self._locale = self.get_user_locale() or self.get_browser_locale()
    assert self._locale
    return self._locale
@locale.setter
def locale(self, value):
    # Allow the locale to be assigned explicitly, e.g. from prepare().
    self._locale = value
def get_user_locale(self):
    """Override to determine the locale from the authenticated user.
    If None is returned, we fall back to `get_browser_locale()`.
    This method should return a `tornado.locale.Locale` object,
    most likely obtained via a call like ``tornado.locale.get("en")``
    """
    # Default: no user preference; defer to the browser's headers.
    return None
def get_browser_locale(self, default="en_US"):
    """Determines the user's locale from ``Accept-Language`` header.
    See http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.4
    """
    if "Accept-Language" in self.request.headers:
        scored = []
        for entry in self.request.headers["Accept-Language"].split(","):
            pieces = entry.strip().split(";")
            # An optional ";q=..." parameter gives the preference
            # weight; entries without one default to 1.0.
            weight = 1.0
            if len(pieces) > 1 and pieces[1].startswith("q="):
                try:
                    weight = float(pieces[1][2:])
                except (ValueError, TypeError):
                    weight = 0.0
            scored.append((pieces[0], weight))
        if scored:
            scored.sort(key=lambda pair: pair[1], reverse=True)
            return locale.get(*[code for code, _ in scored])
    return locale.get(default)
@property
def current_user(self):
    """The authenticated user for this request.
    This is set in one of two ways:
    * A subclass may override `get_current_user()`, which will be called
      automatically the first time ``self.current_user`` is accessed.
      `get_current_user()` will only be called once per request,
      and is cached for future access::
          def get_current_user(self):
              user_cookie = self.get_secure_cookie("user")
              if user_cookie:
                  return json.loads(user_cookie)
              return None
    * It may be set as a normal variable, typically from an overridden
      `prepare()`::
          @gen.coroutine
          def prepare(self):
              user_id_cookie = self.get_secure_cookie("user_id")
              if user_id_cookie:
                  self.current_user = yield load_user(user_id_cookie)
    Note that `prepare()` may be a coroutine while `get_current_user()`
    may not, so the latter form is necessary if loading the user requires
    asynchronous operations.
    The user object may be any type of the application's choosing.
    """
    # Lazily computed and cached; an explicit assignment via the
    # setter below pre-populates the cache.
    if not hasattr(self, "_current_user"):
        self._current_user = self.get_current_user()
    return self._current_user
@current_user.setter
def current_user(self, value):
    self._current_user = value
def get_current_user(self):
    """Override to determine the current user from, e.g., a cookie.
    This method may not be a coroutine.
    """
    # Default: anonymous request.
    return None
def get_login_url(self):
    """Override to customize the login URL based on the request.
    By default, we use the ``login_url`` application setting.
    """
    self.require_setting("login_url", "@tornado.web.authenticated")
    settings = self.application.settings
    return settings["login_url"]
def get_template_path(self):
    """Override to customize template path for each handler.
    By default, we use the ``template_path`` application setting.
    Return None to load templates relative to the calling file.
    """
    settings = self.application.settings
    return settings.get("template_path")
@property
def xsrf_token(self):
    """The XSRF-prevention token for the current user/session.
    To prevent cross-site request forgery, we set an '_xsrf' cookie
    and include the same '_xsrf' value as an argument with all POST
    requests. If the two do not match, we reject the form submission
    as a potential forgery.
    See http://en.wikipedia.org/wiki/Cross-site_request_forgery
    .. versionchanged:: 3.2.2
       The xsrf token will now be have a random mask applied in every
       request, which makes it safe to include the token in pages
       that are compressed. See http://breachattack.com for more
       information on the issue fixed by this change. Old (version 1)
       cookies will be converted to version 2 when this method is called
       unless the ``xsrf_cookie_version`` `Application` setting is
       set to 1.
    .. versionchanged:: 4.3
       The ``xsrf_cookie_kwargs`` `Application` setting may be
       used to supply additional cookie options (which will be
       passed directly to `set_cookie`). For example,
       ``xsrf_cookie_kwargs=dict(httponly=True, secure=True)``
       will set the ``secure`` and ``httponly`` flags on the
       ``_xsrf`` cookie.
    """
    if not hasattr(self, "_xsrf_token"):
        version, token, timestamp = self._get_raw_xsrf_token()
        output_version = self.settings.get("xsrf_cookie_version", 2)
        cookie_kwargs = self.settings.get("xsrf_cookie_kwargs", {})
        if output_version == 1:
            self._xsrf_token = binascii.b2a_hex(token)
        elif output_version == 2:
            # Apply a fresh random mask on every request so the cookie
            # value varies even for the same token (BREACH mitigation).
            mask = os.urandom(4)
            self._xsrf_token = b"|".join([
                b"2",
                binascii.b2a_hex(mask),
                binascii.b2a_hex(_websocket_mask(mask, token)),
                utf8(str(int(timestamp)))])
        else:
            # Bug fix: the version was previously passed as a second
            # exception argument (logging-style) instead of being
            # interpolated into the message.
            raise ValueError("unknown xsrf cookie version %d" %
                             output_version)
        if version is None:
            # Token was newly generated this request; persist it.
            expires_days = 30 if self.current_user else None
            self.set_cookie("_xsrf", self._xsrf_token,
                            expires_days=expires_days,
                            **cookie_kwargs)
    return self._xsrf_token
def _get_raw_xsrf_token(self):
"""Read or generate the xsrf token in its raw form.
The raw_xsrf_token is a tuple containing:
* version: the version of the cookie from which this token was read,
or None if we generated a new token in this request.
* token: the raw token data; random (non-ascii) bytes.
* timestamp: the time this token was generated (will not be accurate
for version 1 cookies)
"""
if not hasattr(self, '_raw_xsrf_token'):
cookie = self.get_cookie("_xsrf")
if cookie:
version, token, timestamp = self._decode_xsrf_token(cookie)
else:
version, token, timestamp = None, None, None
if token is None:
version = None
token = os.urandom(16)
timestamp = time.time()
self._raw_xsrf_token = (version, token, timestamp)
return self._raw_xsrf_token
def _decode_xsrf_token(self, cookie):
    """Convert a cookie string into a the tuple form returned by
    _get_raw_xsrf_token.
    Returns ``(None, None, None)`` for any malformed cookie rather
    than raising.
    """
    try:
        m = _signed_value_version_re.match(utf8(cookie))
        if m:
            version = int(m.group(1))
            if version == 2:
                # v2 layout: "2|hex(mask)|hex(mask XOR token)|timestamp"
                _, mask, masked_token, timestamp = cookie.split("|")
                mask = binascii.a2b_hex(utf8(mask))
                # Unmask by applying the same XOR mask again.
                token = _websocket_mask(
                    mask, binascii.a2b_hex(utf8(masked_token)))
                timestamp = int(timestamp)
                return version, token, timestamp
            else:
                # Treat unknown versions as not present instead of failing.
                raise Exception("Unknown xsrf cookie version")
        else:
            # No version prefix: a legacy v1 cookie (hex token, or the
            # raw value itself if it is not valid hex).
            version = 1
            try:
                token = binascii.a2b_hex(utf8(cookie))
            except (binascii.Error, TypeError):
                token = utf8(cookie)
            # We don't have a usable timestamp in older versions.
            timestamp = int(time.time())
            return (version, token, timestamp)
    except Exception:
        # Catch exceptions and return nothing instead of failing.
        gen_log.debug("Uncaught exception in _decode_xsrf_token",
                      exc_info=True)
        return None, None, None
def check_xsrf_cookie(self):
    """Verifies that the ``_xsrf`` cookie matches the ``_xsrf`` argument.
    To prevent cross-site request forgery, we set an ``_xsrf``
    cookie and include the same value as a non-cookie
    field with all ``POST`` requests. If the two do not match, we
    reject the form submission as a potential forgery.
    The ``_xsrf`` value may be set as either a form field named ``_xsrf``
    or in a custom HTTP header named ``X-XSRFToken`` or ``X-CSRFToken``
    (the latter is accepted for compatibility with Django).
    See http://en.wikipedia.org/wiki/Cross-site_request_forgery
    Prior to release 1.1.1, this check was ignored if the HTTP header
    ``X-Requested-With: XMLHTTPRequest`` was present. This exception
    has been shown to be insecure and has been removed. For more
    information please see
    http://www.djangoproject.com/weblog/2011/feb/08/security/
    http://weblog.rubyonrails.org/2011/2/8/csrf-protection-bypass-in-ruby-on-rails
    .. versionchanged:: 3.2.2
       Added support for cookie version 2. Both versions 1 and 2 are
       supported.
    """
    # The token may arrive as a form field or in either of two headers.
    supplied = (self.get_argument("_xsrf", None) or
                self.request.headers.get("X-Xsrftoken") or
                self.request.headers.get("X-Csrftoken"))
    if not supplied:
        raise HTTPError(403, "'_xsrf' argument missing from POST")
    _, decoded, _ = self._decode_xsrf_token(supplied)
    _, expected, _ = self._get_raw_xsrf_token()
    if not decoded:
        raise HTTPError(403, "'_xsrf' argument has invalid format")
    # Constant-time comparison to avoid timing side channels.
    if not _time_independent_equals(utf8(decoded), utf8(expected)):
        raise HTTPError(403, "XSRF cookie does not match POST argument")
def xsrf_form_html(self):
    """An HTML ``<input/>`` element to be included with all POST forms.
    It defines the ``_xsrf`` input value, which we check on all POST
    requests to prevent cross-site request forgery. If you have set
    the ``xsrf_cookies`` application setting, you must include this
    HTML within all of your HTML forms.
    In a template, this method should be called with ``{% module
    xsrf_form_html() %}``
    See `check_xsrf_cookie()` above for more information.
    """
    escaped_token = escape.xhtml_escape(self.xsrf_token)
    return '<input type="hidden" name="_xsrf" value="' + escaped_token + '"/>'
def static_url(self, path, include_host=None, **kwargs):
    """Returns a static URL for the given relative static file path.
    This method requires you set the ``static_path`` setting in your
    application (which specifies the root directory of your static
    files).
    This method returns a versioned url (by default appending
    ``?v=<signature>``), which allows the static files to be
    cached indefinitely. This can be disabled by passing
    ``include_version=False`` (in the default implementation;
    other static file implementations are not required to support
    this, but they may support other options).
    By default this method returns URLs relative to the current
    host, but if ``include_host`` is true the URL returned will be
    absolute. If this handler has an ``include_host`` attribute,
    that value will be used as the default for all `static_url`
    calls that do not pass ``include_host`` as a keyword argument.
    """
    self.require_setting("static_path", "static_url")
    handler_class = self.settings.get("static_handler_class",
                                      StaticFileHandler)
    if include_host is None:
        # Fall back to a handler-level default, if any.
        include_host = getattr(self, "include_host", False)
    base = ""
    if include_host:
        base = self.request.protocol + "://" + self.request.host
    return base + handler_class.make_static_url(self.settings, path, **kwargs)
def require_setting(self, name, feature="this feature"):
    """Raises an exception if the given app setting is not defined."""
    if self.application.settings.get(name):
        return
    raise Exception("You must define the '%s' setting in your "
                    "application to use %s" % (name, feature))
def reverse_url(self, name, *args):
    """Alias for `Application.reverse_url`."""
    app = self.application
    return app.reverse_url(name, *args)
def compute_etag(self):
    """Computes the etag header to be used for this request.
    By default uses a hash of the content written so far.
    May be overridden to provide custom etag implementations,
    or may return None to disable tornado's default etag support.
    """
    digest = hashlib.sha1()
    for chunk in self._write_buffer:
        digest.update(chunk)
    # Entity-tags are quoted per RFC 7232.
    return '"%s"' % digest.hexdigest()
def set_etag_header(self):
    """Sets the response's Etag header using ``self.compute_etag()``.
    Note: no header will be set if ``compute_etag()`` returns ``None``.
    This method is called automatically when the request is finished.
    """
    etag = self.compute_etag()
    if etag is None:
        return
    self.set_header("Etag", etag)
def check_etag_header(self):
    """Checks the ``Etag`` header against requests's ``If-None-Match``.
    Returns ``True`` if the request's Etag matches and a 304 should be
    returned. For example::
        self.set_etag_header()
        if self.check_etag_header():
            self.set_status(304)
            return
    This method is called automatically when the request is finished,
    but may be called earlier for applications that override
    `compute_etag` and want to do an early check for ``If-None-Match``
    before completing the request. The ``Etag`` header should be set
    (perhaps with `set_etag_header`) before calling this method.
    """
    computed_etag = utf8(self._headers.get("Etag", ""))
    # RFC 7232 allows several entity-tags (weak or strong) in a single
    # If-None-Match header, so collect them all.
    client_etags = re.findall(
        br'\*|(?:W/)?"[^"]*"',
        utf8(self.request.headers.get("If-None-Match", ""))
    )
    if not computed_etag or not client_etags:
        return False
    # "*" matches any current representation.
    if client_etags[0] == b'*':
        return True
    # Use a weak comparison when comparing entity-tags: strip a
    # leading W/ marker before comparing.
    def strip_weak(tag):
        return tag[2:] if tag.startswith(b'W/') else tag
    ours = strip_weak(computed_etag)
    return any(strip_weak(tag) == ours for tag in client_etags)
def _stack_context_handle_exception(self, type, value, traceback):
    # Entry point for exceptions surfaced through the stack context.
    # Always returns True to mark the exception as handled.
    try:
        # For historical reasons _handle_request_exception only takes
        # the exception value instead of the full triple,
        # so re-raise the exception to ensure that it's in
        # sys.exc_info()
        raise_exc_info((type, value, traceback))
    except Exception:
        self._handle_request_exception(value)
    return True
@gen.coroutine
def _execute(self, transforms, *args, **kwargs):
    """Executes this request with the given output transforms."""
    self._transforms = transforms
    try:
        if self.request.method not in self.SUPPORTED_METHODS:
            raise HTTPError(405)
        # Path captures arrive percent-encoded; decode them before
        # handing them to the verb method.
        self.path_args = [self.decode_argument(arg) for arg in args]
        self.path_kwargs = dict((k, self.decode_argument(v, name=k))
                                for (k, v) in kwargs.items())
        # If XSRF cookies are turned on, reject form submissions without
        # the proper cookie
        if self.request.method not in ("GET", "HEAD", "OPTIONS") and \
                self.application.settings.get("xsrf_cookies"):
            self.check_xsrf_cookie()
        # prepare() may be a coroutine; await it if it returned one.
        result = self.prepare()
        if result is not None:
            result = yield result
        if self._prepared_future is not None:
            # Tell the Application we've finished with prepare()
            # and are ready for the body to arrive.
            self._prepared_future.set_result(None)
        if self._finished:
            return
        if _has_stream_request_body(self.__class__):
            # In streaming mode request.body is a Future that signals
            # the body has been completely received. The Future has no
            # result; the data has been passed to self.data_received
            # instead.
            try:
                yield self.request.body
            except iostream.StreamClosedError:
                return
        # Dispatch to get()/post()/etc., awaiting the result when the
        # verb method is itself a coroutine.
        method = getattr(self, self.request.method.lower())
        result = method(*self.path_args, **self.path_kwargs)
        if result is not None:
            result = yield result
        if self._auto_finish and not self._finished:
            self.finish()
    except Exception as e:
        try:
            self._handle_request_exception(e)
        except Exception:
            app_log.error("Exception in exception handler", exc_info=True)
        if (self._prepared_future is not None and
                not self._prepared_future.done()):
            # In case we failed before setting _prepared_future, do it
            # now (to unblock the HTTP server). Note that this is not
            # in a finally block to avoid GC issues prior to Python 3.4.
            self._prepared_future.set_result(None)
def data_received(self, chunk):
    """Implement this method to handle streamed request data.
    Requires the `.stream_request_body` decorator.
    """
    # Abstract by default: streaming handlers must provide their own
    # implementation.
    raise NotImplementedError()
def _log(self):
"""Logs the current request.
Sort of deprecated since this functionality was moved to the
Application, but left in place for the benefit of existing apps
that have overridden this method.
"""
self.application.log_request(self)
def _request_summary(self):
return "%s %s (%s)" % (self.request.method, self.request.uri,
self.request.remote_ip)
def _handle_request_exception(self, e):
    # Central dispatch for exceptions raised while running a handler.
    if isinstance(e, Finish):
        # Not an error; just finish the request without logging.
        if not self._finished:
            self.finish(*e.args)
        return
    try:
        self.log_exception(*sys.exc_info())
    except Exception:
        # An error here should still get a best-effort send_error()
        # to avoid leaking the connection.
        app_log.error("Error in exception logger", exc_info=True)
    if self._finished:
        # Extra errors after the request has been finished should
        # be logged, but there is no reason to continue to try and
        # send a response.
        return
    if isinstance(e, HTTPError):
        # An unknown status code with no reason phrase cannot be
        # serialized into a status line; degrade to a plain 500.
        if e.status_code not in httputil.responses and not e.reason:
            gen_log.error("Bad HTTP status code: %d", e.status_code)
            self.send_error(500, exc_info=sys.exc_info())
        else:
            self.send_error(e.status_code, exc_info=sys.exc_info())
    else:
        self.send_error(500, exc_info=sys.exc_info())
def log_exception(self, typ, value, tb):
    """Override to customize logging of uncaught exceptions.
    By default logs instances of `HTTPError` as warnings without
    stack traces (on the ``tornado.general`` logger), and all
    other exceptions as errors with stack traces (on the
    ``tornado.application`` logger).
    .. versionadded:: 3.1
    """
    if isinstance(value, HTTPError):
        if value.log_message:
            # HTTPErrors are expected failures: one-line warning,
            # no traceback.
            format = "%d %s: " + value.log_message
            args = ([value.status_code, self._request_summary()] +
                    list(value.args))
            gen_log.warning(format, *args)
    else:
        app_log.error("Uncaught exception %s\n%r", self._request_summary(),
                      self.request, exc_info=(typ, value, tb))
def _ui_module(self, name, module):
def render(*args, **kwargs):
if not hasattr(self, "_active_modules"):
self._active_modules = {}
if name not in self._active_modules:
self._active_modules[name] = module(self)
rendered = self._active_modules[name].render(*args, **kwargs)
return rendered
return render
def _ui_method(self, method):
return lambda *args, **kwargs: method(self, *args, **kwargs)
def _clear_headers_for_304(self):
# 304 responses should not contain entity headers (defined in
# http://www.w3.org/Protocols/rfc2616/rfc2616-sec7.html#sec7.1)
# not explicitly allowed by
# http://www.w3.org/Protocols/rfc2616/rfc2616-sec10.html#sec10.3.5
headers = ["Allow", "Content-Encoding", "Content-Language",
"Content-Length", "Content-MD5", "Content-Range",
"Content-Type", "Last-Modified"]
for h in headers:
self.clear_header(h)
def asynchronous(method):
    """Wrap request handler methods with this if they are asynchronous.
    This decorator is for callback-style asynchronous methods; for
    coroutines, use the ``@gen.coroutine`` decorator without
    ``@asynchronous``. (It is legal for legacy reasons to use the two
    decorators together provided ``@asynchronous`` is first, but
    ``@asynchronous`` will be ignored in this case)
    This decorator should only be applied to the :ref:`HTTP verb
    methods <verbs>`; its behavior is undefined for any other method.
    This decorator does not *make* a method asynchronous; it tells
    the framework that the method *is* asynchronous. For this decorator
    to be useful the method must (at least sometimes) do something
    asynchronous.
    If this decorator is given, the response is not finished when the
    method returns. It is up to the request handler to call
    `self.finish() <RequestHandler.finish>` to finish the HTTP
    request. Without this decorator, the request is automatically
    finished when the ``get()`` or ``post()`` method returns. Example:
    .. testcode::
       class MyRequestHandler(RequestHandler):
           @asynchronous
           def get(self):
               http = httpclient.AsyncHTTPClient()
               http.fetch("http://friendfeed.com/", self._on_download)
           def _on_download(self, response):
               self.write("Downloaded!")
               self.finish()
    .. testoutput::
       :hide:
    .. versionchanged:: 3.1
       The ability to use ``@gen.coroutine`` without ``@asynchronous``.
    .. versionchanged:: 4.3 Returning anything but ``None`` or a
       yieldable object from a method decorated with ``@asynchronous``
       is an error. Such return values were previously ignored silently.
    """
    # Delay the IOLoop import because it's not available on app engine.
    from salt.ext.tornado.ioloop import IOLoop
    @functools.wraps(method)
    def wrapper(self, *args, **kwargs):
        # Disable auto-finish: the handler promises to call finish()
        # itself once its callbacks complete.
        self._auto_finish = False
        # Route exceptions raised from callbacks back through the
        # handler's stack-context exception handler.
        with stack_context.ExceptionStackContext(
                self._stack_context_handle_exception):
            result = method(self, *args, **kwargs)
            if result is not None:
                result = gen.convert_yielded(result)
                # If @asynchronous is used with @gen.coroutine, (but
                # not @gen.engine), we can automatically finish the
                # request when the future resolves. Additionally,
                # the Future will swallow any exceptions so we need
                # to throw them back out to the stack context to finish
                # the request.
                def future_complete(f):
                    f.result()
                    if not self._finished:
                        self.finish()
                IOLoop.current().add_future(result, future_complete)
                # Once we have done this, hide the Future from our
                # caller (i.e. RequestHandler._when_complete), which
                # would otherwise set up its own callback and
                # exception handler (resulting in exceptions being
                # logged twice).
                return None
            return result
    return wrapper
def stream_request_body(cls):
    """Apply to `RequestHandler` subclasses to enable streaming body support.

    This decorator implies the following changes:

    * `.HTTPServerRequest.body` is undefined, and body arguments will not
      be included in `RequestHandler.get_argument`.
    * `RequestHandler.prepare` is called when the request headers have been
      read instead of after the entire body has been read.
    * The subclass must define a method ``data_received(self, data):``, which
      will be called zero or more times as data is available.  Note that
      if the request has an empty body, ``data_received`` may not be called.
    * ``prepare`` and ``data_received`` may return Futures (such as via
      ``@gen.coroutine``, in which case the next method will not be called
      until those futures have completed.
    * The regular HTTP method (``post``, ``put``, etc) will be called after
      the entire body has been read.

    See the `file receiver demo <https://github.com/tornadoweb/tornado/tree/master/demos/file_upload/>`_
    for example usage.
    """
    if not issubclass(cls, RequestHandler):
        # Interpolate eagerly: ``TypeError(fmt, arg)`` would store an
        # unformatted (fmt, cls) tuple instead of a readable message.
        raise TypeError("expected subclass of RequestHandler, got %r" % cls)
    cls._stream_request_body = True
    return cls
def _has_stream_request_body(cls):
    """Return True if *cls* was decorated with `stream_request_body`."""
    if not issubclass(cls, RequestHandler):
        # Interpolate eagerly: ``TypeError(fmt, arg)`` would store an
        # unformatted (fmt, cls) tuple instead of a readable message.
        raise TypeError("expected subclass of RequestHandler, got %r" % cls)
    return getattr(cls, '_stream_request_body', False)
def removeslash(method):
    """Use this decorator to remove trailing slashes from the request path.

    For example, a request to ``/foo/`` would redirect to ``/foo`` with this
    decorator. Your request handler mapping should use a regular expression
    like ``r'/foo/*'`` in conjunction with using the decorator.
    """
    @functools.wraps(method)
    def wrapper(self, *args, **kwargs):
        path = self.request.path
        if not path.endswith("/"):
            # No trailing slash: run the handler normally.
            return method(self, *args, **kwargs)
        if self.request.method not in ("GET", "HEAD"):
            # Redirecting a non-idempotent request would drop its body.
            raise HTTPError(404)
        uri = path.rstrip("/")
        if uri:  # don't try to redirect '/' to ''
            if self.request.query:
                uri += "?" + self.request.query
            self.redirect(uri, permanent=True)
    return wrapper
def addslash(method):
    """Use this decorator to add a missing trailing slash to the request path.

    For example, a request to ``/foo`` would redirect to ``/foo/`` with this
    decorator. Your request handler mapping should use a regular expression
    like ``r'/foo/?'`` in conjunction with using the decorator.
    """
    @functools.wraps(method)
    def wrapper(self, *args, **kwargs):
        if self.request.path.endswith("/"):
            # Slash already present: run the handler normally.
            return method(self, *args, **kwargs)
        if self.request.method not in ("GET", "HEAD"):
            # Redirecting a non-idempotent request would drop its body.
            raise HTTPError(404)
        uri = self.request.path + "/"
        if self.request.query:
            uri += "?" + self.request.query
        self.redirect(uri, permanent=True)
    return wrapper
class _ApplicationRouter(ReversibleRuleRouter):
    """Routing implementation used internally by `Application`.

    Provides a binding between `Application` and `RequestHandler`.
    This implementation extends `~.routing.ReversibleRuleRouter` in a couple of ways:
        * it allows to use `RequestHandler` subclasses as `~.routing.Rule` target and
        * it allows to use a list/tuple of rules as `~.routing.Rule` target.
        ``process_rule`` implementation will substitute this list with an appropriate
        `_ApplicationRouter` instance.
    """

    def __init__(self, application, rules=None):
        assert isinstance(application, Application)
        self.application = application
        super(_ApplicationRouter, self).__init__(rules)

    def process_rule(self, rule):
        processed = super(_ApplicationRouter, self).process_rule(rule)

        if isinstance(processed.target, (list, tuple)):
            # A nested rule list becomes its own sub-router.
            processed.target = _ApplicationRouter(
                self.application, processed.target)

        return processed

    def get_target_delegate(self, target, request, **target_params):
        # RequestHandler subclasses are dispatched through the Application's
        # delegate machinery; anything else uses the generic routing path.
        if isclass(target) and issubclass(target, RequestHandler):
            return self.application.get_handler_delegate(
                request, target, **target_params)

        return super(_ApplicationRouter, self).get_target_delegate(
            target, request, **target_params)
class Application(ReversibleRouter):
    """A collection of request handlers that make up a web application.

    Instances of this class are callable and can be passed directly to
    HTTPServer to serve the application::

        application = web.Application([
            (r"/", MainPageHandler),
        ])
        http_server = httpserver.HTTPServer(application)
        http_server.listen(8080)
        ioloop.IOLoop.current().start()

    The constructor for this class takes in a list of `~.routing.Rule`
    objects or tuples of values corresponding to the arguments of
    `~.routing.Rule` constructor: ``(matcher, target, [target_kwargs], [name])``,
    the values in square brackets being optional. The default matcher is
    `~.routing.PathMatches`, so ``(regexp, target)`` tuples can also be used
    instead of ``(PathMatches(regexp), target)``.

    A common routing target is a `RequestHandler` subclass, but you can also
    use lists of rules as a target, which create a nested routing configuration::

        application = web.Application([
            (HostMatches("example.com"), [
                (r"/", MainPageHandler),
                (r"/feed", FeedHandler),
            ]),
        ])

    In addition to this you can use nested `~.routing.Router` instances,
    `~.httputil.HTTPMessageDelegate` subclasses and callables as routing targets
    (see `~.routing` module docs for more information).

    When we receive requests, we iterate over the list in order and
    instantiate an instance of the first request class whose regexp
    matches the request path. The request class can be specified as
    either a class object or a (fully-qualified) name.

    A dictionary may be passed as the third element (``target_kwargs``)
    of the tuple, which will be used as keyword arguments to the handler's
    constructor and `~RequestHandler.initialize` method. This pattern
    is used for the `StaticFileHandler` in this example (note that a
    `StaticFileHandler` can be installed automatically with the
    static_path setting described below)::

        application = web.Application([
            (r"/static/(.*)", web.StaticFileHandler, {"path": "/var/www"}),
        ])

    We support virtual hosts with the `add_handlers` method, which takes in
    a host regular expression as the first argument::

        application.add_handlers(r"www\.myhost\.com", [
            (r"/article/([0-9]+)", ArticleHandler),
        ])

    If there's no match for the current request's host, then ``default_host``
    parameter value is matched against host regular expressions.

    You can serve static files by sending the ``static_path`` setting
    as a keyword argument. We will serve those files from the
    ``/static/`` URI (this is configurable with the
    ``static_url_prefix`` setting), and we will serve ``/favicon.ico``
    and ``/robots.txt`` from the same directory. A custom subclass of
    `StaticFileHandler` can be specified with the
    ``static_handler_class`` setting.

    .. versionchanged:: 4.5
       Integration with the new `tornado.routing` module.
    """
    def __init__(self, handlers=None, default_host=None, transforms=None,
                 **settings):
        if transforms is None:
            self.transforms = []
            # Output transforms apply to every response; compression is
            # opt-in via compress_response (or the deprecated gzip alias).
            if settings.get("compress_response") or settings.get("gzip"):
                self.transforms.append(GZipContentEncoding)
        else:
            self.transforms = transforms
        self.default_host = default_host
        self.settings = settings
        # UI modules available to every template; user modules are merged in
        # by _load_ui_modules below.
        self.ui_modules = {'linkify': _linkify,
                           'xsrf_form_html': _xsrf_form_html,
                           'Template': TemplateModule,
                           }
        self.ui_methods = {}
        self._load_ui_modules(settings.get("ui_modules", {}))
        self._load_ui_methods(settings.get("ui_methods", {}))
        if self.settings.get("static_path"):
            # Prepend routes for static assets, /favicon.ico and /robots.txt
            # so they take precedence over user-supplied handlers.
            path = self.settings["static_path"]
            handlers = list(handlers or [])
            static_url_prefix = settings.get("static_url_prefix",
                                             "/static/")
            static_handler_class = settings.get("static_handler_class",
                                                StaticFileHandler)
            static_handler_args = settings.get("static_handler_args", {})
            static_handler_args['path'] = path
            for pattern in [re.escape(static_url_prefix) + r"(.*)",
                            r"/(favicon\.ico)", r"/(robots\.txt)"]:
                handlers.insert(0, (pattern, static_handler_class,
                                    static_handler_args))

        if self.settings.get('debug'):
            # Debug mode turns on several development conveniences unless
            # they are individually overridden in settings.
            self.settings.setdefault('autoreload', True)
            self.settings.setdefault('compiled_template_cache', False)
            self.settings.setdefault('static_hash_cache', False)
            self.settings.setdefault('serve_traceback', True)

        # default_router wraps wildcard_router so that host-specific rules
        # added later (add_handlers) can be consulted before the catch-all.
        self.wildcard_router = _ApplicationRouter(self, handlers)
        self.default_router = _ApplicationRouter(self, [
            Rule(AnyMatches(), self.wildcard_router)
        ])

        # Automatically reload modified modules
        if self.settings.get('autoreload'):
            from salt.ext.tornado import autoreload
            autoreload.start()

    def listen(self, port, address="", **kwargs):
        """Starts an HTTP server for this application on the given port.

        This is a convenience alias for creating an `.HTTPServer`
        object and calling its listen method.  Keyword arguments not
        supported by `HTTPServer.listen <.TCPServer.listen>` are passed to the
        `.HTTPServer` constructor.  For advanced uses
        (e.g. multi-process mode), do not use this method; create an
        `.HTTPServer` and call its
        `.TCPServer.bind`/`.TCPServer.start` methods directly.

        Note that after calling this method you still need to call
        ``IOLoop.current().start()`` to start the server.

        Returns the `.HTTPServer` object.

        .. versionchanged:: 4.3
           Now returns the `.HTTPServer` object.
        """
        # import is here rather than top level because HTTPServer
        # is not importable on appengine
        from salt.ext.tornado.httpserver import HTTPServer
        server = HTTPServer(self, **kwargs)
        server.listen(port, address)
        return server

    def add_handlers(self, host_pattern, host_handlers):
        """Appends the given handlers to our handler list.

        Host patterns are processed sequentially in the order they were
        added. All matching patterns will be considered.
        """
        host_matcher = HostMatches(host_pattern)
        rule = Rule(host_matcher, _ApplicationRouter(self, host_handlers))

        # Insert before the final catch-all rule so the wildcard router
        # stays last.
        self.default_router.rules.insert(-1, rule)

        if self.default_host is not None:
            self.wildcard_router.add_rules([(
                DefaultHostMatches(self, host_matcher.host_pattern),
                host_handlers
            )])

    def add_transform(self, transform_class):
        # Register an additional output transform applied to every response.
        self.transforms.append(transform_class)

    def _load_ui_methods(self, methods):
        # Accepts a module, a list of modules/dicts, or a dict of callables.
        if isinstance(methods, types.ModuleType):
            self._load_ui_methods(dict((n, getattr(methods, n))
                                       for n in dir(methods)))
        elif isinstance(methods, list):
            for m in methods:
                self._load_ui_methods(m)
        else:
            for name, fn in methods.items():
                # Skip private names and capitalized names (the latter are
                # treated as UI *modules*, not methods).
                if not name.startswith("_") and hasattr(fn, "__call__") \
                        and name[0].lower() == name[0]:
                    self.ui_methods[name] = fn

    def _load_ui_modules(self, modules):
        # Accepts a module, a list of modules/dicts, or a dict of classes.
        if isinstance(modules, types.ModuleType):
            self._load_ui_modules(dict((n, getattr(modules, n))
                                       for n in dir(modules)))
        elif isinstance(modules, list):
            for m in modules:
                self._load_ui_modules(m)
        else:
            assert isinstance(modules, dict)
            for name, cls in modules.items():
                try:
                    # Only register actual UIModule subclasses; other
                    # attributes (functions, constants) raise TypeError in
                    # issubclass and are ignored.
                    if issubclass(cls, UIModule):
                        self.ui_modules[name] = cls
                except TypeError:
                    pass

    def __call__(self, request):
        # Legacy HTTPServer interface
        dispatcher = self.find_handler(request)
        return dispatcher.execute()

    def find_handler(self, request, **kwargs):
        # Try the configured routes first, then the application-wide
        # default handler, and finally a generic 404.
        route = self.default_router.find_handler(request)
        if route is not None:
            return route

        if self.settings.get('default_handler_class'):
            return self.get_handler_delegate(
                request,
                self.settings['default_handler_class'],
                self.settings.get('default_handler_args', {}))

        return self.get_handler_delegate(
            request, ErrorHandler, {'status_code': 404})

    def get_handler_delegate(self, request, target_class, target_kwargs=None,
                             path_args=None, path_kwargs=None):
        """Returns `~.httputil.HTTPMessageDelegate` that can serve a request
        for application and `RequestHandler` subclass.

        :arg httputil.HTTPServerRequest request: current HTTP request.
        :arg RequestHandler target_class: a `RequestHandler` class.
        :arg dict target_kwargs: keyword arguments for ``target_class`` constructor.
        :arg list path_args: positional arguments for ``target_class`` HTTP method that
            will be executed while handling a request (``get``, ``post`` or any other).
        :arg dict path_kwargs: keyword arguments for ``target_class`` HTTP method.
        """
        return _HandlerDelegate(
            self, request, target_class, target_kwargs, path_args, path_kwargs)

    def reverse_url(self, name, *args):
        """Returns a URL path for handler named ``name``

        The handler must be added to the application as a named `URLSpec`.

        Args will be substituted for capturing groups in the `URLSpec` regex.
        They will be converted to strings if necessary, encoded as utf8,
        and url-escaped.
        """
        reversed_url = self.default_router.reverse_url(name, *args)
        if reversed_url is not None:
            return reversed_url

        raise KeyError("%s not found in named urls" % name)

    def log_request(self, handler):
        """Writes a completed HTTP request to the logs.

        By default writes to the python root logger.  To change
        this behavior either subclass Application and override this method,
        or pass a function in the application settings dictionary as
        ``log_function``.
        """
        if "log_function" in self.settings:
            self.settings["log_function"](handler)
            return
        # Log level follows the response class: 2xx/3xx info,
        # 4xx warning, 5xx error.
        if handler.get_status() < 400:
            log_method = access_log.info
        elif handler.get_status() < 500:
            log_method = access_log.warning
        else:
            log_method = access_log.error
        request_time = 1000.0 * handler.request.request_time()
        log_method("%d %s %.2fms", handler.get_status(),
                   handler._request_summary(), request_time)
class _HandlerDelegate(httputil.HTTPMessageDelegate):
    """Bridges the HTTP server's message events to a `RequestHandler`.

    Created per-request by `Application.get_handler_delegate`.  Buffers
    the request body in memory, or forwards chunks to the handler when
    the handler class is decorated with ``@stream_request_body``.
    """
    def __init__(self, application, request, handler_class, handler_kwargs,
                 path_args, path_kwargs):
        self.application = application
        self.connection = request.connection
        self.request = request
        self.handler_class = handler_class
        self.handler_kwargs = handler_kwargs or {}
        self.path_args = path_args or []
        self.path_kwargs = path_kwargs or {}
        # Buffered body chunks; unused in streaming mode.
        self.chunks = []
        self.stream_request_body = _has_stream_request_body(self.handler_class)

    def headers_received(self, start_line, headers):
        if self.stream_request_body:
            # In streaming mode, request.body is a Future that resolves
            # (to None) once the full body has been received, and the
            # handler starts executing as soon as headers arrive.
            self.request.body = Future()
            return self.execute()

    def data_received(self, data):
        if self.stream_request_body:
            return self.handler.data_received(data)
        else:
            self.chunks.append(data)

    def finish(self):
        if self.stream_request_body:
            self.request.body.set_result(None)
        else:
            # Buffered mode: assemble the body, parse form arguments, and
            # only then run the handler.
            self.request.body = b''.join(self.chunks)
            self.request._parse_body()
            self.execute()

    def on_connection_close(self):
        if self.stream_request_body:
            self.handler.on_connection_close()
        else:
            # Drop buffered chunks to free memory for an aborted request.
            self.chunks = None

    def execute(self):
        # If template cache is disabled (usually in the debug mode),
        # re-compile templates and reload static files on every
        # request so you don't need to restart to see changes
        if not self.application.settings.get("compiled_template_cache", True):
            with RequestHandler._template_loader_lock:
                for loader in RequestHandler._template_loaders.values():
                    loader.reset()
        if not self.application.settings.get('static_hash_cache', True):
            StaticFileHandler.reset()

        self.handler = self.handler_class(self.application, self.request,
                                          **self.handler_kwargs)
        transforms = [t(self.request) for t in self.application.transforms]

        if self.stream_request_body:
            self.handler._prepared_future = Future()
        # Note that if an exception escapes handler._execute it will be
        # trapped in the Future it returns (which we are ignoring here,
        # leaving it to be logged when the Future is GC'd).
        # However, that shouldn't happen because _execute has a blanket
        # except handler, and we cannot easily access the IOLoop here to
        # call add_future (because of the requirement to remain compatible
        # with WSGI)
        self.handler._execute(transforms, *self.path_args,
                              **self.path_kwargs)
        # If we are streaming the request body, then execute() is finished
        # when the handler has prepared to receive the body. If not,
        # it doesn't matter when execute() finishes (so we return None)
        return self.handler._prepared_future
class HTTPError(Exception):
    """An exception that will turn into an HTTP error response.

    Raising an `HTTPError` is a convenient alternative to calling
    `RequestHandler.send_error` since it automatically ends the
    current function.

    To customize the response sent with an `HTTPError`, override
    `RequestHandler.write_error`.

    :arg int status_code: HTTP status code.  Must be listed in
        `httplib.responses <http.client.responses>` unless the ``reason``
        keyword argument is given.
    :arg string log_message: Message to be written to the log for this error
        (will not be shown to the user unless the `Application` is in debug
        mode).  May contain ``%s``-style placeholders, which will be filled
        in with remaining positional parameters.
    :arg string reason: Keyword-only argument.  The HTTP "reason" phrase
        to pass in the status line along with ``status_code``.  Normally
        determined automatically from ``status_code``, but can be used
        to use a non-standard numeric code.
    """
    def __init__(self, status_code=500, log_message=None, *args, **kwargs):
        self.status_code = status_code
        self.log_message = log_message
        self.args = args
        self.reason = kwargs.get('reason', None)
        if log_message and not args:
            # Without interpolation args, escape literal % so that the
            # ``log_message % args`` in __str__ is always safe.
            self.log_message = log_message.replace('%', '%%')

    def __str__(self):
        reason = self.reason or httputil.responses.get(self.status_code,
                                                       'Unknown')
        message = "HTTP %d: %s" % (self.status_code, reason)
        if not self.log_message:
            return message
        return message + " (" + (self.log_message % self.args) + ")"
class Finish(Exception):
    """An exception that ends the request without producing an error response.

    When `Finish` is raised in a `RequestHandler`, the request will
    end (calling `RequestHandler.finish` if it hasn't already been
    called), but the error-handling methods (including
    `RequestHandler.write_error`) will not be called.

    If `Finish()` was created with no arguments, the pending response
    will be sent as-is. If `Finish()` was given an argument, that
    argument will be passed to `RequestHandler.finish()`.

    This can be a more convenient way to implement custom error pages
    than overriding ``write_error`` (especially in library code)::

        if self.current_user is None:
            self.set_status(401)
            self.set_header('WWW-Authenticate', 'Basic realm="something"')
            raise Finish()

    .. versionchanged:: 4.3
       Arguments passed to ``Finish()`` will be passed on to
       `RequestHandler.finish`.
    """
    # Pure control-flow signal: carries no state beyond Exception.args.
    pass
class MissingArgumentError(HTTPError):
    """Exception raised by `RequestHandler.get_argument`.

    This is a subclass of `HTTPError`, so if it is uncaught a 400 response
    code will be used instead of 500 (and a stack trace will not be logged).

    .. versionadded:: 3.1
    """
    def __init__(self, arg_name):
        # A missing argument is a client error, hence 400 rather than 500.
        self.arg_name = arg_name
        super(MissingArgumentError, self).__init__(
            400, 'Missing argument %s' % arg_name)
class ErrorHandler(RequestHandler):
    """Generates an error response with ``status_code`` for all requests."""
    def initialize(self, status_code):
        # Record the configured code on the response immediately; ``prepare``
        # raises it as an HTTPError before any verb method runs.
        self.set_status(status_code)

    def prepare(self):
        raise HTTPError(self._status_code)

    def check_xsrf_cookie(self):
        # POSTs to an ErrorHandler don't actually have side effects,
        # so we don't need to check the xsrf token.  This allows POSTs
        # to the wrong url to return a 404 instead of 403.
        pass
class RedirectHandler(RequestHandler):
    """Redirects the client to the given URL for all GET requests.

    You should provide the keyword argument ``url`` to the handler, e.g.::

        application = web.Application([
            (r"/oldpath", web.RedirectHandler, {"url": "/newpath"}),
        ])

    `RedirectHandler` supports regular expression substitutions. E.g., to
    swap the first and second parts of a path while preserving the remainder::

        application = web.Application([
            (r"/(.*?)/(.*?)/(.*)", web.RedirectHandler, {"url": "/{1}/{0}/{2}"}),
        ])

    The final URL is formatted with `str.format` and the substrings that match
    the capturing groups. In the above example, a request to "/a/b/c" would be
    formatted like::

        str.format("/{1}/{0}/{2}", "a", "b", "c")  # -> "/b/a/c"

    Use Python's :ref:`format string syntax <formatstrings>` to customize how
    values are substituted.

    .. versionchanged:: 4.5
       Added support for substitutions into the destination URL.
    """
    def initialize(self, url, permanent=True):
        # Destination template and redirect kind are fixed per route.
        self._permanent = permanent
        self._url = url

    def get(self, *args):
        # Substitute the captured path groups into the destination template.
        destination = self._url.format(*args)
        self.redirect(destination, permanent=self._permanent)
class StaticFileHandler(RequestHandler):
"""A simple handler that can serve static content from a directory.
A `StaticFileHandler` is configured automatically if you pass the
``static_path`` keyword argument to `Application`. This handler
can be customized with the ``static_url_prefix``, ``static_handler_class``,
and ``static_handler_args`` settings.
To map an additional path to this handler for a static data directory
you would add a line to your application like::
application = web.Application([
(r"/content/(.*)", web.StaticFileHandler, {"path": "/var/www"}),
])
The handler constructor requires a ``path`` argument, which specifies the
local root directory of the content to be served.
Note that a capture group in the regex is required to parse the value for
the ``path`` argument to the get() method (different than the constructor
argument above); see `URLSpec` for details.
To serve a file like ``index.html`` automatically when a directory is
requested, set ``static_handler_args=dict(default_filename="index.html")``
in your application settings, or add ``default_filename`` as an initializer
argument for your ``StaticFileHandler``.
To maximize the effectiveness of browser caching, this class supports
versioned urls (by default using the argument ``?v=``). If a version
is given, we instruct the browser to cache this file indefinitely.
`make_static_url` (also available as `RequestHandler.static_url`) can
be used to construct a versioned url.
This handler is intended primarily for use in development and light-duty
file serving; for heavy traffic it will be more efficient to use
a dedicated static file server (such as nginx or Apache). We support
the HTTP ``Accept-Ranges`` mechanism to return partial content (because
some browsers require this functionality to be present to seek in
HTML5 audio or video).
**Subclassing notes**
This class is designed to be extensible by subclassing, but because
of the way static urls are generated with class methods rather than
instance methods, the inheritance patterns are somewhat unusual.
Be sure to use the ``@classmethod`` decorator when overriding a
class method. Instance methods may use the attributes ``self.path``
``self.absolute_path``, and ``self.modified``.
Subclasses should only override methods discussed in this section;
overriding other methods is error-prone. Overriding
``StaticFileHandler.get`` is particularly problematic due to the
tight coupling with ``compute_etag`` and other methods.
To change the way static urls are generated (e.g. to match the behavior
of another server or CDN), override `make_static_url`, `parse_url_path`,
`get_cache_time`, and/or `get_version`.
To replace all interaction with the filesystem (e.g. to serve
static content from a database), override `get_content`,
`get_content_size`, `get_modified_time`, `get_absolute_path`, and
`validate_absolute_path`.
.. versionchanged:: 3.1
Many of the methods for subclasses were added in Tornado 3.1.
"""
CACHE_MAX_AGE = 86400 * 365 * 10 # 10 years
_static_hashes = {} # type: typing.Dict
_lock = threading.Lock() # protects _static_hashes
def initialize(self, path, default_filename=None):
self.root = path
self.default_filename = default_filename
    @classmethod
    def reset(cls):
        """Discard all cached static content hashes (thread-safe)."""
        with cls._lock:
            cls._static_hashes = {}
    def head(self, path):
        # HEAD is GET without the body; all header logic is shared.
        return self.get(path, include_body=False)
    @gen.coroutine
    def get(self, path, include_body=True):
        """Serve the file at ``path`` relative to the static root.

        Handles conditional requests (304) and HTTP ``Range`` requests;
        ``include_body=False`` is used by `head`.
        """
        # Set up our path instance variables.
        self.path = self.parse_url_path(path)
        del path  # make sure we don't refer to path instead of self.path again
        absolute_path = self.get_absolute_path(self.root, self.path)
        self.absolute_path = self.validate_absolute_path(
            self.root, absolute_path)
        if self.absolute_path is None:
            # validate_absolute_path already redirected (or raised).
            return

        self.modified = self.get_modified_time()
        self.set_headers()

        if self.should_return_304():
            self.set_status(304)
            return

        request_range = None
        range_header = self.request.headers.get("Range")
        if range_header:
            # As per RFC 2616 14.16, if an invalid Range header is specified,
            # the request will be treated as if the header didn't exist.
            request_range = httputil._parse_request_range(range_header)

        size = self.get_content_size()
        if request_range:
            start, end = request_range
            if (start is not None and start >= size) or end == 0:
                # As per RFC 2616 14.35.1, a range is not satisfiable only: if
                # the first requested byte is equal to or greater than the
                # content, or when a suffix with length 0 is specified
                self.set_status(416)  # Range Not Satisfiable
                self.set_header("Content-Type", "text/plain")
                self.set_header("Content-Range", "bytes */%s" % (size, ))
                return
            if start is not None and start < 0:
                # Negative start means a suffix range ("last N bytes").
                start += size
            if end is not None and end > size:
                # Clients sometimes blindly use a large range to limit their
                # download size; cap the endpoint at the actual file size.
                end = size
            # Note: only return HTTP 206 if less than the entire range has been
            # requested. Not only is this semantically correct, but Chrome
            # refuses to play audio if it gets an HTTP 206 in response to
            # ``Range: bytes=0-``.
            if size != (end or size) - (start or 0):
                self.set_status(206)  # Partial Content
                self.set_header("Content-Range",
                                httputil._get_content_range(start, end, size))
        else:
            start = end = None

        if start is not None and end is not None:
            content_length = end - start
        elif end is not None:
            content_length = end
        elif start is not None:
            content_length = size - start
        else:
            content_length = size
        self.set_header("Content-Length", content_length)

        if include_body:
            content = self.get_content(self.absolute_path, start, end)
            if isinstance(content, bytes):
                content = [content]
            for chunk in content:
                try:
                    self.write(chunk)
                    yield self.flush()
                except iostream.StreamClosedError:
                    # Client disconnected mid-transfer; nothing more to do.
                    return
        else:
            assert self.request.method == "HEAD"
def compute_etag(self):
"""Sets the ``Etag`` header based on static url version.
This allows efficient ``If-None-Match`` checks against cached
versions, and sends the correct ``Etag`` for a partial response
(i.e. the same ``Etag`` as the full file).
.. versionadded:: 3.1
"""
version_hash = self._get_cached_version(self.absolute_path)
if not version_hash:
return None
return '"%s"' % (version_hash, )
    def set_headers(self):
        """Sets the content and caching headers on the response.

        .. versionadded:: 3.1
        """
        self.set_header("Accept-Ranges", "bytes")
        self.set_etag_header()

        if self.modified is not None:
            self.set_header("Last-Modified", self.modified)

        content_type = self.get_content_type()
        if content_type:
            self.set_header("Content-Type", content_type)

        cache_time = self.get_cache_time(self.path, self.modified,
                                         content_type)
        if cache_time > 0:
            # Emit both Expires (HTTP/1.0) and Cache-Control (HTTP/1.1).
            self.set_header("Expires", datetime.datetime.utcnow() +
                            datetime.timedelta(seconds=cache_time))
            self.set_header("Cache-Control", "max-age=" + str(cache_time))

        # Subclass hook for additional headers.
        self.set_extra_headers(self.path)
def should_return_304(self):
"""Returns True if the headers indicate that we should return 304.
.. versionadded:: 3.1
"""
if self.check_etag_header():
return True
# Check the If-Modified-Since, and don't send the result if the
# content has not been modified
ims_value = self.request.headers.get("If-Modified-Since")
if ims_value is not None:
date_tuple = email.utils.parsedate(ims_value)
if date_tuple is not None:
if_since = datetime.datetime(*date_tuple[:6])
if if_since >= self.modified:
return True
return False
@classmethod
def get_absolute_path(cls, root, path):
"""Returns the absolute location of ``path`` relative to ``root``.
``root`` is the path configured for this `StaticFileHandler`
(in most cases the ``static_path`` `Application` setting).
This class method may be overridden in subclasses. By default
it returns a filesystem path, but other strings may be used
as long as they are unique and understood by the subclass's
overridden `get_content`.
.. versionadded:: 3.1
"""
abspath = os.path.abspath(os.path.join(root, path))
return abspath
    def validate_absolute_path(self, root, absolute_path):
        """Validate and return the absolute path.

        ``root`` is the configured path for the `StaticFileHandler`,
        and ``path`` is the result of `get_absolute_path`

        This is an instance method called during request processing,
        so it may raise `HTTPError` or use methods like
        `RequestHandler.redirect` (return None after redirecting to
        halt further processing).  This is where 404 errors for missing files
        are generated.

        This method may modify the path before returning it, but note that
        any such modifications will not be understood by `make_static_url`.

        In instance methods, this method's result is available as
        ``self.absolute_path``.

        .. versionadded:: 3.1
        """
        # os.path.abspath strips a trailing /.
        # We must add it back to `root` so that we only match files
        # in a directory named `root` instead of files starting with
        # that prefix.
        root = os.path.abspath(root)
        if not root.endswith(os.path.sep):
            # abspath always removes a trailing slash, except when
            # root is '/'. This is an unusual case, but several projects
            # have independently discovered this technique to disable
            # Tornado's path validation and (hopefully) do their own,
            # so we need to support it.
            root += os.path.sep
        # The trailing slash also needs to be temporarily added back
        # the requested path so a request to root/ will match.
        if not (absolute_path + os.path.sep).startswith(root):
            # Path escapes the static root (e.g. via '..') -- refuse.
            raise HTTPError(403, "%s is not in root static directory",
                            self.path)
        if (os.path.isdir(absolute_path) and
                self.default_filename is not None):
            # need to look at the request.path here for when path is empty
            # but there is some prefix to the path that was already
            # trimmed by the routing
            if not self.request.path.endswith("/"):
                self.redirect(self.request.path + "/", permanent=True)
                return
            absolute_path = os.path.join(absolute_path, self.default_filename)
        if not os.path.exists(absolute_path):
            raise HTTPError(404)
        if not os.path.isfile(absolute_path):
            raise HTTPError(403, "%s is not a file", self.path)
        return absolute_path
    @classmethod
    def get_content(cls, abspath, start=None, end=None):
        """Retrieve the content of the requested resource which is located
        at the given absolute path.

        This class method may be overridden by subclasses.  Note that its
        signature is different from other overridable class methods
        (no ``settings`` argument); this is deliberate to ensure that
        ``abspath`` is able to stand on its own as a cache key.

        This method should either return a byte string or an iterator
        of byte strings.  The latter is preferred for large files
        as it helps reduce memory fragmentation.

        .. versionadded:: 3.1
        """
        with open(abspath, "rb") as file:
            if start is not None:
                file.seek(start)
            if end is not None:
                # Number of bytes still owed for the requested range.
                remaining = end - (start or 0)
            else:
                remaining = None
            while True:
                chunk_size = 64 * 1024
                if remaining is not None and remaining < chunk_size:
                    # Don't read past the end of the requested range.
                    chunk_size = remaining
                chunk = file.read(chunk_size)
                if chunk:
                    if remaining is not None:
                        remaining -= len(chunk)
                    yield chunk
                else:
                    # EOF: a bounded range must have been consumed exactly.
                    if remaining is not None:
                        assert remaining == 0
                    return
@classmethod
def get_content_version(cls, abspath):
"""Returns a version string for the resource at the given path.
This class method may be overridden by subclasses. The
default implementation is a hash of the file's contents.
.. versionadded:: 3.1
"""
data = cls.get_content(abspath)
hasher = hashlib.md5()
if isinstance(data, bytes):
hasher.update(data)
else:
for chunk in data:
hasher.update(chunk)
return hasher.hexdigest()
def _stat(self):
if not hasattr(self, '_stat_result'):
self._stat_result = os.stat(self.absolute_path)
return self._stat_result
def get_content_size(self):
"""Retrieve the total size of the resource at the given path.
This method may be overridden by subclasses.
.. versionadded:: 3.1
.. versionchanged:: 4.0
This method is now always called, instead of only when
partial results are requested.
"""
stat_result = self._stat()
return stat_result[stat.ST_SIZE]
def get_modified_time(self):
"""Returns the time that ``self.absolute_path`` was last modified.
May be overridden in subclasses. Should return a `~datetime.datetime`
object or None.
.. versionadded:: 3.1
"""
stat_result = self._stat()
modified = datetime.datetime.utcfromtimestamp(
stat_result[stat.ST_MTIME])
return modified
def get_content_type(self):
"""Returns the ``Content-Type`` header to be used for this request.
.. versionadded:: 3.1
"""
mime_type, encoding = mimetypes.guess_type(self.absolute_path)
# per RFC 6713, use the appropriate type for a gzip compressed file
if encoding == "gzip":
return "application/gzip"
# As of 2015-07-21 there is no bzip2 encoding defined at
# http://www.iana.org/assignments/media-types/media-types.xhtml
# So for that (and any other encoding), use octet-stream.
elif encoding is not None:
return "application/octet-stream"
elif mime_type is not None:
return mime_type
# if mime_type not detected, use application/octet-stream
else:
return "application/octet-stream"
def set_extra_headers(self, path):
"""For subclass to add extra headers to the response"""
pass
def get_cache_time(self, path, modified, mime_type):
"""Override to customize cache control behavior.
Return a positive number of seconds to make the result
cacheable for that amount of time or 0 to mark resource as
cacheable for an unspecified amount of time (subject to
browser heuristics).
By default returns cache expiry of 10 years for resources requested
with ``v`` argument.
"""
return self.CACHE_MAX_AGE if "v" in self.request.arguments else 0
@classmethod
def make_static_url(cls, settings, path, include_version=True):
"""Constructs a versioned url for the given path.
This method may be overridden in subclasses (but note that it
is a class method rather than an instance method). Subclasses
are only required to implement the signature
``make_static_url(cls, settings, path)``; other keyword
arguments may be passed through `~RequestHandler.static_url`
but are not standard.
``settings`` is the `Application.settings` dictionary. ``path``
is the static path being requested. The url returned should be
relative to the current host.
``include_version`` determines whether the generated URL should
include the query string containing the version hash of the
file corresponding to the given ``path``.
"""
url = settings.get('static_url_prefix', '/static/') + path
if not include_version:
return url
version_hash = cls.get_version(settings, path)
if not version_hash:
return url
return '%s?v=%s' % (url, version_hash)
def parse_url_path(self, url_path):
"""Converts a static URL path into a filesystem path.
``url_path`` is the path component of the URL with
``static_url_prefix`` removed. The return value should be
filesystem path relative to ``static_path``.
This is the inverse of `make_static_url`.
"""
if os.path.sep != "/":
url_path = url_path.replace("/", os.path.sep)
return url_path
    @classmethod
    def get_version(cls, settings, path):
        """Generate the version string to be used in static URLs.
        ``settings`` is the `Application.settings` dictionary and ``path``
        is the relative location of the requested asset on the filesystem.
        The returned value should be a string, or ``None`` if no version
        could be determined.
        .. versionchanged:: 3.1
            This method was previously recommended for subclasses to override;
            `get_content_version` is now preferred as it allows the base
            class to handle caching of the result.
        """
        # Resolve to the absolute path so the cache key is unambiguous.
        abs_path = cls.get_absolute_path(settings['static_path'], path)
        return cls._get_cached_version(abs_path)
    @classmethod
    def _get_cached_version(cls, abs_path):
        """Return the cached version hash for ``abs_path``, computing and
        caching it on first use. Returns ``None`` if the file could not
        be read (the failure is also cached to avoid repeated stat/IO).
        """
        # _lock / _static_hashes are class-level state shared by all
        # handler instances, hence the explicit lock.
        with cls._lock:
            hashes = cls._static_hashes
            if abs_path not in hashes:
                try:
                    hashes[abs_path] = cls.get_content_version(abs_path)
                except Exception:
                    gen_log.error("Could not open static file %r", abs_path)
                    hashes[abs_path] = None
            hsh = hashes.get(abs_path)
            if hsh:
                return hsh
        return None
class FallbackHandler(RequestHandler):
    """A `RequestHandler` that wraps another HTTP server callback.
    The fallback is a callable object that accepts an
    `~.httputil.HTTPServerRequest`, such as an `Application` or
    `tornado.wsgi.WSGIContainer`. This is most useful to use both
    Tornado ``RequestHandlers`` and WSGI in the same server. Typical
    usage::
        wsgi_app = tornado.wsgi.WSGIContainer(
            django.core.handlers.wsgi.WSGIHandler())
        application = tornado.web.Application([
            (r"/foo", FooHandler),
            (r".*", FallbackHandler, dict(fallback=wsgi_app),
        ])
    """
    def initialize(self, fallback):
        # ``fallback`` is any callable that accepts an HTTPServerRequest.
        self.fallback = fallback
    def prepare(self):
        # Hand the entire request to the wrapped callback, then mark this
        # handler as finished so the regular handler flow does not continue.
        self.fallback(self.request)
        self._finished = True
class OutputTransform(object):
    """A transform modifies the result of an HTTP request (e.g., GZip encoding)
    Applications are not expected to create their own OutputTransforms
    or interact with them directly; the framework chooses which transforms
    (if any) to apply.
    """
    def __init__(self, request):
        pass
    def transform_first_chunk(self, status_code, headers, chunk, finishing):
        # type: (int, httputil.HTTPHeaders, bytes, bool) -> typing.Tuple[int, httputil.HTTPHeaders, bytes]
        # Identity transform by default; subclasses may rewrite the status
        # code, headers, and first body chunk here.
        return status_code, headers, chunk
    def transform_chunk(self, chunk, finishing):
        # Identity transform for subsequent body chunks.
        return chunk
class GZipContentEncoding(OutputTransform):
    """Applies the gzip content encoding to the response.
    See http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.11
    .. versionchanged:: 4.0
        Now compresses all mime types beginning with ``text/``, instead
        of just a whitelist. (the whitelist is still used for certain
        non-text mime types).
    """
    # Whitelist of compressible mime types (in addition to any types
    # beginning with "text/").
    CONTENT_TYPES = set(["application/javascript", "application/x-javascript",
                         "application/xml", "application/atom+xml",
                         "application/json", "application/xhtml+xml",
                         "image/svg+xml"])
    # Python's GzipFile defaults to level 9, while most other gzip
    # tools (including gzip itself) default to 6, which is probably a
    # better CPU/size tradeoff.
    GZIP_LEVEL = 6
    # Responses that are too short are unlikely to benefit from gzipping
    # after considering the "Content-Encoding: gzip" header and the header
    # inside the gzip encoding.
    # Note that responses written in multiple chunks will be compressed
    # regardless of size.
    MIN_LENGTH = 1024
    def __init__(self, request):
        # Only gzip if the client advertised support for it.
        self._gzipping = "gzip" in request.headers.get("Accept-Encoding", "")
    def _compressible_type(self, ctype):
        # Any text/* type, or one of the explicitly whitelisted types.
        return ctype.startswith('text/') or ctype in self.CONTENT_TYPES
    def transform_first_chunk(self, status_code, headers, chunk, finishing):
        # type: (int, httputil.HTTPHeaders, bytes, bool) -> typing.Tuple[int, httputil.HTTPHeaders, bytes]
        # TODO: can/should this type be inherited from the superclass?
        # Always advertise that the response varies by Accept-Encoding,
        # even when we end up not compressing, so caches behave correctly.
        if 'Vary' in headers:
            headers['Vary'] += ', Accept-Encoding'
        else:
            headers['Vary'] = 'Accept-Encoding'
        if self._gzipping:
            ctype = _unicode(headers.get("Content-Type", "")).split(";")[0]
            # Final decision: compressible type, large enough (unless more
            # chunks follow), and not already encoded by the application.
            self._gzipping = self._compressible_type(ctype) and \
                (not finishing or len(chunk) >= self.MIN_LENGTH) and \
                ("Content-Encoding" not in headers)
        if self._gzipping:
            headers["Content-Encoding"] = "gzip"
            self._gzip_value = BytesIO()
            self._gzip_file = gzip.GzipFile(mode="w", fileobj=self._gzip_value,
                                            compresslevel=self.GZIP_LEVEL)
            chunk = self.transform_chunk(chunk, finishing)
            if "Content-Length" in headers:
                # The original content length is no longer correct.
                # If this is the last (and only) chunk, we can set the new
                # content-length; otherwise we remove it and fall back to
                # chunked encoding.
                if finishing:
                    headers["Content-Length"] = str(len(chunk))
                else:
                    del headers["Content-Length"]
        return status_code, headers, chunk
    def transform_chunk(self, chunk, finishing):
        if self._gzipping:
            self._gzip_file.write(chunk)
            if finishing:
                self._gzip_file.close()
            else:
                self._gzip_file.flush()
            # Drain whatever the gzip stream has produced so far and reset
            # the buffer for the next chunk.
            chunk = self._gzip_value.getvalue()
            self._gzip_value.truncate(0)
            self._gzip_value.seek(0)
        return chunk
def authenticated(method):
    """Decorate methods with this to require that the user be logged in.
    If the user is not logged in, they will be redirected to the configured
    `login url <RequestHandler.get_login_url>`.
    If you configure a login url with a query parameter, Tornado will
    assume you know what you're doing and use it as-is. If not, it
    will add a `next` parameter so the login page knows where to send
    you once you're logged in.
    """
    @functools.wraps(method)
    def wrapper(self, *args, **kwargs):
        # Fast path: a logged-in user goes straight to the wrapped method.
        if self.current_user:
            return method(self, *args, **kwargs)
        # Not logged in: redirect safe (GET/HEAD) requests to the login
        # page; everything else is rejected outright.
        if self.request.method in ("GET", "HEAD"):
            url = self.get_login_url()
            if "?" not in url:
                if urlparse.urlsplit(url).scheme:
                    # if login url is absolute, make next absolute too
                    next_url = self.request.full_url()
                else:
                    next_url = self.request.uri
                url += "?" + urlencode(dict(next=next_url))
            self.redirect(url)
            return
        raise HTTPError(403)
    return wrapper
class UIModule(object):
    """A re-usable, modular UI unit on a page.
    UI modules often execute additional queries, and they can include
    additional CSS and JavaScript that will be included in the output
    page, which is automatically inserted on page render.
    Subclasses of UIModule must override the `render` method.
    """
    def __init__(self, handler):
        # Convenience references into the owning handler.
        self.handler = handler
        self.request = handler.request
        self.ui = handler.ui
        self.locale = handler.locale
    @property
    def current_user(self):
        """The current user as determined by the owning handler."""
        return self.handler.current_user
    def render(self, *args, **kwargs):
        """Override in subclasses to return this module's output."""
        raise NotImplementedError()
    def embedded_javascript(self):
        """Override to return a JavaScript string
        to be embedded in the page."""
        return None
    def javascript_files(self):
        """Override to return a list of JavaScript files needed by this module.
        If the return values are relative paths, they will be passed to
        `RequestHandler.static_url`; otherwise they will be used as-is.
        """
        return None
    def embedded_css(self):
        """Override to return a CSS string
        that will be embedded in the page."""
        return None
    def css_files(self):
        """Override to returns a list of CSS files required by this module.
        If the return values are relative paths, they will be passed to
        `RequestHandler.static_url`; otherwise they will be used as-is.
        """
        return None
    def html_head(self):
        """Override to return an HTML string that will be put in the <head/>
        element.
        """
        return None
    def html_body(self):
        """Override to return an HTML string that will be put at the end of
        the <body/> element.
        """
        return None
    def render_string(self, path, **kwargs):
        """Renders a template and returns it as a string."""
        return self.handler.render_string(path, **kwargs)
class _linkify(UIModule):
    """Built-in UI module that renders text with URLs converted to links."""
    def render(self, text, **kwargs):
        return escape.linkify(text, **kwargs)
class _xsrf_form_html(UIModule):
    """Built-in UI module that emits the handler's XSRF form field."""
    def render(self):
        return self.handler.xsrf_form_html()
class TemplateModule(UIModule):
    """UIModule that simply renders the given template.
    {% module Template("foo.html") %} is similar to {% include "foo.html" %},
    but the module version gets its own namespace (with kwargs passed to
    Template()) instead of inheriting the outer template's namespace.
    Templates rendered through this module also get access to UIModule's
    automatic javascript/css features. Simply call set_resources
    inside the template and give it keyword arguments corresponding to
    the methods on UIModule: {{ set_resources(js_files=static_url("my.js")) }}
    Note that these resources are output once per template file, not once
    per instantiation of the template, so they must not depend on
    any arguments to the template.
    """
    def __init__(self, handler):
        super(TemplateModule, self).__init__(handler)
        # keep resources in both a list and a dict to preserve order
        self._resource_list = []
        self._resource_dict = {}
    def render(self, path, **kwargs):
        # ``set_resources`` is injected into the template namespace; the
        # closure captures ``path`` so resources are recorded per template.
        def set_resources(**kwargs):
            if path not in self._resource_dict:
                self._resource_list.append(kwargs)
                self._resource_dict[path] = kwargs
            else:
                # A template must always declare the same resources;
                # anything else would be order-dependent.
                if self._resource_dict[path] != kwargs:
                    raise ValueError("set_resources called with different "
                                     "resources for the same template")
            return ""
        return self.render_string(path, set_resources=set_resources,
                                  **kwargs)
    def _get_resources(self, key):
        # Yield the value of ``key`` from each recorded resource dict,
        # preserving declaration order.
        return (r[key] for r in self._resource_list if key in r)
    def embedded_javascript(self):
        return "\n".join(self._get_resources("embedded_javascript"))
    def javascript_files(self):
        result = []
        for f in self._get_resources("javascript_files"):
            # A single filename may be given as a bare string; lists and
            # other iterables are flattened into the result.
            if isinstance(f, (unicode_type, bytes)):
                result.append(f)
            else:
                result.extend(f)
        return result
    def embedded_css(self):
        return "\n".join(self._get_resources("embedded_css"))
    def css_files(self):
        result = []
        for f in self._get_resources("css_files"):
            if isinstance(f, (unicode_type, bytes)):
                result.append(f)
            else:
                result.extend(f)
        return result
    def html_head(self):
        return "".join(self._get_resources("html_head"))
    def html_body(self):
        return "".join(self._get_resources("html_body"))
class _UIModuleNamespace(object):
"""Lazy namespace which creates UIModule proxies bound to a handler."""
def __init__(self, handler, ui_modules):
self.handler = handler
self.ui_modules = ui_modules
def __getitem__(self, key):
return self.handler._ui_module(key, self.ui_modules[key])
def __getattr__(self, key):
try:
return self[key]
except KeyError as e:
raise AttributeError(str(e))
if hasattr(hmac, 'compare_digest'): # python 3.3
_time_independent_equals = hmac.compare_digest
else:
def _time_independent_equals(a, b):
if len(a) != len(b):
return False
result = 0
if isinstance(a[0], int): # python3 byte strings
for x, y in zip(a, b):
result |= x ^ y
else: # python2
for x, y in zip(a, b):
result |= ord(x) ^ ord(y)
return result == 0
def create_signed_value(secret, name, value, version=None, clock=None,
                        key_version=None):
    """Sign and timestamp ``value`` so it cannot be forged.
    ``secret`` is the signing key (or a dict of keys when ``key_version``
    is used with the v2 format). ``clock`` may be overridden for testing.
    Returns the signed byte string; raises ``ValueError`` for an
    unsupported ``version``.
    """
    if version is None:
        version = DEFAULT_SIGNED_VALUE_VERSION
    if clock is None:
        clock = time.time
    timestamp = utf8(str(int(clock())))
    value = base64.b64encode(utf8(value))
    if version == 1:
        # v1: value|timestamp|HMAC-SHA1 signature.
        signature = _create_signature_v1(secret, name, value, timestamp)
        value = b"|".join([value, timestamp, signature])
        return value
    elif version == 2:
        # The v2 format consists of a version number and a series of
        # length-prefixed fields "%d:%s", the last of which is a
        # signature, all separated by pipes. All numbers are in
        # decimal format with no leading zeros. The signature is an
        # HMAC-SHA256 of the whole string up to that point, including
        # the final pipe.
        #
        # The fields are:
        # - format version (i.e. 2; no length prefix)
        # - key version (integer, default is 0)
        # - timestamp (integer seconds since epoch)
        # - name (not encoded; assumed to be ~alphanumeric)
        # - value (base64-encoded)
        # - signature (hex-encoded; no length prefix)
        def format_field(s):
            return utf8("%d:" % len(s)) + utf8(s)
        to_sign = b"|".join([
            b"2",
            format_field(str(key_version or 0)),
            format_field(timestamp),
            format_field(name),
            format_field(value),
            b''])
        if isinstance(secret, dict):
            assert key_version is not None, 'Key version must be set when sign key dict is used'
            assert version >= 2, 'Version must be at least 2 for key version support'
            secret = secret[key_version]
        signature = _create_signature_v2(secret, to_sign)
        return to_sign + signature
    else:
        raise ValueError("Unsupported version %d" % version)
# A leading version number in decimal
# with no leading zeros, followed by a pipe.
_signed_value_version_re = re.compile(br"^([1-9][0-9]*)\|(.*)$")
def _get_version(value):
# Figures out what version value is. Version 1 did not include an
# explicit version field and started with arbitrary base64 data,
# which makes this tricky.
m = _signed_value_version_re.match(value)
if m is None:
version = 1
else:
try:
version = int(m.group(1))
if version > 999:
# Certain payloads from the version-less v1 format may
# be parsed as valid integers. Due to base64 padding
# restrictions, this can only happen for numbers whose
# length is a multiple of 4, so we can treat all
# numbers up to 999 as versions, and for the rest we
# fall back to v1 format.
version = 1
except ValueError:
version = 1
return version
def decode_signed_value(secret, name, value, max_age_days=31,
                        clock=None, min_version=None):
    """Verify and decode a value produced by `create_signed_value`.
    Returns the original payload bytes, or ``None`` if the signature is
    invalid, the value has expired, or its format version is below
    ``min_version``. Raises ``ValueError`` for an unsupported
    ``min_version``.
    """
    if clock is None:
        clock = time.time
    if min_version is None:
        min_version = DEFAULT_SIGNED_VALUE_MIN_VERSION
    if min_version > 2:
        raise ValueError("Unsupported min_version %d" % min_version)
    if not value:
        return None
    value = utf8(value)
    version = _get_version(value)
    if version < min_version:
        return None
    # Dispatch to the format-specific decoder; unknown versions fail closed.
    decoders = {1: _decode_signed_value_v1, 2: _decode_signed_value_v2}
    decoder = decoders.get(version)
    if decoder is None:
        return None
    return decoder(secret, name, value, max_age_days, clock)
def _decode_signed_value_v1(secret, name, value, max_age_days, clock):
    """Verify and decode a v1-format signed value (value|timestamp|sig).
    Returns the decoded payload bytes or ``None`` on any failure.
    """
    parts = utf8(value).split(b"|")
    if len(parts) != 3:
        return None
    signature = _create_signature_v1(secret, name, parts[0], parts[1])
    if not _time_independent_equals(parts[2], signature):
        gen_log.warning("Invalid cookie signature %r", value)
        return None
    timestamp = int(parts[1])
    if timestamp < clock() - max_age_days * 86400:
        gen_log.warning("Expired cookie %r", value)
        return None
    if timestamp > clock() + 31 * 86400:
        # _cookie_signature does not hash a delimiter between the
        # parts of the cookie, so an attacker could transfer trailing
        # digits from the payload to the timestamp without altering the
        # signature. For backwards compatibility, sanity-check timestamp
        # here instead of modifying _cookie_signature.
        gen_log.warning("Cookie timestamp in future; possible tampering %r",
                        value)
        return None
    if parts[1].startswith(b"0"):
        # Leading zeros in the timestamp enable the same digit-shuffling
        # attack described above; reject them outright.
        gen_log.warning("Tampered cookie %r", value)
        return None
    try:
        return base64.b64decode(parts[0])
    except Exception:
        return None
def _decode_fields_v2(value):
def _consume_field(s):
length, _, rest = s.partition(b':')
n = int(length)
field_value = rest[:n]
# In python 3, indexing bytes returns small integers; we must
# use a slice to get a byte string as in python 2.
if rest[n:n + 1] != b'|':
raise ValueError("malformed v2 signed value field")
rest = rest[n + 1:]
return field_value, rest
rest = value[2:] # remove version number
key_version, rest = _consume_field(rest)
timestamp, rest = _consume_field(rest)
name_field, rest = _consume_field(rest)
value_field, passed_sig = _consume_field(rest)
return int(key_version), timestamp, name_field, value_field, passed_sig
def _decode_signed_value_v2(secret, name, value, max_age_days, clock):
    """Verify and decode a v2-format signed value.
    ``secret`` may be a dict keyed by key version. Returns the decoded
    payload bytes or ``None`` on any verification failure.
    """
    try:
        key_version, timestamp, name_field, value_field, passed_sig = _decode_fields_v2(value)
    except ValueError:
        return None
    # Everything up to (but not including) the signature was signed.
    signed_string = value[:-len(passed_sig)]
    if isinstance(secret, dict):
        try:
            secret = secret[key_version]
        except KeyError:
            return None
    expected_sig = _create_signature_v2(secret, signed_string)
    if not _time_independent_equals(passed_sig, expected_sig):
        return None
    if name_field != utf8(name):
        return None
    timestamp = int(timestamp)
    if timestamp < clock() - max_age_days * 86400:
        # The signature has expired.
        return None
    try:
        return base64.b64decode(value_field)
    except Exception:
        return None
def get_signature_key_version(value):
    """Return the key version of a v2 signed value.
    Returns ``None`` for v1 values (which have no key version) and for
    malformed input.
    """
    value = utf8(value)
    if _get_version(value) < 2:
        return None
    try:
        fields = _decode_fields_v2(value)
    except ValueError:
        return None
    # The key version is the first decoded field.
    return fields[0]
def _create_signature_v1(secret, *parts):
    """Hex-encoded HMAC-SHA1 over ``parts`` (legacy v1 signing)."""
    mac = hmac.new(utf8(secret), digestmod=hashlib.sha1)
    for part in parts:
        mac.update(utf8(part))
    return utf8(mac.hexdigest())
def _create_signature_v2(secret, s):
    """Hex-encoded HMAC-SHA256 over ``s`` (v2 signing)."""
    mac = hmac.new(utf8(secret), digestmod=hashlib.sha256)
    mac.update(utf8(s))
    return utf8(mac.hexdigest())
def is_absolute(path):
    """Return True if ``path`` is an absolute URL or an absolute path."""
    # str.startswith accepts a tuple of prefixes, equivalent to the
    # original any()-over-prefixes formulation.
    return path.startswith(("/", "http:", "https:"))
| 39.439173 | 153 | 0.61757 |
from __future__ import absolute_import, division, print_function
import base64
import binascii
import datetime
import email.utils
import functools
import gzip
import hashlib
import hmac
import mimetypes
import numbers
import os.path
import re
import stat
import sys
import threading
import time
import salt.ext.tornado as tornado
import traceback
import types
from inspect import isclass
from io import BytesIO
from salt.ext.tornado.concurrent import Future
from salt.ext.tornado import escape
from salt.ext.tornado import gen
from salt.ext.tornado import httputil
from salt.ext.tornado import iostream
from salt.ext.tornado import locale
from salt.ext.tornado.log import access_log, app_log, gen_log
from salt.ext.tornado import stack_context
from salt.ext.tornado import template
from salt.ext.tornado.escape import utf8, _unicode
from salt.ext.tornado.routing import (AnyMatches, DefaultHostMatches, HostMatches,
ReversibleRouter, Rule, ReversibleRuleRouter,
URLSpec)
from salt.ext.tornado.util import (ObjectDict, raise_exc_info,
unicode_type, _websocket_mask, PY3)
url = URLSpec
if PY3:
import http.cookies as Cookie
import urllib.parse as urlparse
from urllib.parse import urlencode
else:
import Cookie
import urlparse
from urllib import urlencode
try:
import typing
_HeaderTypes = typing.Union[bytes, unicode_type,
numbers.Integral, datetime.datetime]
except ImportError:
pass
MIN_SUPPORTED_SIGNED_VALUE_VERSION = 1
MAX_SUPPORTED_SIGNED_VALUE_VERSION = 2
DEFAULT_SIGNED_VALUE_VERSION = 2
DEFAULT_SIGNED_VALUE_MIN_VERSION = 1
class RequestHandler(object):
    """Base class for HTTP request handlers.
    Subclasses must define at least one of the HTTP verb methods
    (``get``, ``post``, etc.) listed in `SUPPORTED_METHODS` below.
    """
    SUPPORTED_METHODS = ("GET", "HEAD", "POST", "DELETE", "PATCH", "PUT",
                         "OPTIONS")
    # Template loaders are shared across all handler instances and
    # guarded by the lock below.
    _template_loaders = {}  # type: typing.Dict[str, template.BaseLoader]
    _template_loader_lock = threading.Lock()
    # Control characters (other than \t, \n, \r) stripped from arguments.
    _remove_control_chars_regex = re.compile(r"[\x00-\x08\x0e-\x1f]")
    def __init__(self, application, request, **kwargs):
        """Wire this handler to its `Application` and the current request.
        Extra ``kwargs`` (from the URL spec) are forwarded to `initialize`.
        """
        super(RequestHandler, self).__init__()
        self.application = application
        self.request = request
        self._headers_written = False
        self._finished = False
        self._auto_finish = True
        self._transforms = None
        self._prepared_future = None
        self._headers = None  # type: httputil.HTTPHeaders
        self.path_args = None
        self.path_kwargs = None
        # Expose the application's UI methods and modules to templates.
        self.ui = ObjectDict((n, self._ui_method(m)) for n, m in
                             application.ui_methods.items())
        # UIModules are available as both `modules` and `_tt_modules`
        # in the template namespace. While `modules` is more convenient,
        # it can be masked by a page-local variable of the same name.
        self.ui["_tt_modules"] = _UIModuleNamespace(self,
                                                    application.ui_modules)
        self.ui["modules"] = self.ui["_tt_modules"]
        self.clear()
        self.request.connection.set_close_callback(self.on_connection_close)
        self.initialize(**kwargs)
    def initialize(self):
        """Hook for subclass initialization; called once per request with
        the keyword arguments from the URL spec. Base implementation is a
        no-op."""
        pass
    @property
    def settings(self):
        """An alias for `self.application.settings`."""
        return self.application.settings
    # Default HTTP verb implementations: subclasses override the verbs
    # they support; anything not overridden answers 405 Method Not Allowed.
    def head(self, *args, **kwargs):
        raise HTTPError(405)
    def get(self, *args, **kwargs):
        raise HTTPError(405)
    def post(self, *args, **kwargs):
        raise HTTPError(405)
    def delete(self, *args, **kwargs):
        raise HTTPError(405)
    def patch(self, *args, **kwargs):
        raise HTTPError(405)
    def put(self, *args, **kwargs):
        raise HTTPError(405)
    def options(self, *args, **kwargs):
        raise HTTPError(405)
    def prepare(self):
        """Called before the verb method; override for common setup. Base
        implementation is a no-op."""
        pass
    def on_finish(self):
        """Called after the response has been sent; override for cleanup.
        Base implementation is a no-op."""
        pass
    def on_connection_close(self):
        """Called when the client connection closes prematurely."""
        if _has_stream_request_body(self.__class__):
            if not self.request.body.done():
                # Fail the pending body future so streaming consumers wake
                # up; retrieving the exception marks it as handled.
                self.request.body.set_exception(iostream.StreamClosedError())
                self.request.body.exception()
    def clear(self):
        """Reset all headers and content for this response."""
        self._headers = httputil.HTTPHeaders({
            "Server": "TornadoServer/%s" % tornado.version,
            "Content-Type": "text/html; charset=UTF-8",
            "Date": httputil.format_timestamp(time.time()),
        })
        self.set_default_headers()
        self._write_buffer = []
        self._status_code = 200
        self._reason = httputil.responses[200]
    def set_default_headers(self):
        """Override to set response headers that should survive `clear`.
        Base implementation is a no-op."""
        pass
    def set_status(self, status_code, reason=None):
        """Set the response status code.
        ``reason`` is the human-readable reason phrase; when omitted it is
        looked up from ``httputil.responses``, and a ``ValueError`` is
        raised for status codes with no known phrase.
        """
        self._status_code = status_code
        if reason is not None:
            self._reason = escape.native_str(reason)
        else:
            try:
                self._reason = httputil.responses[status_code]
            except KeyError:
                raise ValueError("unknown status code %d" % status_code)
    def get_status(self):
        """Return the status code for this response."""
        return self._status_code
    def set_header(self, name, value):
        # type: (str, _HeaderTypes) -> None
        """Set ``name`` to ``value``, replacing any previous value."""
        self._headers[name] = self._convert_header_value(value)
    def add_header(self, name, value):
        # type: (str, _HeaderTypes) -> None
        """Add ``value`` for ``name``, keeping any previous values."""
        self._headers.add(name, self._convert_header_value(value))
    def clear_header(self, name):
        """Remove ``name`` from the outgoing headers if present."""
        if name in self._headers:
            del self._headers[name]
_INVALID_HEADER_CHAR_RE = re.compile(r"[\x00-\x1f]")
def _convert_header_value(self, value):
if isinstance(value, str):
retval = value
elif isinstance(value, bytes):
retval = value.decode('latin1')
elif isinstance(value, unicode_type):
retval = escape.utf8(value)
elif isinstance(value, numbers.Integral):
# return immediately since we know the converted value will be safe
return str(value)
elif isinstance(value, datetime.datetime):
return httputil.format_timestamp(value)
else:
raise TypeError("Unsupported header value %r" % value)
# If \n is allowed into the header, it is possible to inject
# additional headers or split the request.
if RequestHandler._INVALID_HEADER_CHAR_RE.search(retval):
raise ValueError("Unsafe header value %r", retval)
return retval
    # Sentinel so callers may legitimately pass None as a default.
    _ARG_DEFAULT = object()
    def get_argument(self, name, default=_ARG_DEFAULT, strip=True):
        """Return the last value of ``name`` from all request arguments.
        Raises ``MissingArgumentError`` if the argument is absent and no
        ``default`` was given.
        """
        return self._get_argument(name, default, self.request.arguments, strip)
    def get_arguments(self, name, strip=True):
        """Return a (possibly empty) list of all values for ``name``."""
        # Make sure `get_arguments` isn't accidentally being called with a
        # positional argument that's assumed to be a default (like in
        # `get_argument`).
        assert isinstance(strip, bool)
        return self._get_arguments(name, self.request.arguments, strip)
    def get_body_argument(self, name, default=_ARG_DEFAULT, strip=True):
        """Like `get_argument`, but only considers body arguments."""
        return self._get_argument(name, default, self.request.body_arguments,
                                  strip)
    def get_body_arguments(self, name, strip=True):
        """Like `get_arguments`, but only considers body arguments."""
        return self._get_arguments(name, self.request.body_arguments, strip)
    def get_query_argument(self, name, default=_ARG_DEFAULT, strip=True):
        """Like `get_argument`, but only considers query-string arguments."""
        return self._get_argument(name, default,
                                  self.request.query_arguments, strip)
    def get_query_arguments(self, name, strip=True):
        """Like `get_arguments`, but only considers query-string arguments."""
        return self._get_arguments(name, self.request.query_arguments, strip)
    def _get_argument(self, name, default, source, strip=True):
        """Return the last value of ``name`` in ``source`` or ``default``."""
        args = self._get_arguments(name, source, strip=strip)
        if not args:
            if default is self._ARG_DEFAULT:
                raise MissingArgumentError(name)
            return default
        return args[-1]
    def _get_arguments(self, name, source, strip=True):
        """Decode and sanitize every value of ``name`` in ``source``."""
        values = []
        for v in source.get(name, []):
            v = self.decode_argument(v, name=name)
            if isinstance(v, unicode_type):
                # Get rid of any weird control chars (unless decoding gave
                # us bytes, in which case leave it alone)
                v = RequestHandler._remove_control_chars_regex.sub(" ", v)
            if strip:
                v = v.strip()
            values.append(v)
        return values
    def decode_argument(self, value, name=None):
        """Decode a raw argument value to unicode; override to customize.
        Raises a 400 ``HTTPError`` on invalid UTF-8.
        """
        try:
            return _unicode(value)
        except UnicodeDecodeError:
            raise HTTPError(400, "Invalid unicode in %s: %r" %
                            (name or "url", value[:40]))
    @property
    def cookies(self):
        """An alias for ``self.request.cookies``."""
        return self.request.cookies
    def get_cookie(self, name, default=None):
        """Return the value of the request cookie ``name``, else ``default``."""
        if self.request.cookies is not None and name in self.request.cookies:
            return self.request.cookies[name].value
        return default
    def set_cookie(self, name, value, domain=None, expires=None, path="/",
                   expires_days=None, **kwargs):
        """Set an outgoing cookie.
        ``expires`` may be a timestamp/datetime; ``expires_days`` is a
        convenience alternative. Additional morsel attributes (e.g.
        ``httponly``, ``secure``, ``max_age``) may be passed as kwargs.
        """
        # The cookie library only accepts type str, in both python 2 and 3
        name = escape.native_str(name)
        value = escape.native_str(value)
        if re.search(r"[\x00-\x20]", name + value):
            # Don't let us accidentally inject bad stuff
            raise ValueError("Invalid cookie %r: %r" % (name, value))
        if not hasattr(self, "_new_cookie"):
            self._new_cookie = Cookie.SimpleCookie()
        if name in self._new_cookie:
            del self._new_cookie[name]
        self._new_cookie[name] = value
        morsel = self._new_cookie[name]
        if domain:
            morsel["domain"] = domain
        if expires_days is not None and not expires:
            expires = datetime.datetime.utcnow() + datetime.timedelta(
                days=expires_days)
        if expires:
            morsel["expires"] = httputil.format_timestamp(expires)
        if path:
            morsel["path"] = path
        for k, v in kwargs.items():
            if k == 'max_age':
                k = 'max-age'
            # skip falsy values for httponly and secure flags because
            # SimpleCookie sets them regardless
            if k in ['httponly', 'secure'] and not v:
                continue
            morsel[k] = v
    def clear_cookie(self, name, path="/", domain=None):
        """Delete the cookie ``name`` (by expiring it in the past)."""
        expires = datetime.datetime.utcnow() - datetime.timedelta(days=365)
        self.set_cookie(name, value="", path=path, expires=expires,
                        domain=domain)
    def clear_all_cookies(self, path="/", domain=None):
        """Delete every cookie the client sent with this request."""
        for name in self.request.cookies:
            self.clear_cookie(name, path=path, domain=domain)
    def set_secure_cookie(self, name, value, expires_days=30, version=None,
                          **kwargs):
        """Set a signed, timestamped cookie that cannot be forged."""
        self.set_cookie(name, self.create_signed_value(name, value,
                                                       version=version),
                        expires_days=expires_days, **kwargs)
    def create_signed_value(self, name, value, version=None):
        """Sign ``value`` using the application's ``cookie_secret``."""
        self.require_setting("cookie_secret", "secure cookies")
        secret = self.application.settings["cookie_secret"]
        key_version = None
        if isinstance(secret, dict):
            # Key rotation: a dict of secrets requires key_version to pick
            # the active signing key.
            if self.application.settings.get("key_version") is None:
                raise Exception("key_version setting must be used for secret_key dicts")
            key_version = self.application.settings["key_version"]
        return create_signed_value(secret, name, value, version=version,
                                   key_version=key_version)
    def get_secure_cookie(self, name, value=None, max_age_days=31,
                          min_version=None):
        """Return the decoded secure cookie ``name``, or None if invalid."""
        self.require_setting("cookie_secret", "secure cookies")
        if value is None:
            value = self.get_cookie(name)
        return decode_signed_value(self.application.settings["cookie_secret"],
                                   name, value, max_age_days=max_age_days,
                                   min_version=min_version)
    def get_secure_cookie_key_version(self, name, value=None):
        """Return the signing key version of cookie ``name`` (v2 only)."""
        self.require_setting("cookie_secret", "secure cookies")
        if value is None:
            value = self.get_cookie(name)
        return get_signature_key_version(value)
    def redirect(self, url, permanent=False, status=None):
        """Redirect to ``url`` and finish the request.
        ``status`` defaults to 301 (permanent) or 302 (temporary); any
        explicit status must be in the 3xx range.
        """
        if self._headers_written:
            raise Exception("Cannot redirect after headers have been written")
        if status is None:
            status = 301 if permanent else 302
        else:
            assert isinstance(status, int) and 300 <= status <= 399
        self.set_status(status)
        self.set_header("Location", utf8(url))
        self.finish()
    def write(self, chunk):
        """Append ``chunk`` to the output buffer.
        Bytes and unicode are written as-is; a dict is JSON-encoded and
        the Content-Type set to ``application/json``.
        """
        if self._finished:
            raise RuntimeError("Cannot write() after finish()")
        if not isinstance(chunk, (bytes, unicode_type, dict)):
            message = "write() only accepts bytes, unicode, and dict objects"
            if isinstance(chunk, list):
                message += ". Lists not accepted for security reasons; see http://www.tornadoweb.org/en/stable/web.html#tornado.web.RequestHandler.write"
            raise TypeError(message)
        if isinstance(chunk, dict):
            chunk = escape.json_encode(chunk)
            self.set_header("Content-Type", "application/json; charset=UTF-8")
        chunk = utf8(chunk)
        self._write_buffer.append(chunk)
    def render(self, template_name, **kwargs):
        """Render the template with ``kwargs``, splice in any UI module
        assets (JS/CSS/head/body fragments), and finish the request.
        """
        if self._finished:
            raise RuntimeError("Cannot render() after finish()")
        html = self.render_string(template_name, **kwargs)
        # Collect the assets declared by every active UI module so they
        # can be inserted into the rendered page below.
        js_embed = []
        js_files = []
        css_embed = []
        css_files = []
        html_heads = []
        html_bodies = []
        for module in getattr(self, "_active_modules", {}).values():
            embed_part = module.embedded_javascript()
            if embed_part:
                js_embed.append(utf8(embed_part))
            file_part = module.javascript_files()
            if file_part:
                # A single filename may be given as a bare string.
                if isinstance(file_part, (unicode_type, bytes)):
                    js_files.append(file_part)
                else:
                    js_files.extend(file_part)
            embed_part = module.embedded_css()
            if embed_part:
                css_embed.append(utf8(embed_part))
            file_part = module.css_files()
            if file_part:
                if isinstance(file_part, (unicode_type, bytes)):
                    css_files.append(file_part)
                else:
                    css_files.extend(file_part)
            head_part = module.html_head()
            if head_part:
                html_heads.append(utf8(head_part))
            body_part = module.html_body()
            if body_part:
                html_bodies.append(utf8(body_part))
        # JS goes just before </body>; CSS and head fragments just
        # before </head>.
        if js_files:
            js = self.render_linked_js(js_files)
            sloc = html.rindex(b'</body>')
            html = html[:sloc] + utf8(js) + b'\n' + html[sloc:]
        if js_embed:
            js = self.render_embed_js(js_embed)
            sloc = html.rindex(b'</body>')
            html = html[:sloc] + js + b'\n' + html[sloc:]
        if css_files:
            css = self.render_linked_css(css_files)
            hloc = html.index(b'</head>')
            html = html[:hloc] + utf8(css) + b'\n' + html[hloc:]
        if css_embed:
            css = self.render_embed_css(css_embed)
            hloc = html.index(b'</head>')
            html = html[:hloc] + css + b'\n' + html[hloc:]
        if html_heads:
            hloc = html.index(b'</head>')
            html = html[:hloc] + b''.join(html_heads) + b'\n' + html[hloc:]
        if html_bodies:
            hloc = html.index(b'</body>')
            html = html[:hloc] + b''.join(html_bodies) + b'\n' + html[hloc:]
        self.finish(html)
def render_linked_js(self, js_files):
paths = []
unique_paths = set()
for path in js_files:
if not is_absolute(path):
path = self.static_url(path)
if path not in unique_paths:
paths.append(path)
unique_paths.add(path)
return ''.join('<script src="' + escape.xhtml_escape(p) +
'" type="text/javascript"></script>'
for p in paths)
def render_embed_js(self, js_embed):
return b'<script type="text/javascript">\n//<![CDATA[\n' + \
b'\n'.join(js_embed) + b'\n//]]>\n</script>'
def render_linked_css(self, css_files):
paths = []
unique_paths = set()
for path in css_files:
if not is_absolute(path):
path = self.static_url(path)
if path not in unique_paths:
paths.append(path)
unique_paths.add(path)
return ''.join('<link href="' + escape.xhtml_escape(p) + '" '
'type="text/css" rel="stylesheet"/>'
for p in paths)
def render_embed_css(self, css_embed):
return b'<style type="text/css">\n' + b'\n'.join(css_embed) + \
b'\n</style>'
    def render_string(self, template_name, **kwargs):
        """Generate the given template with the handler's default namespace.

        Returns the rendered output; render() uses this and then finishes
        the request, while callers may use it directly to embed one
        template inside another.
        """
        template_path = self.get_template_path()
        if not template_path:
            # No template_path setting: default to the directory of the
            # file that called render_string (walk frames until we leave
            # this module's own file).
            frame = sys._getframe(0)
            web_file = frame.f_code.co_filename
            while frame.f_code.co_filename == web_file:
                frame = frame.f_back
            template_path = os.path.dirname(frame.f_code.co_filename)
        with RequestHandler._template_loader_lock:
            # Template loaders are cached per directory, process-wide.
            if template_path not in RequestHandler._template_loaders:
                loader = self.create_template_loader(template_path)
                RequestHandler._template_loaders[template_path] = loader
            else:
                loader = RequestHandler._template_loaders[template_path]
        t = loader.load(template_name)
        namespace = self.get_template_namespace()
        # Explicit keyword arguments override the default namespace.
        namespace.update(kwargs)
        return t.generate(**namespace)
    def get_template_namespace(self):
        """Return the default variable namespace available to templates.

        Combines the standard handler/request helpers with any
        ``ui_methods`` registered on the application (exposed through
        ``self.ui``).
        """
        namespace = dict(
            handler=self,
            request=self.request,
            current_user=self.current_user,
            locale=self.locale,
            _=self.locale.translate,
            pgettext=self.locale.pgettext,
            static_url=self.static_url,
            xsrf_form_html=self.xsrf_form_html,
            reverse_url=self.reverse_url
        )
        # ui_methods/ui_modules from the application settings.
        namespace.update(self.ui)
        return namespace
def create_template_loader(self, template_path):
settings = self.application.settings
if "template_loader" in settings:
return settings["template_loader"]
kwargs = {}
if "autoescape" in settings:
kwargs["autoescape"] = settings["autoescape"]
if "template_whitespace" in settings:
kwargs["whitespace"] = settings["template_whitespace"]
return template.Loader(template_path, **kwargs)
    def flush(self, include_footers=False, callback=None):
        """Flush the current output buffer to the network.

        The first flush also sends the response headers (after running
        each output transform on them); later flushes only send body
        chunks.  Returns a Future that resolves when the write completes.
        """
        chunk = b"".join(self._write_buffer)
        self._write_buffer = []
        if not self._headers_written:
            self._headers_written = True
            # Transforms (e.g. gzip) may rewrite status/headers/body, but
            # only on the first chunk.
            for transform in self._transforms:
                self._status_code, self._headers, chunk = \
                    transform.transform_first_chunk(
                        self._status_code, self._headers,
                        chunk, include_footers)
            # Ignore the chunk and only write the headers for HEAD requests.
            if self.request.method == "HEAD":
                chunk = None
            if hasattr(self, "_new_cookie"):
                for cookie in self._new_cookie.values():
                    self.add_header("Set-Cookie", cookie.OutputString(None))
            start_line = httputil.ResponseStartLine('',
                                                    self._status_code,
                                                    self._reason)
            return self.request.connection.write_headers(
                start_line, self._headers, chunk, callback=callback)
        else:
            for transform in self._transforms:
                chunk = transform.transform_chunk(chunk, include_footers)
            if self.request.method != "HEAD":
                return self.request.connection.write(chunk, callback=callback)
            else:
                # Nothing to write for HEAD; return an already-resolved
                # Future so callers can still yield on the result.
                future = Future()
                future.set_result(None)
                return future
    def finish(self, chunk=None):
        """Finish this response, ending the HTTP request.

        Optionally writes a final *chunk*, fills in Etag/Content-Length
        if headers have not gone out yet, flushes everything (including
        footers) and runs on_finish().
        """
        if self._finished:
            raise RuntimeError("finish() called twice")
        if chunk is not None:
            self.write(chunk)
        # Automatically support ETags and add the Content-Length header
        # if we have not flushed any content yet.
        if not self._headers_written:
            if (self._status_code == 200 and
                self.request.method in ("GET", "HEAD") and
                "Etag" not in self._headers):
                self.set_etag_header()
                # If the Etag matches the client's, turn this into a 304.
                if self.check_etag_header():
                    self._write_buffer = []
                    self.set_status(304)
            if (self._status_code in (204, 304) or
                (self._status_code >= 100 and self._status_code < 200)):
                assert not self._write_buffer, "Cannot send body with %s" % self._status_code
                self._clear_headers_for_304()
            elif "Content-Length" not in self._headers:
                content_length = sum(len(part) for part in self._write_buffer)
                self.set_header("Content-Length", content_length)
        if hasattr(self.request, "connection"):
            # Now that the request is finished, clear the close callback we
            # set on the connection so the handler can be garbage collected
            # while keepalive connections stay open.
            self.request.connection.set_close_callback(None)
        self.flush(include_footers=True)
        self.request.finish()
        self._log()
        self._finished = True
        self.on_finish()
        self._break_cycles()
def _break_cycles(self):
self.ui = None
    def send_error(self, status_code=500, **kwargs):
        """Send the given HTTP error code to the browser.

        Clears any pending output and renders an error page via
        write_error(); if headers were already flushed, the best we can do
        is close the response.
        """
        if self._headers_written:
            gen_log.error("Cannot send error response after headers written")
            if not self._finished:
                # Try to complete whatever partial response went out.
                try:
                    self.finish()
                except Exception:
                    gen_log.error("Failed to flush partial response",
                                  exc_info=True)
            return
        self.clear()
        reason = kwargs.get('reason')
        if 'exc_info' in kwargs:
            # Prefer the reason phrase carried by an HTTPError, if any.
            exception = kwargs['exc_info'][1]
            if isinstance(exception, HTTPError) and exception.reason:
                reason = exception.reason
        self.set_status(status_code, reason=reason)
        try:
            self.write_error(status_code, **kwargs)
        except Exception:
            app_log.error("Uncaught exception in write_error", exc_info=True)
        if not self._finished:
            self.finish()
    def write_error(self, status_code, **kwargs):
        """Override to implement custom error pages.

        With the ``serve_traceback`` setting on (debug mode) and exception
        info available, sends the traceback as plain text; otherwise a
        minimal HTML page with the status code and reason phrase.
        """
        if self.settings.get("serve_traceback") and "exc_info" in kwargs:
            # In debug mode, send the full traceback to the client.
            self.set_header('Content-Type', 'text/plain')
            for line in traceback.format_exception(*kwargs["exc_info"]):
                self.write(line)
            self.finish()
        else:
            self.finish("<html><title>%(code)d: %(message)s</title>"
                        "<body>%(code)d: %(message)s</body></html>" % {
                            "code": status_code,
                            "message": self._reason,
                        })
    @property
    def locale(self):
        """The locale for the current session.

        Determined by get_user_locale() (user preference), falling back
        to get_browser_locale() (Accept-Language header); cached on the
        handler for the rest of the request.
        """
        if not hasattr(self, "_locale"):
            self._locale = self.get_user_locale()
            if not self._locale:
                self._locale = self.get_browser_locale()
                assert self._locale
        return self._locale
    @locale.setter
    def locale(self, value):
        # Explicit assignment overrides the detected locale.
        self._locale = value
def get_user_locale(self):
return None
def get_browser_locale(self, default="en_US"):
if "Accept-Language" in self.request.headers:
languages = self.request.headers["Accept-Language"].split(",")
locales = []
for language in languages:
parts = language.strip().split(";")
if len(parts) > 1 and parts[1].startswith("q="):
try:
score = float(parts[1][2:])
except (ValueError, TypeError):
score = 0.0
else:
score = 1.0
locales.append((parts[0], score))
if locales:
locales.sort(key=lambda pair: pair[1], reverse=True)
codes = [l[0] for l in locales]
return locale.get(*codes)
return locale.get(default)
    @property
    def current_user(self):
        """The authenticated user for this request, per get_current_user().

        Computed once per request and cached; None means anonymous.
        """
        if not hasattr(self, "_current_user"):
            self._current_user = self.get_current_user()
        return self._current_user
    @current_user.setter
    def current_user(self, value):
        # Lets authentication code set the user directly.
        self._current_user = value
def get_current_user(self):
return None
def get_login_url(self):
self.require_setting("login_url", "@tornado.web.authenticated")
return self.application.settings["login_url"]
def get_template_path(self):
return self.application.settings.get("template_path")
@property
def xsrf_token(self):
if not hasattr(self, "_xsrf_token"):
version, token, timestamp = self._get_raw_xsrf_token()
output_version = self.settings.get("xsrf_cookie_version", 2)
cookie_kwargs = self.settings.get("xsrf_cookie_kwargs", {})
if output_version == 1:
self._xsrf_token = binascii.b2a_hex(token)
elif output_version == 2:
mask = os.urandom(4)
self._xsrf_token = b"|".join([
b"2",
binascii.b2a_hex(mask),
binascii.b2a_hex(_websocket_mask(mask, token)),
utf8(str(int(timestamp)))])
else:
raise ValueError("unknown xsrf cookie version %d",
output_version)
if version is None:
expires_days = 30 if self.current_user else None
self.set_cookie("_xsrf", self._xsrf_token,
expires_days=expires_days,
**cookie_kwargs)
return self._xsrf_token
    def _get_raw_xsrf_token(self):
        """Read, or mint, the raw XSRF token for this request.

        Returns ``(version, token, timestamp)``: *version* is the cookie
        format the token was read from (None when newly generated),
        *token* the raw random bytes, *timestamp* its creation time.
        Cached per request.
        """
        if not hasattr(self, '_raw_xsrf_token'):
            cookie = self.get_cookie("_xsrf")
            if cookie:
                version, token, timestamp = self._decode_xsrf_token(cookie)
            else:
                version, token, timestamp = None, None, None
            if token is None:
                # Missing or undecodable cookie: generate a fresh token.
                version = None
                token = os.urandom(16)
                timestamp = time.time()
            self._raw_xsrf_token = (version, token, timestamp)
        return self._raw_xsrf_token
    def _decode_xsrf_token(self, cookie):
        """Decode the ``_xsrf`` cookie into (version, token, timestamp).

        Accepts the masked version-2 format and legacy version-1 hex
        tokens; any parsing problem yields (None, None, None) so the
        caller regenerates a fresh token.
        """
        try:
            m = _signed_value_version_re.match(utf8(cookie))
            if m:
                version = int(m.group(1))
                if version == 2:
                    _, mask, masked_token, timestamp = cookie.split("|")
                    mask = binascii.a2b_hex(utf8(mask))
                    # Undo the XOR mask applied when the cookie was set.
                    token = _websocket_mask(
                        mask, binascii.a2b_hex(utf8(masked_token)))
                    timestamp = int(timestamp)
                    return version, token, timestamp
                else:
                    # Treat unknown versions as absent rather than failing.
                    raise Exception("Unknown xsrf cookie version")
            else:
                version = 1
                try:
                    token = binascii.a2b_hex(utf8(cookie))
                except (binascii.Error, TypeError):
                    # Not hex: fall back to the raw cookie bytes.
                    token = utf8(cookie)
                # Version-1 cookies carry no timestamp; use "now".
                timestamp = int(time.time())
                return (version, token, timestamp)
        except Exception:
            # Catch exceptions and return nothing instead of failing.
            gen_log.debug("Uncaught exception in _decode_xsrf_token",
                          exc_info=True)
            return None, None, None
    def check_xsrf_cookie(self):
        """Verify that the ``_xsrf`` cookie matches the submitted token.

        The token may arrive as a form argument or in the X-Xsrftoken /
        X-Csrftoken headers.  Raises a 403 HTTPError on any mismatch.
        """
        token = (self.get_argument("_xsrf", None) or
                 self.request.headers.get("X-Xsrftoken") or
                 self.request.headers.get("X-Csrftoken"))
        if not token:
            raise HTTPError(403, "'_xsrf' argument missing from POST")
        _, token, _ = self._decode_xsrf_token(token)
        _, expected_token, _ = self._get_raw_xsrf_token()
        if not token:
            raise HTTPError(403, "'_xsrf' argument has invalid format")
        # Constant-time comparison to avoid timing side channels.
        if not _time_independent_equals(utf8(token), utf8(expected_token)):
            raise HTTPError(403, "XSRF cookie does not match POST argument")
def xsrf_form_html(self):
return '<input type="hidden" name="_xsrf" value="' + \
escape.xhtml_escape(self.xsrf_token) + '"/>'
    def static_url(self, path, include_host=None, **kwargs):
        """Return a (possibly versioned) URL for the given static resource.

        Delegates URL construction to the configured static handler
        class; when *include_host* is true (or ``self.include_host`` is
        set) the scheme and host are prepended, producing an absolute URL.
        """
        self.require_setting("static_path", "static_url")
        get_url = self.settings.get("static_handler_class",
                                    StaticFileHandler).make_static_url
        if include_host is None:
            # Per-handler default, settable as an attribute.
            include_host = getattr(self, "include_host", False)
        if include_host:
            base = self.request.protocol + "://" + self.request.host
        else:
            base = ""
        return base + get_url(self.settings, path, **kwargs)
def require_setting(self, name, feature="this feature"):
if not self.application.settings.get(name):
raise Exception("You must define the '%s' setting in your "
"application to use %s" % (name, feature))
def reverse_url(self, name, *args):
return self.application.reverse_url(name, *args)
def compute_etag(self):
hasher = hashlib.sha1()
for part in self._write_buffer:
hasher.update(part)
return '"%s"' % hasher.hexdigest()
def set_etag_header(self):
etag = self.compute_etag()
if etag is not None:
self.set_header("Etag", etag)
def check_etag_header(self):
computed_etag = utf8(self._headers.get("Etag", ""))
# Find all weak and strong etag values from If-None-Match header
# because RFC 7232 allows multiple etag values in a single header.
etags = re.findall(
br'\*|(?:W/)?"[^"]*"',
utf8(self.request.headers.get("If-None-Match", ""))
)
if not computed_etag or not etags:
return False
match = False
if etags[0] == b'*':
match = True
else:
# Use a weak comparison when comparing entity-tags.
def val(x):
return x[2:] if x.startswith(b'W/') else x
for etag in etags:
if val(etag) == val(computed_etag):
match = True
break
return match
    def _stack_context_handle_exception(self, type, value, traceback):
        """ExceptionStackContext hook: route errors into the handler.

        Always returns True (exception handled) so the error does not
        propagate further up the stack-context chain.
        """
        try:
            # For historical reasons _handle_request_exception only takes
            # the exception value instead of the full triple,
            # so re-raise the exception to ensure that it's in
            # sys.exc_info()
            raise_exc_info((type, value, traceback))
        except Exception:
            self._handle_request_exception(value)
        return True
    @gen.coroutine
    def _execute(self, transforms, *args, **kwargs):
        """Execute this request with the given output transforms.

        Drives the full handler lifecycle: method check, path-argument
        decoding, XSRF check, prepare(), (optionally streamed) body, the
        HTTP-verb method itself, and auto-finish; all exceptions funnel
        into _handle_request_exception().
        """
        self._transforms = transforms
        try:
            if self.request.method not in self.SUPPORTED_METHODS:
                raise HTTPError(405)
            # Decode the %-escaped path components captured by routing.
            self.path_args = [self.decode_argument(arg) for arg in args]
            self.path_kwargs = dict((k, self.decode_argument(v, name=k))
                                    for (k, v) in kwargs.items())
            # If XSRF cookies are turned on, reject form submissions without
            # the proper cookie
            if self.request.method not in ("GET", "HEAD", "OPTIONS") and \
                    self.application.settings.get("xsrf_cookies"):
                self.check_xsrf_cookie()
            result = self.prepare()
            if result is not None:
                result = yield result
            if self._prepared_future is not None:
                # Tell the Application we've finished with prepare()
                # and are ready for the body to arrive.
                self._prepared_future.set_result(None)
            if self._finished:
                return
            if _has_stream_request_body(self.__class__):
                # In streaming mode request.body is a Future that signals
                # the body has been completely received. The Future has no
                # result; the data has been passed to self.data_received
                # instead.
                try:
                    yield self.request.body
                except iostream.StreamClosedError:
                    return
            # Dispatch to the method named after the HTTP verb (get/post/...).
            method = getattr(self, self.request.method.lower())
            result = method(*self.path_args, **self.path_kwargs)
            if result is not None:
                result = yield result
            if self._auto_finish and not self._finished:
                self.finish()
        except Exception as e:
            try:
                self._handle_request_exception(e)
            except Exception:
                app_log.error("Exception in exception handler", exc_info=True)
            if (self._prepared_future is not None and
                    not self._prepared_future.done()):
                # In case we failed before setting _prepared_future, do it
                # now (to unblock the HTTP server). Note that this is not
                # in a finally block to avoid GC issues prior to Python 3.4.
                self._prepared_future.set_result(None)
def data_received(self, chunk):
raise NotImplementedError()
def _log(self):
self.application.log_request(self)
def _request_summary(self):
return "%s %s (%s)" % (self.request.method, self.request.uri,
self.request.remote_ip)
    def _handle_request_exception(self, e):
        """Turn an exception raised during request processing into a response.

        `Finish` completes the request quietly; `HTTPError` becomes its own
        status code; anything else becomes a logged 500.
        """
        if isinstance(e, Finish):
            # Not an error; just finish the request without logging.
            if not self._finished:
                self.finish(*e.args)
            return
        try:
            self.log_exception(*sys.exc_info())
        except Exception:
            # An error here should still get a best-effort send_error()
            # to avoid leaking the connection.
            app_log.error("Error in exception logger", exc_info=True)
        if self._finished:
            # Extra errors after the request has been finished should
            # be logged, but there is no reason to continue to try and
            # send a response.
            return
        if isinstance(e, HTTPError):
            if e.status_code not in httputil.responses and not e.reason:
                # Unknown status with no reason phrase can't be serialized;
                # degrade to a plain 500.
                gen_log.error("Bad HTTP status code: %d", e.status_code)
                self.send_error(500, exc_info=sys.exc_info())
            else:
                self.send_error(e.status_code, exc_info=sys.exc_info())
        else:
            self.send_error(500, exc_info=sys.exc_info())
    def log_exception(self, typ, value, tb):
        """Log an uncaught exception from a handler method.

        HTTPErrors carrying a log_message are logged as warnings on
        gen_log; everything else is logged as an error on app_log with
        the full stack trace.
        """
        if isinstance(value, HTTPError):
            if value.log_message:
                format = "%d %s: " + value.log_message
                args = ([value.status_code, self._request_summary()] +
                        list(value.args))
                gen_log.warning(format, *args)
        else:
            app_log.error("Uncaught exception %s\n%r", self._request_summary(),
                          self.request, exc_info=(typ, value, tb))
    def _ui_module(self, name, module):
        """Return a template-callable that renders the UI module *module*.

        Module instances are created lazily, once per request, and
        remembered in ``self._active_modules`` so render() can collect
        their js/css contributions afterwards.
        """
        def render(*args, **kwargs):
            if not hasattr(self, "_active_modules"):
                self._active_modules = {}
            if name not in self._active_modules:
                self._active_modules[name] = module(self)
            rendered = self._active_modules[name].render(*args, **kwargs)
            return rendered
        return render
def _ui_method(self, method):
return lambda *args, **kwargs: method(self, *args, **kwargs)
def _clear_headers_for_304(self):
# 304 responses should not contain entity headers (defined in
# http://www.w3.org/Protocols/rfc2616/rfc2616-sec7.html#sec7.1)
# not explicitly allowed by
# http://www.w3.org/Protocols/rfc2616/rfc2616-sec10.html#sec10.3.5
headers = ["Allow", "Content-Encoding", "Content-Language",
"Content-Length", "Content-MD5", "Content-Range",
"Content-Type", "Last-Modified"]
for h in headers:
self.clear_header(h)
def asynchronous(method):
    """Wrap request handler methods with this if they are asynchronous.

    Disables the automatic finish() after the wrapped method returns; the
    request stays open until finish() is called explicitly.  When the
    method returns a Future/awaitable, the request is finished for you
    when it resolves, and its exceptions are rethrown into the handler's
    stack context.
    """
    # Delay the IOLoop import because it's not available on app engine.
    from salt.ext.tornado.ioloop import IOLoop
    @functools.wraps(method)
    def wrapper(self, *args, **kwargs):
        self._auto_finish = False
        with stack_context.ExceptionStackContext(
                self._stack_context_handle_exception):
            result = method(self, *args, **kwargs)
            if result is not None:
                result = gen.convert_yielded(result)
                # If @asynchronous is used with @gen.coroutine, (but
                # not @gen.engine), we can automatically finish the
                # request when the future resolves. Additionally,
                # the Future will swallow any exceptions so we need
                # to throw them back out to the stack context to finish
                # the request.
                def future_complete(f):
                    f.result()
                    if not self._finished:
                        self.finish()
                IOLoop.current().add_future(result, future_complete)
                # Once we have done this, hide the Future from our
                # caller (i.e. RequestHandler._when_complete), which
                # would otherwise set up its own callback and
                # exception handler (resulting in exceptions being
                # logged twice).
                return None
            return result
    return wrapper
def stream_request_body(cls):
    """Class decorator: deliver the request body to the handler as it arrives.

    Marks *cls* so the HTTP layer calls ``prepare()`` after the headers,
    streams body chunks to ``data_received()``, and resolves
    ``request.body`` (a Future) when the upload completes.

    Raises TypeError when *cls* is not a RequestHandler subclass.
    """
    if not issubclass(cls, RequestHandler):
        # Bug fix: the message must be %-formatted; TypeError stores extra
        # arguments verbatim rather than formatting them.
        raise TypeError("expected subclass of RequestHandler, got %r" % cls)
    cls._stream_request_body = True
    return cls
def _has_stream_request_body(cls):
    """Return True if *cls* was decorated with @stream_request_body.

    Raises TypeError when *cls* is not a RequestHandler subclass.
    """
    if not issubclass(cls, RequestHandler):
        # Bug fix: the message must be %-formatted; TypeError stores extra
        # arguments verbatim rather than formatting them.
        raise TypeError("expected subclass of RequestHandler, got %r" % cls)
    return getattr(cls, '_stream_request_body', False)
def removeslash(method):
    """Handler-method decorator: redirect away a trailing slash.

    ``/foo/`` is permanently redirected to ``/foo`` for GET/HEAD requests
    (keeping the query string); other methods get a 404.  A bare ``/``
    path is never redirected and falls through to the wrapped method.
    """
    @functools.wraps(method)
    def wrapper(self, *args, **kwargs):
        path = self.request.path
        if path.endswith("/"):
            if self.request.method not in ("GET", "HEAD"):
                raise HTTPError(404)
            target = path.rstrip("/")
            if target:  # don't try to redirect '/' to ''
                if self.request.query:
                    target += "?" + self.request.query
                self.redirect(target, permanent=True)
                return
        return method(self, *args, **kwargs)
    return wrapper
def addslash(method):
    """Handler-method decorator: enforce a trailing slash on the path.

    GET/HEAD requests for ``/foo`` are permanently redirected to
    ``/foo/`` (keeping the query string); other methods get a 404.  The
    wrapped method runs only when the path already ends with a slash.
    """
    @functools.wraps(method)
    def wrapper(self, *args, **kwargs):
        path = self.request.path
        if path.endswith("/"):
            return method(self, *args, **kwargs)
        if self.request.method not in ("GET", "HEAD"):
            raise HTTPError(404)
        target = path + "/"
        if self.request.query:
            target += "?" + self.request.query
        self.redirect(target, permanent=True)
    return wrapper
class _ApplicationRouter(ReversibleRuleRouter):
    """Routing adaptor used internally by `Application`.

    Extends the rule router so that nested lists of rules become nested
    `_ApplicationRouter` instances, and `RequestHandler` subclasses used
    as rule targets are dispatched through the application's
    handler-delegate machinery.
    """
    def __init__(self, application, rules=None):
        assert isinstance(application, Application)
        self.application = application
        super(_ApplicationRouter, self).__init__(rules)
    def process_rule(self, rule):
        """Wrap list/tuple rule targets in a nested router."""
        rule = super(_ApplicationRouter, self).process_rule(rule)
        if isinstance(rule.target, (list, tuple)):
            rule.target = _ApplicationRouter(self.application, rule.target)
        return rule
    def get_target_delegate(self, target, request, **target_params):
        """Dispatch RequestHandler targets through the Application."""
        if isclass(target) and issubclass(target, RequestHandler):
            return self.application.get_handler_delegate(request, target, **target_params)
        return super(_ApplicationRouter, self).get_target_delegate(target, request, **target_params)
class Application(ReversibleRouter):
    """A collection of request handlers that make up a web application.

    Routes incoming requests to handlers by host and path rules, and
    holds the shared ``settings`` dict, the output transforms, and the
    UI modules/methods used in templates.
    """
    def __init__(self, handlers=None, default_host=None, transforms=None,
                 **settings):
        if transforms is None:
            self.transforms = []
            if settings.get("compress_response") or settings.get("gzip"):
                self.transforms.append(GZipContentEncoding)
        else:
            self.transforms = transforms
        self.default_host = default_host
        self.settings = settings
        # Built-in UI modules, always available to templates.
        self.ui_modules = {'linkify': _linkify,
                           'xsrf_form_html': _xsrf_form_html,
                           'Template': TemplateModule,
                           }
        self.ui_methods = {}
        self._load_ui_modules(settings.get("ui_modules", {}))
        self._load_ui_methods(settings.get("ui_methods", {}))
        if self.settings.get("static_path"):
            # Install static-file routes ahead of the user's handlers.
            path = self.settings["static_path"]
            handlers = list(handlers or [])
            static_url_prefix = settings.get("static_url_prefix",
                                             "/static/")
            static_handler_class = settings.get("static_handler_class",
                                                StaticFileHandler)
            static_handler_args = settings.get("static_handler_args", {})
            static_handler_args['path'] = path
            for pattern in [re.escape(static_url_prefix) + r"(.*)",
                            r"/(favicon\.ico)", r"/(robots\.txt)"]:
                handlers.insert(0, (pattern, static_handler_class,
                                    static_handler_args))
        if self.settings.get('debug'):
            # Debug mode implies several development conveniences.
            self.settings.setdefault('autoreload', True)
            self.settings.setdefault('compiled_template_cache', False)
            self.settings.setdefault('static_hash_cache', False)
            self.settings.setdefault('serve_traceback', True)
        self.wildcard_router = _ApplicationRouter(self, handlers)
        self.default_router = _ApplicationRouter(self, [
            Rule(AnyMatches(), self.wildcard_router)
        ])
        # Automatically reload modified modules
        if self.settings.get('autoreload'):
            from salt.ext.tornado import autoreload
            autoreload.start()
    def listen(self, port, address="", **kwargs):
        """Start an HTTP server for this application on the given port."""
        # import is here rather than top level because HTTPServer
        # is not importable on appengine
        from salt.ext.tornado.httpserver import HTTPServer
        server = HTTPServer(self, **kwargs)
        server.listen(port, address)
        return server
    def add_handlers(self, host_pattern, host_handlers):
        """Append handlers that only match the given host pattern.

        Host rules are inserted before the wildcard catch-all so they
        take precedence over host-agnostic handlers.
        """
        host_matcher = HostMatches(host_pattern)
        rule = Rule(host_matcher, _ApplicationRouter(self, host_handlers))
        self.default_router.rules.insert(-1, rule)
        if self.default_host is not None:
            self.wildcard_router.add_rules([(
                DefaultHostMatches(self, host_matcher.host_pattern),
                host_handlers
            )])
    def add_transform(self, transform_class):
        """Register an output transform (e.g. a content encoding)."""
        self.transforms.append(transform_class)
    def _load_ui_methods(self, methods):
        """Register template helper functions from a dict, list or module."""
        if isinstance(methods, types.ModuleType):
            self._load_ui_methods(dict((n, getattr(methods, n))
                                       for n in dir(methods)))
        elif isinstance(methods, list):
            for m in methods:
                self._load_ui_methods(m)
        else:
            # Skip private names and anything that is not a callable whose
            # name starts lowercase (capitalized names are UI modules).
            for name, fn in methods.items():
                if not name.startswith("_") and hasattr(fn, "__call__") \
                        and name[0].lower() == name[0]:
                    self.ui_methods[name] = fn
    def _load_ui_modules(self, modules):
        """Register UIModule subclasses from a dict, list or module."""
        if isinstance(modules, types.ModuleType):
            self._load_ui_modules(dict((n, getattr(modules, n))
                                       for n in dir(modules)))
        elif isinstance(modules, list):
            for m in modules:
                self._load_ui_modules(m)
        else:
            assert isinstance(modules, dict)
            for name, cls in modules.items():
                try:
                    if issubclass(cls, UIModule):
                        self.ui_modules[name] = cls
                except TypeError:
                    # issubclass raises for non-class values; skip them.
                    pass
    def __call__(self, request):
        # Legacy HTTPServer interface
        dispatcher = self.find_handler(request)
        return dispatcher.execute()
    def find_handler(self, request, **kwargs):
        """Return the message delegate that will handle *request*.

        Falls back to ``default_handler_class`` (when configured) and
        then to a plain 404 ErrorHandler when no route matches.
        """
        route = self.default_router.find_handler(request)
        if route is not None:
            return route
        if self.settings.get('default_handler_class'):
            return self.get_handler_delegate(
                request,
                self.settings['default_handler_class'],
                self.settings.get('default_handler_args', {}))
        return self.get_handler_delegate(
            request, ErrorHandler, {'status_code': 404})
    def get_handler_delegate(self, request, target_class, target_kwargs=None,
                             path_args=None, path_kwargs=None):
        """Create a `_HandlerDelegate` that runs *target_class* for *request*."""
        return _HandlerDelegate(
            self, request, target_class, target_kwargs, path_args, path_kwargs)
    def reverse_url(self, name, *args):
        """Return the URL path for the handler named *name*, filling in *args*.

        Raises KeyError when no handler with that name is registered.
        """
        reversed_url = self.default_router.reverse_url(name, *args)
        if reversed_url is not None:
            return reversed_url
        raise KeyError("%s not found in named urls" % name)
    def log_request(self, handler):
        """Log a completed request (overridable via the ``log_function`` setting).

        Log severity tracks the response status: info (<400), warning
        (<500), error otherwise.
        """
        if "log_function" in self.settings:
            self.settings["log_function"](handler)
            return
        if handler.get_status() < 400:
            log_method = access_log.info
        elif handler.get_status() < 500:
            log_method = access_log.warning
        else:
            log_method = access_log.error
        request_time = 1000.0 * handler.request.request_time()
        log_method("%d %s %.2fms", handler.get_status(),
                   handler._request_summary(), request_time)
class _HandlerDelegate(httputil.HTTPMessageDelegate):
    """HTTP message delegate that runs a RequestHandler for one request.

    Buffers the request body in memory (or streams it chunk-by-chunk when
    the handler uses @stream_request_body) and then executes the handler.
    """
    def __init__(self, application, request, handler_class, handler_kwargs,
                 path_args, path_kwargs):
        self.application = application
        self.connection = request.connection
        self.request = request
        self.handler_class = handler_class
        self.handler_kwargs = handler_kwargs or {}
        self.path_args = path_args or []
        self.path_kwargs = path_kwargs or {}
        # Accumulates body chunks in non-streaming mode.
        self.chunks = []
        self.stream_request_body = _has_stream_request_body(self.handler_class)
    def headers_received(self, start_line, headers):
        """Start execution early when streaming; otherwise wait for finish()."""
        if self.stream_request_body:
            # In streaming mode the handler runs while the body arrives;
            # request.body becomes a Future resolved on completion.
            self.request.body = Future()
            return self.execute()
    def data_received(self, data):
        if self.stream_request_body:
            return self.handler.data_received(data)
        else:
            self.chunks.append(data)
    def finish(self):
        if self.stream_request_body:
            self.request.body.set_result(None)
        else:
            self.request.body = b''.join(self.chunks)
            self.request._parse_body()
            self.execute()
    def on_connection_close(self):
        if self.stream_request_body:
            self.handler.on_connection_close()
        else:
            # Drop buffered chunks so the dead request can be collected.
            self.chunks = None
    def execute(self):
        """Instantiate the handler and start _execute() with the transforms."""
        # If template cache is disabled (usually in the debug mode),
        # re-compile templates and reload static files on every
        # request so you don't need to restart to see changes
        if not self.application.settings.get("compiled_template_cache", True):
            with RequestHandler._template_loader_lock:
                for loader in RequestHandler._template_loaders.values():
                    loader.reset()
        if not self.application.settings.get('static_hash_cache', True):
            StaticFileHandler.reset()
        self.handler = self.handler_class(self.application, self.request,
                                          **self.handler_kwargs)
        transforms = [t(self.request) for t in self.application.transforms]
        if self.stream_request_body:
            self.handler._prepared_future = Future()
        # Note that if an exception escapes handler._execute it will be
        # trapped in the Future it returns (which we are ignoring here,
        # leaving it to be logged when the Future is GC'd).
        # However, that shouldn't happen because _execute has a blanket
        # except handler, and we cannot easily access the IOLoop here to
        # call add_future (because of the requirement to remain compatible
        # with WSGI)
        self.handler._execute(transforms, *self.path_args,
                              **self.path_kwargs)
        # If we are streaming the request body, then execute() is finished
        # when the handler has prepared to receive the body. If not,
        # it doesn't matter when execute() finishes (so we return None)
        return self.handler._prepared_future
class HTTPError(Exception):
    """An exception that will be turned into an HTTP error response.

    Raising an `HTTPError` is a convenient alternative to calling
    `RequestHandler.send_error`.

    :arg int status_code: HTTP status code to send.
    :arg string log_message: message written to the logs (not shown to
        the client unless debug traceback output applies); may contain
        ``%s``-style placeholders filled in from ``*args``.
    :arg string reason: keyword-only; overrides the standard reason
        phrase for the status code.
    """
    def __init__(self, status_code=500, log_message=None, *args, **kwargs):
        self.status_code = status_code
        self.log_message = log_message
        self.args = args
        self.reason = kwargs.get('reason', None)
        if log_message and not args:
            # Without format args, escape bare '%' so later %-formatting
            # of the message cannot fail.
            self.log_message = log_message.replace('%', '%%')
    def __str__(self):
        reason = self.reason or httputil.responses.get(self.status_code,
                                                       'Unknown')
        message = "HTTP %d: %s" % (self.status_code, reason)
        if not self.log_message:
            return message
        return "%s (%s)" % (message, self.log_message % self.args)
class Finish(Exception):
    """An exception that ends the request without producing an error page.

    When `Finish` is raised in a handler, the request completes cleanly
    (any arguments are passed to ``finish()``) and no error logging or
    ``send_error`` call occurs.
    """
    pass
class MissingArgumentError(HTTPError):
    """Raised by argument accessors when a required argument is absent.

    A specialized 400 `HTTPError`; ``arg_name`` records which argument
    was missing so callers can inspect it programmatically.
    """
    def __init__(self, arg_name):
        super(MissingArgumentError, self).__init__(
            400, 'Missing argument %s' % arg_name)
        # Name of the missing argument.
        self.arg_name = arg_name
class ErrorHandler(RequestHandler):
    """Generates an error response with ``status_code`` for all requests."""
    def initialize(self, status_code):
        self.set_status(status_code)
    def prepare(self):
        # Raising in prepare() short-circuits the normal HTTP-verb dispatch.
        raise HTTPError(self._status_code)
    def check_xsrf_cookie(self):
        # POSTs to an ErrorHandler don't actually have side effects,
        # so we don't need to check the xsrf token. This allows POSTs
        # to the wrong url to return a 404 instead of 403.
        pass
class RedirectHandler(RequestHandler):
    """Redirects all GET requests to the configured URL.

    The target URL may contain ``{0}``-style placeholders that are
    filled from the route's regex capture groups; redirects are
    permanent (301) unless ``permanent=False`` is configured.
    """
    def initialize(self, url, permanent=True):
        self._url = url
        self._permanent = permanent
    def get(self, *args):
        self.redirect(self._url.format(*args), permanent=self._permanent)
class StaticFileHandler(RequestHandler):
    """Serves files from a directory, with caching, ranges and versioning."""
    # Far-future cache lifetime applied to versioned (?v=...) URLs.
    CACHE_MAX_AGE = 86400 * 365 * 10  # 10 years
    _static_hashes = {}  # type: typing.Dict  # abs path -> content hash cache
    _lock = threading.Lock()  # protects _static_hashes
def initialize(self, path, default_filename=None):
self.root = path
self.default_filename = default_filename
    @classmethod
    def reset(cls):
        """Clear the class-wide static content-hash cache (debug mode)."""
        with cls._lock:
            cls._static_hashes = {}
def head(self, path):
return self.get(path, include_body=False)
    @gen.coroutine
    def get(self, path, include_body=True):
        """Serve the requested static file.

        Resolves and validates the path, answers conditional requests
        with 304, handles a single byte-range request (206/416), and
        streams the body in 64KiB chunks with a flush after each one.
        """
        # Set up our path instance variables.
        self.path = self.parse_url_path(path)
        del path  # make sure we don't refer to path instead of self.path again
        absolute_path = self.get_absolute_path(self.root, self.path)
        self.absolute_path = self.validate_absolute_path(
            self.root, absolute_path)
        if self.absolute_path is None:
            # validate_absolute_path already finished the request
            # (e.g. a directory redirect).
            return
        self.modified = self.get_modified_time()
        self.set_headers()
        if self.should_return_304():
            self.set_status(304)
            return
        request_range = None
        range_header = self.request.headers.get("Range")
        if range_header:
            # As per RFC 2616 14.16, if an invalid Range header is specified,
            # the request will be treated as if the header didn't exist.
            request_range = httputil._parse_request_range(range_header)
        size = self.get_content_size()
        if request_range:
            start, end = request_range
            if (start is not None and start >= size) or end == 0:
                # As per RFC 2616 14.35.1, a range is not satisfiable only: if
                # the first requested byte is equal to or greater than the
                # content, or when a suffix with length 0 is specified
                self.set_status(416)  # Range Not Satisfiable
                self.set_header("Content-Type", "text/plain")
                self.set_header("Content-Range", "bytes */%s" % (size, ))
                return
            if start is not None and start < 0:
                # Negative start means a suffix range: count from the end.
                start += size
            if end is not None and end > size:
                # Clients sometimes blindly use a large range to limit their
                # download size; cap the endpoint at the actual file size.
                end = size
            # Note: only return HTTP 206 if less than the entire range has been
            # requested. Not only is this semantically correct, but Chrome
            # refuses to play audio if it gets an HTTP 206 in response to
            # ``Range: bytes=0-``.
            if size != (end or size) - (start or 0):
                self.set_status(206)  # Partial Content
                self.set_header("Content-Range",
                                httputil._get_content_range(start, end, size))
        else:
            start = end = None
        if start is not None and end is not None:
            content_length = end - start
        elif end is not None:
            content_length = end
        elif start is not None:
            content_length = size - start
        else:
            content_length = size
        self.set_header("Content-Length", content_length)
        if include_body:
            content = self.get_content(self.absolute_path, start, end)
            if isinstance(content, bytes):
                # get_content() may be overridden to return bytes directly.
                content = [content]
            for chunk in content:
                try:
                    self.write(chunk)
                    yield self.flush()
                except iostream.StreamClosedError:
                    # Client went away mid-download; stop quietly.
                    return
        else:
            assert self.request.method == "HEAD"
def compute_etag(self):
version_hash = self._get_cached_version(self.absolute_path)
if not version_hash:
return None
return '"%s"' % (version_hash, )
    def set_headers(self):
        """Set Content-Type, Last-Modified, Etag and caching headers."""
        self.set_header("Accept-Ranges", "bytes")
        self.set_etag_header()
        if self.modified is not None:
            self.set_header("Last-Modified", self.modified)
        content_type = self.get_content_type()
        if content_type:
            self.set_header("Content-Type", content_type)
        cache_time = self.get_cache_time(self.path, self.modified,
                                         content_type)
        if cache_time > 0:
            self.set_header("Expires", datetime.datetime.utcnow() +
                            datetime.timedelta(seconds=cache_time))
            self.set_header("Cache-Control", "max-age=" + str(cache_time))
        # Hook for subclasses to add custom headers.
        self.set_extra_headers(self.path)
    def should_return_304(self):
        """True if the request's conditional headers allow a 304 reply.

        Checks If-None-Match (via the Etag) first, then falls back to
        If-Modified-Since against the file's modification time.
        """
        if self.check_etag_header():
            return True
        # Check the If-Modified-Since, and don't send the result if the
        # content has not been modified
        ims_value = self.request.headers.get("If-Modified-Since")
        if ims_value is not None:
            date_tuple = email.utils.parsedate(ims_value)
            if date_tuple is not None:
                if_since = datetime.datetime(*date_tuple[:6])
                if if_since >= self.modified:
                    return True
        return False
@classmethod
def get_absolute_path(cls, root, path):
abspath = os.path.abspath(os.path.join(root, path))
return abspath
    def validate_absolute_path(self, root, absolute_path):
        """Validate and return the absolute path to serve.

        Returns None when the request was already finished (directory
        redirect); raises 403 for paths escaping *root* or non-regular
        files, and 404 for missing files.
        """
        # os.path.abspath strips a trailing /.
        # We must add it back to `root` so that we only match files
        # in a directory named `root` instead of files starting with
        # that prefix.
        root = os.path.abspath(root)
        if not root.endswith(os.path.sep):
            # abspath always removes a trailing slash, except when
            # root is '/'. This is an unusual case, but several projects
            # have independently discovered this technique to disable
            # Tornado's path validation and (hopefully) do their own,
            # so we need to support it.
            root += os.path.sep
        # The trailing slash also needs to be temporarily added back
        # the requested path so a request to root/ will match.
        if not (absolute_path + os.path.sep).startswith(root):
            raise HTTPError(403, "%s is not in root static directory",
                            self.path)
        if (os.path.isdir(absolute_path) and
                self.default_filename is not None):
            # need to look at the request.path here for when path is empty
            # but there is some prefix to the path that was already
            # trimmed by the routing
            if not self.request.path.endswith("/"):
                self.redirect(self.request.path + "/", permanent=True)
                return
            absolute_path = os.path.join(absolute_path, self.default_filename)
        if not os.path.exists(absolute_path):
            raise HTTPError(404)
        if not os.path.isfile(absolute_path):
            raise HTTPError(403, "%s is not a file", self.path)
        return absolute_path
    @classmethod
    def get_content(cls, abspath, start=None, end=None):
        """Yield the file's content between *start* and *end* in chunks.

        A generator of byte strings (up to 64KiB each); *end* is
        exclusive and omitted bounds mean beginning/end of file.
        """
        with open(abspath, "rb") as file:
            if start is not None:
                file.seek(start)
            if end is not None:
                remaining = end - (start or 0)
            else:
                remaining = None
            while True:
                chunk_size = 64 * 1024
                if remaining is not None and remaining < chunk_size:
                    chunk_size = remaining
                chunk = file.read(chunk_size)
                if chunk:
                    if remaining is not None:
                        remaining -= len(chunk)
                    yield chunk
                else:
                    # EOF: with an explicit range we must have consumed it all.
                    if remaining is not None:
                        assert remaining == 0
                    return
@classmethod
def get_content_version(cls, abspath):
data = cls.get_content(abspath)
hasher = hashlib.md5()
if isinstance(data, bytes):
hasher.update(data)
else:
for chunk in data:
hasher.update(chunk)
return hasher.hexdigest()
def _stat(self):
if not hasattr(self, '_stat_result'):
self._stat_result = os.stat(self.absolute_path)
return self._stat_result
def get_content_size(self):
stat_result = self._stat()
return stat_result[stat.ST_SIZE]
def get_modified_time(self):
stat_result = self._stat()
modified = datetime.datetime.utcfromtimestamp(
stat_result[stat.ST_MTIME])
return modified
def get_content_type(self):
mime_type, encoding = mimetypes.guess_type(self.absolute_path)
# per RFC 6713, use the appropriate type for a gzip compressed file
if encoding == "gzip":
return "application/gzip"
# As of 2015-07-21 there is no bzip2 encoding defined at
# http://www.iana.org/assignments/media-types/media-types.xhtml
# So for that (and any other encoding), use octet-stream.
elif encoding is not None:
return "application/octet-stream"
elif mime_type is not None:
return mime_type
# if mime_type not detected, use application/octet-stream
else:
return "application/octet-stream"
    def set_extra_headers(self, path):
        """Subclass hook for adding extra response headers; no-op by default."""
        pass
def get_cache_time(self, path, modified, mime_type):
return self.CACHE_MAX_AGE if "v" in self.request.arguments else 0
@classmethod
def make_static_url(cls, settings, path, include_version=True):
url = settings.get('static_url_prefix', '/static/') + path
if not include_version:
return url
version_hash = cls.get_version(settings, path)
if not version_hash:
return url
return '%s?v=%s' % (url, version_hash)
def parse_url_path(self, url_path):
if os.path.sep != "/":
url_path = url_path.replace("/", os.path.sep)
return url_path
@classmethod
def get_version(cls, settings, path):
abs_path = cls.get_absolute_path(settings['static_path'], path)
return cls._get_cached_version(abs_path)
    @classmethod
    def _get_cached_version(cls, abs_path):
        """Return the content hash for ``abs_path``, computing it at most once.

        Results are memoized in the class-wide ``_static_hashes`` map
        under ``cls._lock``; failures are cached as ``None`` so a broken
        file is not re-hashed on every request.
        """
        with cls._lock:
            hashes = cls._static_hashes
            if abs_path not in hashes:
                try:
                    hashes[abs_path] = cls.get_content_version(abs_path)
                except Exception:
                    gen_log.error("Could not open static file %r", abs_path)
                    hashes[abs_path] = None
            hsh = hashes.get(abs_path)
            if hsh:
                return hsh
        return None
class FallbackHandler(RequestHandler):
    """A ``RequestHandler`` that delegates the request to another callback.

    ``fallback`` is a callable taking the request object; it is handed
    the whole request during ``prepare``.
    """
    def initialize(self, fallback):
        self.fallback = fallback

    def prepare(self):
        # Delegate the request and mark it finished so the normal
        # RequestHandler machinery does not also produce a response.
        self.fallback(self.request)
        self._finished = True
class OutputTransform(object):
    """Base class for transformations applied to an outgoing response.

    The default implementation passes status, headers and body chunks
    through unchanged.
    """
    def __init__(self, request):
        pass

    def transform_first_chunk(self, status_code, headers, chunk, finishing):
        # type: (int, httputil.HTTPHeaders, bytes, bool) -> typing.Tuple[int, httputil.HTTPHeaders, bytes]
        # Called once with the status line, headers and first body chunk.
        return status_code, headers, chunk

    def transform_chunk(self, chunk, finishing):
        # Called for each subsequent body chunk.
        return chunk
class GZipContentEncoding(OutputTransform):
    """Applies the gzip content encoding to the response when the request
    advertises ``Accept-Encoding: gzip``.
    """
    # Whitelist of compressible mime types (in addition to any types
    # beginning with "text/").
    CONTENT_TYPES = set(["application/javascript", "application/x-javascript",
                         "application/xml", "application/atom+xml",
                         "application/json", "application/xhtml+xml",
                         "image/svg+xml"])
    # Python's GzipFile defaults to level 9, while most other gzip
    # tools (including gzip itself) default to 6, which is probably a
    # better CPU/size tradeoff.
    GZIP_LEVEL = 6
    # Responses that are too short are unlikely to benefit from gzipping
    # after considering the "Content-Encoding: gzip" header and the header
    # inside the gzip encoding.
    # Note that responses written in multiple chunks will be compressed
    # regardless of size.
    MIN_LENGTH = 1024

    def __init__(self, request):
        self._gzipping = "gzip" in request.headers.get("Accept-Encoding", "")

    def _compressible_type(self, ctype):
        # text/* is always considered compressible; otherwise consult
        # the whitelist above.
        return ctype.startswith('text/') or ctype in self.CONTENT_TYPES

    def transform_first_chunk(self, status_code, headers, chunk, finishing):
        # type: (int, httputil.HTTPHeaders, bytes, bool) -> typing.Tuple[int, httputil.HTTPHeaders, bytes]
        # TODO: can/should this type be inherited from the superclass?
        if 'Vary' in headers:
            headers['Vary'] += ', Accept-Encoding'
        else:
            headers['Vary'] = 'Accept-Encoding'
        if self._gzipping:
            ctype = _unicode(headers.get("Content-Type", "")).split(";")[0]
            # Only compress compressible types that are not already
            # encoded and (for single-chunk responses) long enough to
            # benefit from compression.
            self._gzipping = self._compressible_type(ctype) and \
                (not finishing or len(chunk) >= self.MIN_LENGTH) and \
                ("Content-Encoding" not in headers)
        if self._gzipping:
            headers["Content-Encoding"] = "gzip"
            self._gzip_value = BytesIO()
            self._gzip_file = gzip.GzipFile(mode="w", fileobj=self._gzip_value,
                                            compresslevel=self.GZIP_LEVEL)
            chunk = self.transform_chunk(chunk, finishing)
            if "Content-Length" in headers:
                # The original content length is no longer correct.
                # If this is the last (and only) chunk, we can set the new
                # content-length; otherwise we remove it and fall back to
                # chunked encoding.
                if finishing:
                    headers["Content-Length"] = str(len(chunk))
                else:
                    del headers["Content-Length"]
        return status_code, headers, chunk

    def transform_chunk(self, chunk, finishing):
        if self._gzipping:
            self._gzip_file.write(chunk)
            if finishing:
                self._gzip_file.close()
            else:
                self._gzip_file.flush()
            # Hand back whatever the compressor has produced so far and
            # reset the in-memory buffer for the next chunk.
            chunk = self._gzip_value.getvalue()
            self._gzip_value.truncate(0)
            self._gzip_value.seek(0)
        return chunk
def authenticated(method):
    """Decorate a handler method so it requires a logged-in user.

    Unauthenticated GET/HEAD requests are redirected to the login url
    (adding a ``next`` parameter when the url has no query string);
    other HTTP methods receive a 403 error.
    """
    @functools.wraps(method)
    def wrapper(self, *args, **kwargs):
        if self.current_user:
            return method(self, *args, **kwargs)
        if self.request.method not in ("GET", "HEAD"):
            raise HTTPError(403)
        url = self.get_login_url()
        if "?" not in url:
            # If the login url is absolute, make the next url absolute too.
            if urlparse.urlsplit(url).scheme:
                next_url = self.request.full_url()
            else:
                next_url = self.request.uri
            url += "?" + urlencode(dict(next=next_url))
        self.redirect(url)
    return wrapper
class UIModule(object):
    """A reusable UI unit embedded in a page by a request handler.

    Subclasses must override ``render``; the remaining hooks may
    contribute javascript/css/html to the enclosing page and return
    ``None`` by default.
    """
    def __init__(self, handler):
        self.handler = handler
        self.request = handler.request
        self.ui = handler.ui
        self.locale = handler.locale

    @property
    def current_user(self):
        # Delegate to the owning handler's authenticated user.
        return self.handler.current_user

    def render(self, *args, **kwargs):
        """Override in subclasses to return this module's output."""
        raise NotImplementedError()

    def embedded_javascript(self):
        """Override to return a JavaScript string to embed in the page."""
        return None

    def javascript_files(self):
        """Override to return JavaScript file URLs needed by this module."""
        return None

    def embedded_css(self):
        """Override to return a CSS string to embed in the page."""
        return None

    def css_files(self):
        """Override to return CSS file URLs needed by this module."""
        return None

    def html_head(self):
        """Override to return HTML to place in the page's <head>."""
        return None

    def html_body(self):
        """Override to return HTML to place at the end of the <body>."""
        return None

    def render_string(self, path, **kwargs):
        """Render the template at ``path`` via the owning handler."""
        return self.handler.render_string(path, **kwargs)
class _linkify(UIModule):
    """Built-in UI module exposing ``escape.linkify`` to templates."""
    def render(self, text, **kwargs):
        # Convert plain text into HTML with clickable links.
        return escape.linkify(text, **kwargs)
class _xsrf_form_html(UIModule):
    """Built-in UI module emitting the handler's XSRF form field."""
    def render(self):
        return self.handler.xsrf_form_html()
class TemplateModule(UIModule):
    """UIModule rendered from a template.

    The template may call ``set_resources(...)`` to register
    javascript/css/html resources, which are later aggregated by the
    ``embedded_*`` / ``*_files`` / ``html_*`` hooks.
    """
    def __init__(self, handler):
        super(TemplateModule, self).__init__(handler)
        # keep resources in both a list and a dict to preserve order
        self._resource_list = []
        self._resource_dict = {}

    def render(self, path, **kwargs):
        def set_resources(**kwargs):
            # Record resources for this template exactly once;
            # conflicting repeat registrations are an error.
            if path not in self._resource_dict:
                self._resource_list.append(kwargs)
                self._resource_dict[path] = kwargs
            else:
                if self._resource_dict[path] != kwargs:
                    raise ValueError("set_resources called with different "
                                     "resources for the same template")
            # Returns "" so the call leaves no trace in the rendered output.
            return ""
        return self.render_string(path, set_resources=set_resources,
                                  **kwargs)

    def _get_resources(self, key):
        # Yield ``key`` from each resource dict that defines it, in
        # first-seen template order.
        return (r[key] for r in self._resource_list if key in r)

    def embedded_javascript(self):
        return "\n".join(self._get_resources("embedded_javascript"))

    def javascript_files(self):
        result = []
        for f in self._get_resources("javascript_files"):
            # A resource may be a single filename or a list of filenames.
            if isinstance(f, (unicode_type, bytes)):
                result.append(f)
            else:
                result.extend(f)
        return result

    def embedded_css(self):
        return "\n".join(self._get_resources("embedded_css"))

    def css_files(self):
        result = []
        for f in self._get_resources("css_files"):
            if isinstance(f, (unicode_type, bytes)):
                result.append(f)
            else:
                result.extend(f)
        return result

    def html_head(self):
        return "".join(self._get_resources("html_head"))

    def html_body(self):
        return "".join(self._get_resources("html_body"))
class _UIModuleNamespace(object):
def __init__(self, handler, ui_modules):
self.handler = handler
self.ui_modules = ui_modules
def __getitem__(self, key):
return self.handler._ui_module(key, self.ui_modules[key])
def __getattr__(self, key):
try:
return self[key]
except KeyError as e:
raise AttributeError(str(e))
if hasattr(hmac, 'compare_digest'): # python 3.3
_time_independent_equals = hmac.compare_digest
else:
def _time_independent_equals(a, b):
if len(a) != len(b):
return False
result = 0
if isinstance(a[0], int): # python3 byte strings
for x, y in zip(a, b):
result |= x ^ y
else: # python2
for x, y in zip(a, b):
result |= ord(x) ^ ord(y)
return result == 0
def create_signed_value(secret, name, value, version=None, clock=None,
                        key_version=None):
    """Sign and timestamp ``value`` for later verification.

    Returns bytes in signed-value format ``version`` (default
    ``DEFAULT_SIGNED_VALUE_VERSION``).  ``secret`` is the signing key;
    for v2 with ``key_version`` set it may be a dict of keys.
    ``clock`` (default ``time.time``) is injectable for testing.
    """
    if version is None:
        version = DEFAULT_SIGNED_VALUE_VERSION
    if clock is None:
        clock = time.time

    timestamp = utf8(str(int(clock())))
    value = base64.b64encode(utf8(value))
    if version == 1:
        signature = _create_signature_v1(secret, name, value, timestamp)
        value = b"|".join([value, timestamp, signature])
        return value
    elif version == 2:
        # The v2 format consists of a version number and a series of
        # length-prefixed fields "%d:%s", the last of which is a
        # signature, all separated by pipes. All numbers are in
        # decimal format with no leading zeros. The signature is an
        # HMAC-SHA256 of the whole string up to that point, including
        # the final pipe.
        #
        # The fields are:
        # - format version (i.e. 2; no length prefix)
        # - key version (integer, default is 0)
        # - timestamp (integer seconds since epoch)
        # - name (not encoded; assumed to be ~alphanumeric)
        # - value (base64-encoded)
        # - signature (hex-encoded; no length prefix)
        def format_field(s):
            return utf8("%d:" % len(s)) + utf8(s)
        to_sign = b"|".join([
            b"2",
            format_field(str(key_version or 0)),
            format_field(timestamp),
            format_field(name),
            format_field(value),
            b''])

        if isinstance(secret, dict):
            assert key_version is not None, 'Key version must be set when sign key dict is used'
            assert version >= 2, 'Version must be at least 2 for key version support'
            secret = secret[key_version]

        signature = _create_signature_v2(secret, to_sign)
        return to_sign + signature
    else:
        raise ValueError("Unsupported version %d" % version)
# A leading version number in decimal
# with no leading zeros, followed by a pipe.
_signed_value_version_re = re.compile(br"^([1-9][0-9]*)\|(.*)$")
def _get_version(value):
# Figures out what version value is. Version 1 did not include an
# explicit version field and started with arbitrary base64 data,
# which makes this tricky.
m = _signed_value_version_re.match(value)
if m is None:
version = 1
else:
try:
version = int(m.group(1))
if version > 999:
# Certain payloads from the version-less v1 format may
# be parsed as valid integers. Due to base64 padding
# restrictions, this can only happen for numbers whose
# length is a multiple of 4, so we can treat all
# numbers up to 999 as versions, and for the rest we
# fall back to v1 format.
version = 1
except ValueError:
version = 1
return version
def decode_signed_value(secret, name, value, max_age_days=31,
                        clock=None, min_version=None):
    """Verify and decode a value produced by ``create_signed_value``.

    Returns the original payload bytes, or ``None`` when the value is
    empty, older than ``min_version``, or fails verification in the
    version-specific decoder.
    """
    if clock is None:
        clock = time.time
    if min_version is None:
        min_version = DEFAULT_SIGNED_VALUE_MIN_VERSION
    if min_version > 2:
        raise ValueError("Unsupported min_version %d" % min_version)
    if not value:
        return None

    value = utf8(value)
    version = _get_version(value)
    if version < min_version:
        return None

    # Dispatch to the format-specific decoder; unknown versions fail.
    decoders = {
        1: _decode_signed_value_v1,
        2: _decode_signed_value_v2,
    }
    decoder = decoders.get(version)
    if decoder is None:
        return None
    return decoder(secret, name, value, max_age_days, clock)
def _decode_signed_value_v1(secret, name, value, max_age_days, clock):
    """Verify a v1 signed value; return the payload bytes or ``None``.

    ``None`` is returned for malformed values, bad signatures, expired
    or suspicious timestamps, and undecodable payloads.
    """
    parts = utf8(value).split(b"|")
    if len(parts) != 3:
        return None
    signature = _create_signature_v1(secret, name, parts[0], parts[1])
    if not _time_independent_equals(parts[2], signature):
        gen_log.warning("Invalid cookie signature %r", value)
        return None
    timestamp = int(parts[1])
    if timestamp < clock() - max_age_days * 86400:
        gen_log.warning("Expired cookie %r", value)
        return None
    if timestamp > clock() + 31 * 86400:
        # _cookie_signature does not hash a delimiter between the
        # parts of the cookie, so an attacker could transfer trailing
        # digits from the payload to the timestamp without altering the
        # signature. For backwards compatibility, sanity-check timestamp
        # here instead of modifying _cookie_signature.
        gen_log.warning("Cookie timestamp in future; possible tampering %r",
                        value)
        return None
    if parts[1].startswith(b"0"):
        # Leading zeros would allow alternate encodings of the same
        # timestamp; reject them as tampering.
        gen_log.warning("Tampered cookie %r", value)
        return None
    try:
        return base64.b64decode(parts[0])
    except Exception:
        return None
def _decode_fields_v2(value):
def _consume_field(s):
length, _, rest = s.partition(b':')
n = int(length)
field_value = rest[:n]
# In python 3, indexing bytes returns small integers; we must
# use a slice to get a byte string as in python 2.
if rest[n:n + 1] != b'|':
raise ValueError("malformed v2 signed value field")
rest = rest[n + 1:]
return field_value, rest
rest = value[2:] # remove version number
key_version, rest = _consume_field(rest)
timestamp, rest = _consume_field(rest)
name_field, rest = _consume_field(rest)
value_field, passed_sig = _consume_field(rest)
return int(key_version), timestamp, name_field, value_field, passed_sig
def _decode_signed_value_v2(secret, name, value, max_age_days, clock):
    """Verify a v2 signed value; return the payload bytes or ``None``."""
    try:
        key_version, timestamp, name_field, value_field, passed_sig = _decode_fields_v2(value)
    except ValueError:
        return None
    # Everything up to (but excluding) the signature is what was signed.
    # NOTE(review): if passed_sig were empty, value[:-0] would be b"";
    # confirm _decode_fields_v2 cannot yield an empty signature here.
    signed_string = value[:-len(passed_sig)]

    if isinstance(secret, dict):
        # Multiple signing keys: look up the one named in the value.
        try:
            secret = secret[key_version]
        except KeyError:
            return None

    expected_sig = _create_signature_v2(secret, signed_string)
    # Constant-time comparison (see _time_independent_equals).
    if not _time_independent_equals(passed_sig, expected_sig):
        return None
    if name_field != utf8(name):
        return None
    timestamp = int(timestamp)
    if timestamp < clock() - max_age_days * 86400:
        # The signature has expired.
        return None
    try:
        return base64.b64decode(value_field)
    except Exception:
        return None
def get_signature_key_version(value):
    """Return the key version of a v2 signed value, or ``None``.

    v1 values carry no key version; malformed values also yield ``None``.
    """
    value = utf8(value)
    if _get_version(value) < 2:
        return None
    try:
        return _decode_fields_v2(value)[0]
    except ValueError:
        return None
def _create_signature_v1(secret, *parts):
    """Return the hex HMAC-SHA1 signature (bytes) over ``parts``.

    SHA-1 is retained only for compatibility with the legacy v1 signed
    value format; v2 uses HMAC-SHA256.
    """
    # Named ``mac`` rather than ``hash`` to avoid shadowing the builtin.
    mac = hmac.new(utf8(secret), digestmod=hashlib.sha1)
    for part in parts:
        mac.update(utf8(part))
    return utf8(mac.hexdigest())
def _create_signature_v2(secret, s):
    """Return the hex HMAC-SHA256 signature (bytes) of ``s``."""
    # Named ``mac`` rather than ``hash`` to avoid shadowing the builtin.
    mac = hmac.new(utf8(secret), digestmod=hashlib.sha256)
    mac.update(utf8(s))
    return utf8(mac.hexdigest())
def is_absolute(path):
    """True if ``path`` is filesystem-absolute or a full http(s) URL."""
    # startswith accepts a tuple of alternatives.
    return path.startswith(("/", "http:", "https:"))
| true | true |
f717643a746a127d6805e12ee1e2871ece59b391 | 231 | py | Python | replacefs/colors.py | yoarch/replace | 5255810c019141f7de03b96c26a9b732d2218597 | [
"MIT"
] | null | null | null | replacefs/colors.py | yoarch/replace | 5255810c019141f7de03b96c26a9b732d2218597 | [
"MIT"
] | null | null | null | replacefs/colors.py | yoarch/replace | 5255810c019141f7de03b96c26a9b732d2218597 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# ANSI terminal escape sequences used to colorize output.
# "38;5;<n>" selects a 256-color palette entry; a trailing ";1" adds bold.
RED = '\033[38;5;196;1m'
ORANGE = '\033[38;5;202;1m'
WHITE = '\033[1;37m'
BLUE = '\033[1;34m'
BASE_C = '\033[0m'  # reset all attributes to the terminal default
GREEN = '\033[38;5;40;1m'
PURPLE = '\033[38;5;135;1m'
GREY = '\033[1;30m'
YELLOW = '\033[1;33m'
| 21 | 27 | 0.588745 |
# ANSI terminal escape sequences for colored output.
# "38;5;<n>" selects a 256-color palette entry; ";1" adds bold.
RED = '\033[38;5;196;1m'
ORANGE = '\033[38;5;202;1m'
WHITE = '\033[1;37m'
BLUE = '\033[1;34m'
BASE_C = '\033[0m'  # reset attributes
GREEN = '\033[38;5;40;1m'
PURPLE = '\033[38;5;135;1m'
GREY = '\033[1;30m'
YELLOW = '\033[1;33m'
| true | true |
f7176531240df0f77d476afe1bbca902bbc7bc3d | 2,864 | py | Python | src/assets/sd_vaccine_plots/main.py | drvinceknight/amwoss | 8b0bf80f0a06dc5cf9bfeef4b9f9e174ccadf06d | [
"MIT"
] | 1 | 2022-03-21T21:35:44.000Z | 2022-03-21T21:35:44.000Z | src/assets/sd_vaccine_plots/main.py | drvinceknight/amwoss | 8b0bf80f0a06dc5cf9bfeef4b9f9e174ccadf06d | [
"MIT"
] | 71 | 2019-11-18T11:00:25.000Z | 2021-10-21T22:49:40.000Z | src/assets/sd_vaccine_plots/main.py | drvinceknight/amwoss | 8b0bf80f0a06dc5cf9bfeef4b9f9e174ccadf06d | [
"MIT"
] | 1 | 2020-01-15T12:00:49.000Z | 2020-01-15T12:00:49.000Z | from scipy.integrate import solve_ivp
import matplotlib.pyplot as plt
plt.style.use('seaborn-whitegrid')
def derivatives(t, y, vaccine_rate, birth_rate=0.01):
    """Right-hand side of the SIR-with-vaccination ODE system.

    Args:
        t: current time (unused; required by the solver interface)
        y: tuple (S, I, R) of current compartment sizes
        vaccine_rate: fraction of newborns vaccinated, in [0, 1]
        birth_rate: per-capita birth rate (default 0.01)

    Returns:
        Tuple (dS/dt, dI/dt, dR/dt).
    """
    # Fixed epidemiological constants.
    infection_rate = 0.3
    recovery_rate = 0.02
    death_rate = 0.01

    susceptible, infected, recovered = y
    population = susceptible + infected + recovered
    # New infections scale with the S*I contact rate per capita.
    new_infections = (infection_rate * susceptible * infected) / population

    d_susceptible = (
        -new_infections
        + ((1 - vaccine_rate) * birth_rate * population)
        - (death_rate * susceptible)
    )
    d_infected = (
        new_infections
        - (recovery_rate * infected)
        - (death_rate * infected)
    )
    d_recovered = (
        (recovery_rate * infected)
        - (death_rate * recovered)
        + (vaccine_rate * birth_rate * population)
    )
    return d_susceptible, d_infected, d_recovered
def integrate_ode(
    derivative_function,
    t_span,
    y0=(2999, 1, 0),
    vaccine_rate=0.85,
    birth_rate=0.01,
):
    """Numerically integrate the epidemic ODE system over ``t_span``.

    Args:
        derivative_function: RHS callable ``f(t, y, vaccine_rate, birth_rate)``
        t_span: (t_start, t_end) endpoints of the integration interval
        y0: initial (S, I, R) populations (default (2999, 1, 0))
        vaccine_rate: fraction of newborns vaccinated (default 0.85)
        birth_rate: per-capita birth rate (default 0.01)

    Returns:
        Tuple ``(t, S, I, R)`` of arrays sampled at the solver's steps.
    """
    solution = solve_ivp(
        derivative_function,
        t_span,
        y0,
        args=(vaccine_rate, birth_rate),
    )
    # solve_ivp stacks the state variables row-wise in solution.y.
    return solution.t, solution.y[0], solution.y[1], solution.y[2]
t_span = [0, 730]


def _plot_compartments(filename, vaccine_rate):
    """Integrate the model and save an S/I/R time-series plot to ``filename``."""
    t, S, I, R = integrate_ode(derivatives, t_span, vaccine_rate=vaccine_rate)
    fig, ax = plt.subplots(1, figsize=(10, 5))
    ax.plot(t, S, label='Susceptible', c='black', linestyle='solid', linewidth=1.75)
    ax.plot(t, I, label='Infected', c='black', linestyle='dotted', linewidth=1.75)
    ax.plot(t, R, label='Recovered', c='black', linestyle='dashed', linewidth=1.75)
    ax.legend(fontsize=14, frameon=True, ncol=3, bbox_to_anchor=(0.85, 1.13))
    ax.set_xlabel('Time', fontsize=14)
    ax.set_ylabel('People', fontsize=14)
    fig.savefig(filename)


# The two scenarios differ only in the newborn vaccination rate; the
# duplicated plotting code is factored into _plot_compartments.
_plot_compartments("plot_no_vaccine.pdf", vaccine_rate=0.0)
_plot_compartments("plot_with_vaccine.pdf", vaccine_rate=0.85)
import matplotlib.pyplot as plt
plt.style.use('seaborn-whitegrid')
def derivatives(t, y, vaccine_rate, birth_rate=0.01):
    """SIR-with-vaccination right-hand side; returns (dS/dt, dI/dt, dR/dt).

    ``y`` is the (S, I, R) state, ``vaccine_rate`` the fraction of
    newborns vaccinated; ``t`` is unused but required by the solver.
    """
    # Fixed epidemiological constants.
    infection_rate = 0.3
    recovery_rate = 0.02
    death_rate = 0.01
    S, I, R = y
    N = S + I + R
    dSdt = (
        -((infection_rate * S * I) / N)
        + ((1 - vaccine_rate) * birth_rate * N)
        - (death_rate * S)
    )
    dIdt = (
        ((infection_rate * S * I) / N)
        - (recovery_rate * I)
        - (death_rate * I)
    )
    dRdt = (
        (recovery_rate * I)
        - (death_rate * R)
        + (vaccine_rate * birth_rate * N)
    )
    return dSdt, dIdt, dRdt
def integrate_ode(
    derivative_function,
    t_span,
    y0=(2999, 1, 0),
    vaccine_rate=0.85,
    birth_rate=0.01,
):
    """Integrate the epidemic ODEs over ``t_span``; returns (t, S, I, R) arrays."""
    sol = solve_ivp(
        derivative_function,
        t_span,
        y0,
        args=(vaccine_rate, birth_rate),
    )
    # solve_ivp stacks the state variables row-wise in sol.y.
    ts, S, I, R = sol.t, sol.y[0], sol.y[1], sol.y[2]
    return ts, S, I, R
# Integrate and plot the no-vaccination scenario.
t_span = [0, 730]
t, S, I, R = integrate_ode(derivatives, t_span, vaccine_rate=0.0)
fig, ax = plt.subplots(1, figsize=(10, 5))
ax.plot(t, S, label='Susceptible', c='black', linestyle='solid', linewidth=1.75)
ax.plot(t, I, label='Infected', c='black', linestyle='dotted', linewidth=1.75)
ax.plot(t, R, label='Recovered', c='black', linestyle='dashed', linewidth=1.75)
ax.legend(fontsize=14, frameon=True, ncol=3, bbox_to_anchor=(0.85, 1.13))
ax.set_xlabel('Time', fontsize=14)
ax.set_ylabel('People', fontsize=14)
fig.savefig("plot_no_vaccine.pdf")
# Same plot with 85% of newborns vaccinated.
t, S, I, R = integrate_ode(derivatives, t_span, vaccine_rate=0.85)
fig, ax = plt.subplots(1, figsize=(10, 5))
ax.plot(t, S, label='Susceptible', c='black', linestyle='solid', linewidth=1.75)
ax.plot(t, I, label='Infected', c='black', linestyle='dotted', linewidth=1.75)
ax.plot(t, R, label='Recovered', c='black', linestyle='dashed', linewidth=1.75)
ax.legend(fontsize=14, frameon=True, ncol=3, bbox_to_anchor=(0.85, 1.13))
ax.set_xlabel('Time', fontsize=14)
ax.set_ylabel('People', fontsize=14)
fig.savefig("plot_with_vaccine.pdf")
f717657e62b89fc8eff399b8be1e18a4646e310b | 8,981 | py | Python | alipay/aop/api/request/AlipayOpenServicemarketOrderCreateRequest.py | articuly/alipay-sdk-python-all | 0259cd28eca0f219b97dac7f41c2458441d5e7a6 | [
"Apache-2.0"
] | null | null | null | alipay/aop/api/request/AlipayOpenServicemarketOrderCreateRequest.py | articuly/alipay-sdk-python-all | 0259cd28eca0f219b97dac7f41c2458441d5e7a6 | [
"Apache-2.0"
] | null | null | null | alipay/aop/api/request/AlipayOpenServicemarketOrderCreateRequest.py | articuly/alipay-sdk-python-all | 0259cd28eca0f219b97dac7f41c2458441d5e7a6 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import simplejson as json
from alipay.aop.api.FileItem import FileItem
from alipay.aop.api.constant.ParamConstants import *
class AlipayOpenServicemarketOrderCreateRequest(object):
    """Request wrapper for the ``alipay.open.servicemarket.order.create`` API.

    Business fields are exposed as properties; ``get_params`` flattens
    them into the parameter dict sent to the gateway (JSON-encoding any
    value exposing ``to_alipay_dict``) and ``get_multipart_params``
    returns the file-upload parts (the app logo, if set).
    """

    def __init__(self, biz_model=None):
        self._biz_model = biz_model
        # Business payload fields (all optional).
        self._app_category_ids = None
        self._app_desc = None
        self._app_english_name = None
        self._app_name = None
        self._app_slogan = None
        self._merchandise_id = None
        self._merchant_pid = None
        self._out_biz_no = None
        self._service_email = None
        self._service_phone = None
        self._app_logo = None
        # Common gateway parameters.
        self._version = "1.0"
        self._terminal_type = None
        self._terminal_info = None
        self._prod_code = None
        self._notify_url = None
        self._return_url = None
        self._udf_params = None
        self._need_encrypt = False

    @property
    def biz_model(self):
        return self._biz_model

    @biz_model.setter
    def biz_model(self, value):
        self._biz_model = value

    @property
    def app_category_ids(self):
        return self._app_category_ids

    @app_category_ids.setter
    def app_category_ids(self, value):
        self._app_category_ids = value

    @property
    def app_desc(self):
        return self._app_desc

    @app_desc.setter
    def app_desc(self, value):
        self._app_desc = value

    @property
    def app_english_name(self):
        return self._app_english_name

    @app_english_name.setter
    def app_english_name(self, value):
        self._app_english_name = value

    @property
    def app_name(self):
        return self._app_name

    @app_name.setter
    def app_name(self, value):
        self._app_name = value

    @property
    def app_slogan(self):
        return self._app_slogan

    @app_slogan.setter
    def app_slogan(self, value):
        self._app_slogan = value

    @property
    def merchandise_id(self):
        return self._merchandise_id

    @merchandise_id.setter
    def merchandise_id(self, value):
        self._merchandise_id = value

    @property
    def merchant_pid(self):
        return self._merchant_pid

    @merchant_pid.setter
    def merchant_pid(self, value):
        self._merchant_pid = value

    @property
    def out_biz_no(self):
        return self._out_biz_no

    @out_biz_no.setter
    def out_biz_no(self, value):
        self._out_biz_no = value

    @property
    def service_email(self):
        return self._service_email

    @service_email.setter
    def service_email(self, value):
        self._service_email = value

    @property
    def service_phone(self):
        return self._service_phone

    @service_phone.setter
    def service_phone(self, value):
        self._service_phone = value

    @property
    def app_logo(self):
        return self._app_logo

    @app_logo.setter
    def app_logo(self, value):
        # Only FileItem uploads are accepted; anything else is silently
        # ignored to preserve the original lenient behaviour.
        if not isinstance(value, FileItem):
            return
        self._app_logo = value

    @property
    def version(self):
        return self._version

    @version.setter
    def version(self, value):
        self._version = value

    @property
    def terminal_type(self):
        return self._terminal_type

    @terminal_type.setter
    def terminal_type(self, value):
        self._terminal_type = value

    @property
    def terminal_info(self):
        return self._terminal_info

    @terminal_info.setter
    def terminal_info(self, value):
        self._terminal_info = value

    @property
    def prod_code(self):
        return self._prod_code

    @prod_code.setter
    def prod_code(self, value):
        self._prod_code = value

    @property
    def notify_url(self):
        return self._notify_url

    @notify_url.setter
    def notify_url(self, value):
        self._notify_url = value

    @property
    def return_url(self):
        return self._return_url

    @return_url.setter
    def return_url(self, value):
        self._return_url = value

    @property
    def udf_params(self):
        return self._udf_params

    @udf_params.setter
    def udf_params(self, value):
        # Only a dict is accepted; other values are silently ignored.
        if not isinstance(value, dict):
            return
        self._udf_params = value

    @property
    def need_encrypt(self):
        return self._need_encrypt

    @need_encrypt.setter
    def need_encrypt(self, value):
        self._need_encrypt = value

    def add_other_text_param(self, key, value):
        """Attach an arbitrary extra text parameter to the request."""
        if not self.udf_params:
            self.udf_params = dict()
        self.udf_params[key] = value

    @staticmethod
    def _serialize(value):
        """JSON-encode values exposing ``to_alipay_dict``; pass others through.

        Uses the gateway's canonical JSON settings (compact separators,
        sorted keys, decimals preserved, non-ASCII kept).
        """
        if hasattr(value, 'to_alipay_dict'):
            return json.dumps(obj=value.to_alipay_dict(), use_decimal=True,
                              ensure_ascii=False, sort_keys=True,
                              separators=(',', ':'))
        return value

    def get_params(self):
        """Build the flat parameter dict for the gateway call."""
        params = dict()
        params[P_METHOD] = 'alipay.open.servicemarket.order.create'
        params[P_VERSION] = self.version
        if self.biz_model:
            params[P_BIZ_CONTENT] = json.dumps(
                obj=self.biz_model.to_alipay_dict(), use_decimal=True,
                ensure_ascii=False, sort_keys=True, separators=(',', ':'))
        # All business fields share the same serialization rule; only
        # truthy values are sent (matching the original behaviour).
        biz_fields = (
            ('app_category_ids', self.app_category_ids),
            ('app_desc', self.app_desc),
            ('app_english_name', self.app_english_name),
            ('app_name', self.app_name),
            ('app_slogan', self.app_slogan),
            ('merchandise_id', self.merchandise_id),
            ('merchant_pid', self.merchant_pid),
            ('out_biz_no', self.out_biz_no),
            ('service_email', self.service_email),
            ('service_phone', self.service_phone),
        )
        for key, value in biz_fields:
            if value:
                params[key] = self._serialize(value)
        if self.terminal_type:
            params['terminal_type'] = self.terminal_type
        if self.terminal_info:
            params['terminal_info'] = self.terminal_info
        if self.prod_code:
            params['prod_code'] = self.prod_code
        if self.notify_url:
            params['notify_url'] = self.notify_url
        if self.return_url:
            params['return_url'] = self.return_url
        if self.udf_params:
            params.update(self.udf_params)
        return params

    def get_multipart_params(self):
        """Return the multipart (file upload) parameters, if any."""
        multipart_params = dict()
        if self.app_logo:
            multipart_params['app_logo'] = self.app_logo
        return multipart_params
| 33.262963 | 176 | 0.635119 |
import simplejson as json
from alipay.aop.api.FileItem import FileItem
from alipay.aop.api.constant.ParamConstants import *
class AlipayOpenServicemarketOrderCreateRequest(object):
def __init__(self, biz_model=None):
self._biz_model = biz_model
self._app_category_ids = None
self._app_desc = None
self._app_english_name = None
self._app_name = None
self._app_slogan = None
self._merchandise_id = None
self._merchant_pid = None
self._out_biz_no = None
self._service_email = None
self._service_phone = None
self._app_logo = None
self._version = "1.0"
self._terminal_type = None
self._terminal_info = None
self._prod_code = None
self._notify_url = None
self._return_url = None
self._udf_params = None
self._need_encrypt = False
@property
def biz_model(self):
return self._biz_model
@biz_model.setter
def biz_model(self, value):
self._biz_model = value
@property
def app_category_ids(self):
return self._app_category_ids
@app_category_ids.setter
def app_category_ids(self, value):
self._app_category_ids = value
@property
def app_desc(self):
return self._app_desc
@app_desc.setter
def app_desc(self, value):
self._app_desc = value
@property
def app_english_name(self):
return self._app_english_name
@app_english_name.setter
def app_english_name(self, value):
self._app_english_name = value
@property
def app_name(self):
return self._app_name
@app_name.setter
def app_name(self, value):
self._app_name = value
@property
def app_slogan(self):
return self._app_slogan
@app_slogan.setter
def app_slogan(self, value):
self._app_slogan = value
@property
def merchandise_id(self):
return self._merchandise_id
@merchandise_id.setter
def merchandise_id(self, value):
self._merchandise_id = value
@property
def merchant_pid(self):
return self._merchant_pid
@merchant_pid.setter
def merchant_pid(self, value):
self._merchant_pid = value
@property
def out_biz_no(self):
return self._out_biz_no
@out_biz_no.setter
def out_biz_no(self, value):
self._out_biz_no = value
@property
def service_email(self):
return self._service_email
@service_email.setter
def service_email(self, value):
self._service_email = value
@property
def service_phone(self):
return self._service_phone
@service_phone.setter
def service_phone(self, value):
self._service_phone = value
@property
def app_logo(self):
return self._app_logo
@app_logo.setter
def app_logo(self, value):
if not isinstance(value, FileItem):
return
self._app_logo = value
@property
def version(self):
return self._version
@version.setter
def version(self, value):
self._version = value
@property
def terminal_type(self):
return self._terminal_type
@terminal_type.setter
def terminal_type(self, value):
self._terminal_type = value
@property
def terminal_info(self):
return self._terminal_info
@terminal_info.setter
def terminal_info(self, value):
self._terminal_info = value
@property
def prod_code(self):
return self._prod_code
@prod_code.setter
def prod_code(self, value):
self._prod_code = value
@property
def notify_url(self):
return self._notify_url
@notify_url.setter
def notify_url(self, value):
self._notify_url = value
@property
def return_url(self):
return self._return_url
@return_url.setter
def return_url(self, value):
self._return_url = value
@property
def udf_params(self):
return self._udf_params
@udf_params.setter
def udf_params(self, value):
if not isinstance(value, dict):
return
self._udf_params = value
@property
def need_encrypt(self):
return self._need_encrypt
@need_encrypt.setter
def need_encrypt(self, value):
self._need_encrypt = value
def add_other_text_param(self, key, value):
if not self.udf_params:
self.udf_params = dict()
self.udf_params[key] = value
def get_params(self):
    """Assemble the flat request-parameter dict sent to the gateway.

    Fields exposing to_alipay_dict() are serialized to canonical compact
    JSON; plain values pass through unchanged.  User-defined params, if
    any, are merged in last.
    """
    params = dict()
    params[P_METHOD] = 'alipay.open.servicemarket.order.create'
    params[P_VERSION] = self.version
    if self.biz_model:
        params[P_BIZ_CONTENT] = json.dumps(obj=self.biz_model.to_alipay_dict(), use_decimal=True, ensure_ascii=False, sort_keys=True, separators=(',', ':'))

    def _render(value):
        # Model objects become compact, sorted JSON; scalars are untouched.
        if hasattr(value, 'to_alipay_dict'):
            return json.dumps(obj=value.to_alipay_dict(), use_decimal=True, ensure_ascii=False, sort_keys=True, separators=(',', ':'))
        return value

    # Business fields that may be either model objects or plain values.
    for name in ('app_category_ids', 'app_desc', 'app_english_name',
                 'app_name', 'app_slogan', 'merchandise_id', 'merchant_pid',
                 'out_biz_no', 'service_email', 'service_phone'):
        value = getattr(self, name)
        if value:
            params[name] = _render(value)

    # Plain string fields copied verbatim when set.
    for name in ('terminal_type', 'terminal_info', 'prod_code',
                 'notify_url', 'return_url'):
        value = getattr(self, name)
        if value:
            params[name] = value

    if self.udf_params:
        params.update(self.udf_params)
    return params
def get_multipart_params(self):
    """Return the multipart (file-upload) parameters; only app_logo qualifies."""
    logo = self.app_logo
    return {'app_logo': logo} if logo else dict()
| true | true |
f71767ca2c739f1474e2622061bcec8545048d82 | 18,990 | py | Python | plugins/modules/oci_opsi_resource_forecast_trend_facts.py | sagar2938/oci-ansible-collection | 5b8ce583a0d5d0aabf14494d61aea4649e18d1e6 | [
"Apache-2.0"
] | null | null | null | plugins/modules/oci_opsi_resource_forecast_trend_facts.py | sagar2938/oci-ansible-collection | 5b8ce583a0d5d0aabf14494d61aea4649e18d1e6 | [
"Apache-2.0"
] | null | null | null | plugins/modules/oci_opsi_resource_forecast_trend_facts.py | sagar2938/oci-ansible-collection | 5b8ce583a0d5d0aabf14494d61aea4649e18d1e6 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python
# Copyright (c) 2020, 2021 Oracle and/or its affiliates.
# This software is made available to you under the terms of the GPL 3.0 license or the Apache 2.0 license.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Apache License v2.0
# See LICENSE.TXT for details.
# GENERATED FILE - DO NOT EDIT - MANUAL CHANGES WILL BE OVERWRITTEN
from __future__ import absolute_import, division, print_function
__metaclass__ = type
# Standard Ansible metadata consumed by documentation tooling; marks this
# generated module as community-supported and in preview status.
ANSIBLE_METADATA = {
    "metadata_version": "1.1",
    "status": ["preview"],
    "supported_by": "community",
}
DOCUMENTATION = """
---
module: oci_opsi_resource_forecast_trend_facts
short_description: Fetches details about a ResourceForecastTrend resource in Oracle Cloud Infrastructure
description:
- Fetches details about a ResourceForecastTrend resource in Oracle Cloud Infrastructure
- Get Forecast predictions for CPU and Storage resources since a time in the past.
If compartmentIdInSubtree is specified, aggregates resources in a compartment and in all sub-compartments.
version_added: "2.9.0"
author: Oracle (@oracle)
options:
compartment_id:
description:
- The L(OCID,https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of the compartment.
type: str
required: true
resource_metric:
description:
- Filter by resource metric.
Supported values are CPU , STORAGE, MEMORY and IO.
type: str
required: true
analysis_time_interval:
description:
- Specify time period in ISO 8601 format with respect to current time.
Default is last 30 days represented by P30D.
If timeInterval is specified, then timeIntervalStart and timeIntervalEnd will be ignored.
Examples P90D (last 90 days), P4W (last 4 weeks), P2M (last 2 months), P1Y (last 12 months), . Maximum value allowed is 25 months prior to
current time (P25M).
type: str
time_interval_start:
description:
- Analysis start time in UTC in ISO 8601 format(inclusive).
Example 2019-10-30T00:00:00Z (yyyy-MM-ddThh:mm:ssZ).
The minimum allowed value is 2 years prior to the current day.
timeIntervalStart and timeIntervalEnd parameters are used together.
If analysisTimeInterval is specified, this parameter is ignored.
type: str
time_interval_end:
description:
- Analysis end time in UTC in ISO 8601 format(exclusive).
Example 2019-10-30T00:00:00Z (yyyy-MM-ddThh:mm:ssZ).
timeIntervalStart and timeIntervalEnd are used together.
If timeIntervalEnd is not specified, current time is used as timeIntervalEnd.
type: str
database_type:
description:
- Filter by one or more database type.
Possible values are ADW-S, ATP-S, ADW-D, ATP-D, EXTERNAL-PDB, EXTERNAL-NONCDB.
type: list
elements: str
choices:
- "ADW-S"
- "ATP-S"
- "ADW-D"
- "ATP-D"
- "EXTERNAL-PDB"
- "EXTERNAL-NONCDB"
database_id:
description:
- Optional list of database L(OCIDs,https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of the associated DBaaS entity.
type: list
elements: str
id:
description:
- Optional list of database insight resource L(OCIDs,https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm).
type: list
elements: str
exadata_insight_id:
description:
- Optional list of exadata insight resource L(OCIDs,https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm).
type: list
elements: str
cdb_name:
description:
- Filter by one or more cdb name.
type: list
elements: str
statistic:
description:
- Choose the type of statistic metric data to be used for forecasting.
type: str
choices:
- "AVG"
- "MAX"
forecast_days:
description:
- Number of days used for utilization forecast analysis.
type: int
forecast_model:
description:
- "Choose algorithm model for the forecasting.
Possible values:
- LINEAR: Uses linear regression algorithm for forecasting.
- ML_AUTO: Automatically detects best algorithm to use for forecasting.
- ML_NO_AUTO: Automatically detects seasonality of the data for forecasting using linear or seasonal algorithm."
type: str
choices:
- "LINEAR"
- "ML_AUTO"
- "ML_NO_AUTO"
utilization_level:
description:
- "Filter by utilization level by the following buckets:
- HIGH_UTILIZATION: DBs with utilization greater or equal than 75.
- LOW_UTILIZATION: DBs with utilization lower than 25.
- MEDIUM_HIGH_UTILIZATION: DBs with utilization greater or equal than 50 but lower than 75.
- MEDIUM_LOW_UTILIZATION: DBs with utilization greater or equal than 25 but lower than 50."
type: str
choices:
- "HIGH_UTILIZATION"
- "LOW_UTILIZATION"
- "MEDIUM_HIGH_UTILIZATION"
- "MEDIUM_LOW_UTILIZATION"
confidence:
description:
- This parameter is used to change data's confidence level, this data is ingested by the
forecast algorithm.
Confidence is the probability of an interval to contain the expected population parameter.
Manipulation of this value will lead to different results.
If not set, default confidence value is 95%.
type: int
host_name:
description:
- Filter by one or more hostname.
type: list
elements: str
tablespace_name:
description:
- Tablespace name for a database
type: str
is_database_instance_level_metrics:
description:
- Flag to indicate if database instance level metrics should be returned. The flag is ignored when a host name filter is not applied.
When a hostname filter is applied this flag will determine whether to return metrics for the instances located on the specified host or for the
whole database which contains an instance on this host.
type: bool
defined_tag_equals:
description:
- "A list of tag filters to apply. Only resources with a defined tag matching the value will be returned.
Each item in the list has the format \\"{namespace}.{tagName}.{value}\\". All inputs are case-insensitive.
Multiple values for the same key (i.e. same namespace and tag name) are interpreted as \\"OR\\".
Values for different keys (i.e. different namespaces, different tag names, or both) are interpreted as \\"AND\\"."
type: list
elements: str
freeform_tag_equals:
description:
- "A list of tag filters to apply. Only resources with a freeform tag matching the value will be returned.
The key for each tag is \\"{tagName}.{value}\\". All inputs are case-insensitive.
Multiple values for the same tag name are interpreted as \\"OR\\". Values for different tag names are interpreted as \\"AND\\"."
type: list
elements: str
defined_tag_exists:
description:
- "A list of tag existence filters to apply. Only resources for which the specified defined tags exist will be returned.
Each item in the list has the format \\"{namespace}.{tagName}.true\\" (for checking existence of a defined tag)
or \\"{namespace}.true\\". All inputs are case-insensitive.
Currently, only existence (\\"true\\" at the end) is supported. Absence (\\"false\\" at the end) is not supported.
Multiple values for the same key (i.e. same namespace and tag name) are interpreted as \\"OR\\".
Values for different keys (i.e. different namespaces, different tag names, or both) are interpreted as \\"AND\\"."
type: list
elements: str
freeform_tag_exists:
description:
- "A list of tag existence filters to apply. Only resources for which the specified freeform tags exist the value will be returned.
The key for each tag is \\"{tagName}.true\\". All inputs are case-insensitive.
Currently, only existence (\\"true\\" at the end) is supported. Absence (\\"false\\" at the end) is not supported.
Multiple values for different tag names are interpreted as \\"AND\\"."
type: list
elements: str
compartment_id_in_subtree:
description:
- A flag to search all resources within a given compartment and all sub-compartments.
type: bool
extends_documentation_fragment: [ oracle.oci.oracle ]
"""
EXAMPLES = """
- name: Get a specific resource_forecast_trend
oci_opsi_resource_forecast_trend_facts:
# required
compartment_id: "ocid1.compartment.oc1..xxxxxxEXAMPLExxxxxx"
resource_metric: resource_metric_example
# optional
analysis_time_interval: analysis_time_interval_example
time_interval_start: 2013-10-20T19:20:30+01:00
time_interval_end: 2013-10-20T19:20:30+01:00
database_type: [ "$p.getValue()" ]
database_id: [ "$p.getValue()" ]
id: [ "$p.getValue()" ]
exadata_insight_id: [ "$p.getValue()" ]
cdb_name: [ "$p.getValue()" ]
statistic: AVG
forecast_days: 56
forecast_model: LINEAR
utilization_level: HIGH_UTILIZATION
confidence: 56
host_name: [ "$p.getValue()" ]
tablespace_name: tablespace_name_example
is_database_instance_level_metrics: true
defined_tag_equals: [ "$p.getValue()" ]
freeform_tag_equals: [ "$p.getValue()" ]
defined_tag_exists: [ "$p.getValue()" ]
freeform_tag_exists: [ "$p.getValue()" ]
compartment_id_in_subtree: true
"""
RETURN = """
resource_forecast_trend:
description:
- ResourceForecastTrend resource
returned: on success
type: complex
contains:
time_interval_start:
description:
- The start timestamp that was passed into the request.
returned: on success
type: str
sample: "2020-12-06T00:00:00.000Z"
time_interval_end:
description:
- The end timestamp that was passed into the request.
returned: on success
type: str
sample: "2020-12-06T00:00:00.000Z"
resource_metric:
description:
- "Defines the type of resource metric (example: CPU, STORAGE)"
returned: on success
type: str
sample: STORAGE
usage_unit:
description:
- Displays usage unit ( CORES, GB)
returned: on success
type: str
sample: CORES
pattern:
description:
- Time series patterns used in the forecasting.
returned: on success
type: str
sample: LINEAR
tablespace_name:
description:
- The name of tablespace.
returned: on success
type: str
sample: tablespace_name_example
historical_data:
description:
- Time series data used for the forecast analysis.
returned: on success
type: complex
contains:
end_timestamp:
description:
- The timestamp in which the current sampling period ends in RFC 3339 format.
returned: on success
type: str
sample: "2020-05-01T00:00:00.000Z"
usage:
description:
- Total amount used of the resource metric type (CPU, STORAGE).
returned: on success
type: float
sample: 34.5
projected_data:
description:
- Time series data result of the forecasting analysis.
returned: on success
type: complex
contains:
end_timestamp:
description:
- The timestamp in which the current sampling period ends in RFC 3339 format.
returned: on success
type: str
sample: "2020-05-01T00:00:00.000Z"
usage:
description:
- Total amount used of the resource metric type (CPU, STORAGE).
returned: on success
type: float
sample: 34.5
high_value:
description:
- Upper uncertainty bound of the current usage value.
returned: on success
type: float
sample: 1.2
low_value:
description:
- Lower uncertainty bound of the current usage value.
returned: on success
type: float
sample: 1.2
sample: {
"time_interval_start": "2020-12-06T00:00:00.000Z",
"time_interval_end": "2020-12-06T00:00:00.000Z",
"resource_metric": "STORAGE",
"usage_unit": "CORES",
"pattern": "LINEAR",
"tablespace_name": "tablespace_name_example",
"historical_data": [{
"end_timestamp": "2020-05-01T00:00:00.000Z",
"usage": 34.5
}],
"projected_data": [{
"end_timestamp": "2020-05-01T00:00:00.000Z",
"usage": 34.5,
"high_value": 1.2,
"low_value": 1.2
}]
}
"""
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.oracle.oci.plugins.module_utils import oci_common_utils
from ansible_collections.oracle.oci.plugins.module_utils.oci_resource_utils import (
OCIResourceFactsHelperBase,
get_custom_class,
)
# The OCI SDK is optional at import time so Ansible can still load the module
# and report a clean error later; main() checks HAS_OCI_PY_SDK before use.
try:
    from oci.opsi import OperationsInsightsClient

    HAS_OCI_PY_SDK = True
except ImportError:
    HAS_OCI_PY_SDK = False
class ResourceForecastTrendFactsHelperGen(OCIResourceFactsHelperBase):
    """Facts helper for ResourceForecastTrend (supported operations: get)."""

    def get_required_params_for_get(self):
        # Both identifiers are mandatory on the summarize call below.
        return ["compartment_id", "resource_metric"]

    def get_resource(self):
        """Call the Operations Insights summarize endpoint with all supplied options."""
        optional_get_method_params = [
            "analysis_time_interval",
            "time_interval_start",
            "time_interval_end",
            "database_type",
            "database_id",
            "id",
            "exadata_insight_id",
            "cdb_name",
            "statistic",
            "forecast_days",
            "forecast_model",
            "utilization_level",
            "confidence",
            "host_name",
            "tablespace_name",
            "is_database_instance_level_metrics",
            "defined_tag_equals",
            "freeform_tag_equals",
            "defined_tag_exists",
            "freeform_tag_exists",
            "compartment_id_in_subtree",
        ]
        # Forward only the options the playbook actually set; options left
        # unset (None) are not passed to the API at all.
        optional_kwargs = {
            name: self.module.params[name]
            for name in optional_get_method_params
            if self.module.params.get(name) is not None
        }
        return oci_common_utils.call_with_backoff(
            self.client.summarize_database_insight_resource_forecast_trend,
            compartment_id=self.module.params.get("compartment_id"),
            resource_metric=self.module.params.get("resource_metric"),
            **optional_kwargs
        )
# Look up an optional user-supplied customization class by name; it is mixed
# in ahead of the generated implementation so its methods take precedence.
ResourceForecastTrendFactsHelperCustom = get_custom_class(
    "ResourceForecastTrendFactsHelperCustom"
)
class ResourceFactsHelper(
    ResourceForecastTrendFactsHelperCustom, ResourceForecastTrendFactsHelperGen
):
    """Concrete facts helper; custom overrides precede the generated base in MRO."""

    pass
def main():
    """Module entry point: build the argument spec, run the facts helper, exit JSON."""
    # Start from the common OCI options (auth, region, ...) and add the
    # module-specific options; these mirror the DOCUMENTATION block above.
    module_args = oci_common_utils.get_common_arg_spec()
    module_args.update(
        dict(
            compartment_id=dict(type="str", required=True),
            resource_metric=dict(type="str", required=True),
            analysis_time_interval=dict(type="str"),
            time_interval_start=dict(type="str"),
            time_interval_end=dict(type="str"),
            database_type=dict(
                type="list",
                elements="str",
                choices=[
                    "ADW-S",
                    "ATP-S",
                    "ADW-D",
                    "ATP-D",
                    "EXTERNAL-PDB",
                    "EXTERNAL-NONCDB",
                ],
            ),
            database_id=dict(type="list", elements="str"),
            id=dict(type="list", elements="str"),
            exadata_insight_id=dict(type="list", elements="str"),
            cdb_name=dict(type="list", elements="str"),
            statistic=dict(type="str", choices=["AVG", "MAX"]),
            forecast_days=dict(type="int"),
            forecast_model=dict(
                type="str", choices=["LINEAR", "ML_AUTO", "ML_NO_AUTO"]
            ),
            utilization_level=dict(
                type="str",
                choices=[
                    "HIGH_UTILIZATION",
                    "LOW_UTILIZATION",
                    "MEDIUM_HIGH_UTILIZATION",
                    "MEDIUM_LOW_UTILIZATION",
                ],
            ),
            confidence=dict(type="int"),
            host_name=dict(type="list", elements="str"),
            tablespace_name=dict(type="str"),
            is_database_instance_level_metrics=dict(type="bool"),
            defined_tag_equals=dict(type="list", elements="str"),
            freeform_tag_equals=dict(type="list", elements="str"),
            defined_tag_exists=dict(type="list", elements="str"),
            freeform_tag_exists=dict(type="list", elements="str"),
            compartment_id_in_subtree=dict(type="bool"),
        )
    )

    module = AnsibleModule(argument_spec=module_args)

    # The SDK import was attempted at module load; fail cleanly if missing.
    if not HAS_OCI_PY_SDK:
        module.fail_json(msg="oci python sdk required for this module.")

    resource_facts_helper = ResourceFactsHelper(
        module=module,
        resource_type="resource_forecast_trend",
        service_client_class=OperationsInsightsClient,
        namespace="opsi",
    )

    result = []

    # This facts module supports only the "get" operation.
    if resource_facts_helper.is_get():
        result = resource_facts_helper.get()
    else:
        resource_facts_helper.fail()

    module.exit_json(resource_forecast_trend=result)
# Ansible executes modules as scripts, so run main() on direct execution.
if __name__ == "__main__":
    main()
| 38.99384 | 157 | 0.597999 |
from __future__ import absolute_import, division, print_function
__metaclass__ = type
# Standard Ansible metadata consumed by documentation tooling; marks this
# generated module as community-supported and in preview status.
ANSIBLE_METADATA = {
    "metadata_version": "1.1",
    "status": ["preview"],
    "supported_by": "community",
}
DOCUMENTATION = """
---
module: oci_opsi_resource_forecast_trend_facts
short_description: Fetches details about a ResourceForecastTrend resource in Oracle Cloud Infrastructure
description:
- Fetches details about a ResourceForecastTrend resource in Oracle Cloud Infrastructure
- Get Forecast predictions for CPU and Storage resources since a time in the past.
If compartmentIdInSubtree is specified, aggregates resources in a compartment and in all sub-compartments.
version_added: "2.9.0"
author: Oracle (@oracle)
options:
compartment_id:
description:
- The L(OCID,https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of the compartment.
type: str
required: true
resource_metric:
description:
- Filter by resource metric.
Supported values are CPU , STORAGE, MEMORY and IO.
type: str
required: true
analysis_time_interval:
description:
- Specify time period in ISO 8601 format with respect to current time.
Default is last 30 days represented by P30D.
If timeInterval is specified, then timeIntervalStart and timeIntervalEnd will be ignored.
Examples P90D (last 90 days), P4W (last 4 weeks), P2M (last 2 months), P1Y (last 12 months), . Maximum value allowed is 25 months prior to
current time (P25M).
type: str
time_interval_start:
description:
- Analysis start time in UTC in ISO 8601 format(inclusive).
Example 2019-10-30T00:00:00Z (yyyy-MM-ddThh:mm:ssZ).
The minimum allowed value is 2 years prior to the current day.
timeIntervalStart and timeIntervalEnd parameters are used together.
If analysisTimeInterval is specified, this parameter is ignored.
type: str
time_interval_end:
description:
- Analysis end time in UTC in ISO 8601 format(exclusive).
Example 2019-10-30T00:00:00Z (yyyy-MM-ddThh:mm:ssZ).
timeIntervalStart and timeIntervalEnd are used together.
If timeIntervalEnd is not specified, current time is used as timeIntervalEnd.
type: str
database_type:
description:
- Filter by one or more database type.
Possible values are ADW-S, ATP-S, ADW-D, ATP-D, EXTERNAL-PDB, EXTERNAL-NONCDB.
type: list
elements: str
choices:
- "ADW-S"
- "ATP-S"
- "ADW-D"
- "ATP-D"
- "EXTERNAL-PDB"
- "EXTERNAL-NONCDB"
database_id:
description:
- Optional list of database L(OCIDs,https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of the associated DBaaS entity.
type: list
elements: str
id:
description:
- Optional list of database insight resource L(OCIDs,https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm).
type: list
elements: str
exadata_insight_id:
description:
- Optional list of exadata insight resource L(OCIDs,https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm).
type: list
elements: str
cdb_name:
description:
- Filter by one or more cdb name.
type: list
elements: str
statistic:
description:
- Choose the type of statistic metric data to be used for forecasting.
type: str
choices:
- "AVG"
- "MAX"
forecast_days:
description:
- Number of days used for utilization forecast analysis.
type: int
forecast_model:
description:
- "Choose algorithm model for the forecasting.
Possible values:
- LINEAR: Uses linear regression algorithm for forecasting.
- ML_AUTO: Automatically detects best algorithm to use for forecasting.
- ML_NO_AUTO: Automatically detects seasonality of the data for forecasting using linear or seasonal algorithm."
type: str
choices:
- "LINEAR"
- "ML_AUTO"
- "ML_NO_AUTO"
utilization_level:
description:
- "Filter by utilization level by the following buckets:
- HIGH_UTILIZATION: DBs with utilization greater or equal than 75.
- LOW_UTILIZATION: DBs with utilization lower than 25.
- MEDIUM_HIGH_UTILIZATION: DBs with utilization greater or equal than 50 but lower than 75.
- MEDIUM_LOW_UTILIZATION: DBs with utilization greater or equal than 25 but lower than 50."
type: str
choices:
- "HIGH_UTILIZATION"
- "LOW_UTILIZATION"
- "MEDIUM_HIGH_UTILIZATION"
- "MEDIUM_LOW_UTILIZATION"
confidence:
description:
- This parameter is used to change data's confidence level, this data is ingested by the
forecast algorithm.
Confidence is the probability of an interval to contain the expected population parameter.
Manipulation of this value will lead to different results.
If not set, default confidence value is 95%.
type: int
host_name:
description:
- Filter by one or more hostname.
type: list
elements: str
tablespace_name:
description:
- Tablespace name for a database
type: str
is_database_instance_level_metrics:
description:
- Flag to indicate if database instance level metrics should be returned. The flag is ignored when a host name filter is not applied.
When a hostname filter is applied this flag will determine whether to return metrics for the instances located on the specified host or for the
whole database which contains an instance on this host.
type: bool
defined_tag_equals:
description:
- "A list of tag filters to apply. Only resources with a defined tag matching the value will be returned.
Each item in the list has the format \\"{namespace}.{tagName}.{value}\\". All inputs are case-insensitive.
Multiple values for the same key (i.e. same namespace and tag name) are interpreted as \\"OR\\".
Values for different keys (i.e. different namespaces, different tag names, or both) are interpreted as \\"AND\\"."
type: list
elements: str
freeform_tag_equals:
description:
- "A list of tag filters to apply. Only resources with a freeform tag matching the value will be returned.
The key for each tag is \\"{tagName}.{value}\\". All inputs are case-insensitive.
Multiple values for the same tag name are interpreted as \\"OR\\". Values for different tag names are interpreted as \\"AND\\"."
type: list
elements: str
defined_tag_exists:
description:
- "A list of tag existence filters to apply. Only resources for which the specified defined tags exist will be returned.
Each item in the list has the format \\"{namespace}.{tagName}.true\\" (for checking existence of a defined tag)
or \\"{namespace}.true\\". All inputs are case-insensitive.
Currently, only existence (\\"true\\" at the end) is supported. Absence (\\"false\\" at the end) is not supported.
Multiple values for the same key (i.e. same namespace and tag name) are interpreted as \\"OR\\".
Values for different keys (i.e. different namespaces, different tag names, or both) are interpreted as \\"AND\\"."
type: list
elements: str
freeform_tag_exists:
description:
- "A list of tag existence filters to apply. Only resources for which the specified freeform tags exist the value will be returned.
The key for each tag is \\"{tagName}.true\\". All inputs are case-insensitive.
Currently, only existence (\\"true\\" at the end) is supported. Absence (\\"false\\" at the end) is not supported.
Multiple values for different tag names are interpreted as \\"AND\\"."
type: list
elements: str
compartment_id_in_subtree:
description:
- A flag to search all resources within a given compartment and all sub-compartments.
type: bool
extends_documentation_fragment: [ oracle.oci.oracle ]
"""
EXAMPLES = """
- name: Get a specific resource_forecast_trend
oci_opsi_resource_forecast_trend_facts:
# required
compartment_id: "ocid1.compartment.oc1..xxxxxxEXAMPLExxxxxx"
resource_metric: resource_metric_example
# optional
analysis_time_interval: analysis_time_interval_example
time_interval_start: 2013-10-20T19:20:30+01:00
time_interval_end: 2013-10-20T19:20:30+01:00
database_type: [ "$p.getValue()" ]
database_id: [ "$p.getValue()" ]
id: [ "$p.getValue()" ]
exadata_insight_id: [ "$p.getValue()" ]
cdb_name: [ "$p.getValue()" ]
statistic: AVG
forecast_days: 56
forecast_model: LINEAR
utilization_level: HIGH_UTILIZATION
confidence: 56
host_name: [ "$p.getValue()" ]
tablespace_name: tablespace_name_example
is_database_instance_level_metrics: true
defined_tag_equals: [ "$p.getValue()" ]
freeform_tag_equals: [ "$p.getValue()" ]
defined_tag_exists: [ "$p.getValue()" ]
freeform_tag_exists: [ "$p.getValue()" ]
compartment_id_in_subtree: true
"""
RETURN = """
resource_forecast_trend:
description:
- ResourceForecastTrend resource
returned: on success
type: complex
contains:
time_interval_start:
description:
- The start timestamp that was passed into the request.
returned: on success
type: str
sample: "2020-12-06T00:00:00.000Z"
time_interval_end:
description:
- The end timestamp that was passed into the request.
returned: on success
type: str
sample: "2020-12-06T00:00:00.000Z"
resource_metric:
description:
- "Defines the type of resource metric (example: CPU, STORAGE)"
returned: on success
type: str
sample: STORAGE
usage_unit:
description:
- Displays usage unit ( CORES, GB)
returned: on success
type: str
sample: CORES
pattern:
description:
- Time series patterns used in the forecasting.
returned: on success
type: str
sample: LINEAR
tablespace_name:
description:
- The name of tablespace.
returned: on success
type: str
sample: tablespace_name_example
historical_data:
description:
- Time series data used for the forecast analysis.
returned: on success
type: complex
contains:
end_timestamp:
description:
- The timestamp in which the current sampling period ends in RFC 3339 format.
returned: on success
type: str
sample: "2020-05-01T00:00:00.000Z"
usage:
description:
- Total amount used of the resource metric type (CPU, STORAGE).
returned: on success
type: float
sample: 34.5
projected_data:
description:
- Time series data result of the forecasting analysis.
returned: on success
type: complex
contains:
end_timestamp:
description:
- The timestamp in which the current sampling period ends in RFC 3339 format.
returned: on success
type: str
sample: "2020-05-01T00:00:00.000Z"
usage:
description:
- Total amount used of the resource metric type (CPU, STORAGE).
returned: on success
type: float
sample: 34.5
high_value:
description:
- Upper uncertainty bound of the current usage value.
returned: on success
type: float
sample: 1.2
low_value:
description:
- Lower uncertainty bound of the current usage value.
returned: on success
type: float
sample: 1.2
sample: {
"time_interval_start": "2020-12-06T00:00:00.000Z",
"time_interval_end": "2020-12-06T00:00:00.000Z",
"resource_metric": "STORAGE",
"usage_unit": "CORES",
"pattern": "LINEAR",
"tablespace_name": "tablespace_name_example",
"historical_data": [{
"end_timestamp": "2020-05-01T00:00:00.000Z",
"usage": 34.5
}],
"projected_data": [{
"end_timestamp": "2020-05-01T00:00:00.000Z",
"usage": 34.5,
"high_value": 1.2,
"low_value": 1.2
}]
}
"""
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.oracle.oci.plugins.module_utils import oci_common_utils
from ansible_collections.oracle.oci.plugins.module_utils.oci_resource_utils import (
OCIResourceFactsHelperBase,
get_custom_class,
)
# The OCI SDK is optional at import time so Ansible can still load the module
# and report a clean error later; main() checks HAS_OCI_PY_SDK before use.
try:
    from oci.opsi import OperationsInsightsClient

    HAS_OCI_PY_SDK = True
except ImportError:
    HAS_OCI_PY_SDK = False
class ResourceForecastTrendFactsHelperGen(OCIResourceFactsHelperBase):
    """Facts helper for ResourceForecastTrend (supported operations: get)."""

    def get_required_params_for_get(self):
        # Both identifiers are mandatory on the summarize call below.
        return ["compartment_id", "resource_metric"]

    def get_resource(self):
        """Call the Operations Insights summarize endpoint with all supplied options."""
        optional_get_method_params = [
            "analysis_time_interval",
            "time_interval_start",
            "time_interval_end",
            "database_type",
            "database_id",
            "id",
            "exadata_insight_id",
            "cdb_name",
            "statistic",
            "forecast_days",
            "forecast_model",
            "utilization_level",
            "confidence",
            "host_name",
            "tablespace_name",
            "is_database_instance_level_metrics",
            "defined_tag_equals",
            "freeform_tag_equals",
            "defined_tag_exists",
            "freeform_tag_exists",
            "compartment_id_in_subtree",
        ]
        # Forward only the options the playbook actually set; options left
        # unset (None) are not passed to the API at all.
        optional_kwargs = {
            name: self.module.params[name]
            for name in optional_get_method_params
            if self.module.params.get(name) is not None
        }
        return oci_common_utils.call_with_backoff(
            self.client.summarize_database_insight_resource_forecast_trend,
            compartment_id=self.module.params.get("compartment_id"),
            resource_metric=self.module.params.get("resource_metric"),
            **optional_kwargs
        )
# Look up an optional user-supplied customization class by name; it is mixed
# in ahead of the generated implementation so its methods take precedence.
ResourceForecastTrendFactsHelperCustom = get_custom_class(
    "ResourceForecastTrendFactsHelperCustom"
)
class ResourceFactsHelper(
    ResourceForecastTrendFactsHelperCustom, ResourceForecastTrendFactsHelperGen
):
    """Concrete facts helper; custom overrides precede the generated base in MRO."""

    pass
def main():
    """Module entry point: build the argument spec, run the facts helper, exit JSON."""
    # Start from the common OCI options (auth, region, ...) and add the
    # module-specific options; these mirror the DOCUMENTATION block above.
    module_args = oci_common_utils.get_common_arg_spec()
    module_args.update(
        dict(
            compartment_id=dict(type="str", required=True),
            resource_metric=dict(type="str", required=True),
            analysis_time_interval=dict(type="str"),
            time_interval_start=dict(type="str"),
            time_interval_end=dict(type="str"),
            database_type=dict(
                type="list",
                elements="str",
                choices=[
                    "ADW-S",
                    "ATP-S",
                    "ADW-D",
                    "ATP-D",
                    "EXTERNAL-PDB",
                    "EXTERNAL-NONCDB",
                ],
            ),
            database_id=dict(type="list", elements="str"),
            id=dict(type="list", elements="str"),
            exadata_insight_id=dict(type="list", elements="str"),
            cdb_name=dict(type="list", elements="str"),
            statistic=dict(type="str", choices=["AVG", "MAX"]),
            forecast_days=dict(type="int"),
            forecast_model=dict(
                type="str", choices=["LINEAR", "ML_AUTO", "ML_NO_AUTO"]
            ),
            utilization_level=dict(
                type="str",
                choices=[
                    "HIGH_UTILIZATION",
                    "LOW_UTILIZATION",
                    "MEDIUM_HIGH_UTILIZATION",
                    "MEDIUM_LOW_UTILIZATION",
                ],
            ),
            confidence=dict(type="int"),
            host_name=dict(type="list", elements="str"),
            tablespace_name=dict(type="str"),
            is_database_instance_level_metrics=dict(type="bool"),
            defined_tag_equals=dict(type="list", elements="str"),
            freeform_tag_equals=dict(type="list", elements="str"),
            defined_tag_exists=dict(type="list", elements="str"),
            freeform_tag_exists=dict(type="list", elements="str"),
            compartment_id_in_subtree=dict(type="bool"),
        )
    )

    module = AnsibleModule(argument_spec=module_args)

    # The SDK import was attempted at module load; fail cleanly if missing.
    if not HAS_OCI_PY_SDK:
        module.fail_json(msg="oci python sdk required for this module.")

    resource_facts_helper = ResourceFactsHelper(
        module=module,
        resource_type="resource_forecast_trend",
        service_client_class=OperationsInsightsClient,
        namespace="opsi",
    )

    result = []

    # This facts module supports only the "get" operation.
    if resource_facts_helper.is_get():
        result = resource_facts_helper.get()
    else:
        resource_facts_helper.fail()

    module.exit_json(resource_forecast_trend=result)
# Standard entry point used when Ansible executes the module directly.
if __name__ == "__main__":
    main()
| true | true |
f71767d27ceb9c0a11fa4ade0519de1ef382cf0f | 1,385 | py | Python | 2020/src/day6.py | vionion/advent-of-code-2019 | c3389694a42e9b1d978f82c6fb42925465799734 | [
"MIT"
] | null | null | null | 2020/src/day6.py | vionion/advent-of-code-2019 | c3389694a42e9b1d978f82c6fb42925465799734 | [
"MIT"
] | null | null | null | 2020/src/day6.py | vionion/advent-of-code-2019 | c3389694a42e9b1d978f82c6fb42925465799734 | [
"MIT"
] | null | null | null | from typing import List, Set
from io_utils import read_input_file
def day6_1():
    """Part 1: total number of questions answered "yes" by anyone, per group."""
    raw_lines = read_input_file("day6.txt", input_type=str)
    per_group = get_all_yes_answers_per_group(raw_lines)
    return sum(len(group) for group in per_group)
def get_all_yes_answers_per_group(input_list):
    """Union of "yes" answers for each blank-line-separated group.

    Each element of the result is the set of question letters that at
    least one person in the corresponding group answered "yes" to.
    """
    groups: List[Set[str]] = []
    group_index = 0
    for raw_line in input_list:
        if raw_line == "\n":
            # Blank line: the next non-blank line belongs to a new group.
            group_index += 1
            continue
        if group_index == len(groups):
            groups.append(set())
        # set.update over a string adds each character individually.
        groups[group_index].update(raw_line.strip())
    return groups
def get_common_yes_answers_per_group(input_list):
    """Intersection of "yes" answers for each blank-line-separated group.

    Each element of the result is the set of question letters that every
    person in the corresponding group answered "yes" to.
    """
    groups: List[Set[str]] = []
    group_index = 0
    for raw_line in input_list:
        if raw_line == "\n":
            group_index += 1
            continue
        letters = raw_line.strip()
        if group_index == len(groups):
            # First person of the group seeds the running intersection.
            groups.append(set(letters))
        else:
            groups[group_index] &= set(letters)
    return groups
def day6_2():
    """Part 2: total number of questions answered "yes" by everyone, per group."""
    raw_lines = read_input_file("day6.txt", input_type=str)
    per_group = get_common_yes_answers_per_group(raw_lines)
    return sum(len(group) for group in per_group)
| 26.634615 | 60 | 0.61083 | from typing import List, Set
from io_utils import read_input_file
def day6_1():
input_list = read_input_file("day6.txt", input_type=str)
answers = get_all_yes_answers_per_group(input_list)
amount_yes_answers = 0
for answers_per_group in answers:
amount_yes_answers += len(answers_per_group)
return amount_yes_answers
def get_all_yes_answers_per_group(input_list):
answers: List[Set[str]] = []
i = 0
for line in input_list:
if line == "\n":
i += 1
else:
if i == len(answers):
answers.append(set())
line = line.strip()
for char in line:
answers[i].add(char)
return answers
def get_common_yes_answers_per_group(input_list):
answers: List[Set[str]] = []
i = 0
for line in input_list:
if line == "\n":
i += 1
else:
line = line.strip()
if i == len(answers):
answers.append(set(line))
else:
answers[i] = answers[i].intersection(line)
return answers
def day6_2():
input_list = read_input_file("day6.txt", input_type=str)
answers = get_common_yes_answers_per_group(input_list)
amount_yes_answers = 0
for answers_per_group in answers:
amount_yes_answers += len(answers_per_group)
return amount_yes_answers
| true | true |
f71768dfa296b62f40248813c15aa926044590df | 360 | py | Python | votes/migrations/0002_auto_20190529_0721.py | isidaruk/eurovision_project | 976743e66a2fed17c0513f17a9a7d35850e9cde5 | [
"MIT"
] | null | null | null | votes/migrations/0002_auto_20190529_0721.py | isidaruk/eurovision_project | 976743e66a2fed17c0513f17a9a7d35850e9cde5 | [
"MIT"
] | 8 | 2020-02-12T00:23:27.000Z | 2022-03-08T21:10:13.000Z | votes/migrations/0002_auto_20190529_0721.py | isidaruk/eurovision_project | 976743e66a2fed17c0513f17a9a7d35850e9cde5 | [
"MIT"
] | null | null | null | # Generated by Django 2.2.1 on 2019-05-29 07:21
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated migration: rename Vote.to_country to to_participant."""
    # Must run after the votes app's initial migration.
    dependencies = [
        ('votes', '0001_initial'),
    ]
    # Rename the field on the 'vote' model; no other schema change.
    operations = [
        migrations.RenameField(
            model_name='vote',
            old_name='to_country',
            new_name='to_participant',
        ),
    ]
| 18.947368 | 47 | 0.580556 |
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('votes', '0001_initial'),
]
operations = [
migrations.RenameField(
model_name='vote',
old_name='to_country',
new_name='to_participant',
),
]
| true | true |
f717690a322e1a696f5c7c83ea215426620aa34e | 673 | py | Python | src/data/456.py | NULLCT/LOMC | 79a16474a8f21310e0fb47e536d527dd5dc6d655 | [
"MIT"
] | null | null | null | src/data/456.py | NULLCT/LOMC | 79a16474a8f21310e0fb47e536d527dd5dc6d655 | [
"MIT"
] | null | null | null | src/data/456.py | NULLCT/LOMC | 79a16474a8f21310e0fb47e536d527dd5dc6d655 | [
"MIT"
] | null | null | null | from collections import deque
n, m = map(int, input().split())
graph = [list() for _ in range(n)]
for _ in range(n - 1):
u, k = [int(x) for x in input().split()] # uは頂点番号、kは隣接頂点の個数
u, k = u - 1, k - 1
graph[u].append(k)
graph[k].append(u) # 無向グラフ
dist = [-1] * n #距離
dist[0] = 0 #startは0
q = deque()
q.append(0) #startは0
while q: #qが空になるまで
v = q.popleft()
for x in graph[v]:
if dist[x] != -1: #更新
continue
dist[x] = 1 - dist[v]
q.append(x)
for i in range(m):
c, d = map(int, input().split())
c, d = c - 1, d - 1
if dist[c] != dist[d]:
print("Road")
else:
print("Town")
| 21.03125 | 64 | 0.499257 | from collections import deque
n, m = map(int, input().split())
graph = [list() for _ in range(n)]
for _ in range(n - 1):
u, k = [int(x) for x in input().split()]
u, k = u - 1, k - 1
graph[u].append(k)
graph[k].append(u)
dist = [-1] * n
dist[0] = 0
q = deque()
q.append(0)
while q:
v = q.popleft()
for x in graph[v]:
if dist[x] != -1:
continue
dist[x] = 1 - dist[v]
q.append(x)
for i in range(m):
c, d = map(int, input().split())
c, d = c - 1, d - 1
if dist[c] != dist[d]:
print("Road")
else:
print("Town")
| true | true |
f71769abced2b28f75a37afaba30e87febaaf7f8 | 2,143 | py | Python | Tracker/update.py | nordwind80/BT-Tracker | 558c15b399871c1ca11d0c4ae1eb598e3060931e | [
"MIT"
] | 1 | 2019-05-05T06:46:27.000Z | 2019-05-05T06:46:27.000Z | Tracker/update.py | nordwind80/BT-Tracker | 558c15b399871c1ca11d0c4ae1eb598e3060931e | [
"MIT"
] | null | null | null | Tracker/update.py | nordwind80/BT-Tracker | 558c15b399871c1ca11d0c4ae1eb598e3060931e | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
#
# Author: eaglewings
# E-Mail: ZWFnbGV3aW5ncy55aUBnbWFpbC5jb20=
# Created Time: 2019-04-17 15:17
# Last Modified:
# Description:
# - Project: BT Trackers Updater
# - File Name: update.py
# - Trackers Updater
import os
import re
from typing import NoReturn
class Filer(object):
    """Locates the aria2 configuration file and directory under $HOME."""

    def __init__(self):
        # Path pieces are stored separately; get_path re-assembles them.
        self._aria2_path = "/.aria2/"
        self._file_name = "aria2.conf"
        self._file_path = self._get_home + self._aria2_path

    @property
    def _get_home(self) -> str:
        """Current user's home directory."""
        return os.path.expanduser("~")

    @property
    def get_path(self) -> str:
        """Absolute path of the aria2 config file (~/.aria2/aria2.conf)."""
        return self._file_path + self._file_name

    def _create_dir(self) -> NoReturn:
        """Create ~/.aria2/, printing (not raising) when it already exists."""
        try:
            os.mkdir(self._file_path)
        except FileExistsError as why:
            print(f"Create directory failed. {why}")

    def check_dirctory(self) -> bool:
        """Return True when ~/.aria2/ exists; otherwise create it, return False.

        (Name kept as-is — it is the public API of this class.)
        """
        if not os.path.exists(self._file_path):
            self._create_dir()
            return False
        return True
class Updater(object):
    """Rewrites the ``bt-tracker`` entry of an aria2 configuration file."""

    def __init__(self, path: str, trackers: str):
        # path: config file to edit; trackers: new tracker list value.
        self._path = path
        self._trackers = trackers

    def start(self) -> NoReturn:
        """Replace every bt-tracker line in place, or append one if absent."""
        new_entry = f"bt-tracker={self._trackers}\n"
        replaced = False
        with open(self._path, "r+") as config:
            original_lines = config.readlines()
            config.seek(0)
            config.truncate()
            for old_line in original_lines:
                if re.search(r"bt-tracker=.*", old_line):
                    config.write(new_entry)
                    replaced = True
                else:
                    config.write(old_line)
            if not replaced:
                config.write(new_entry)
| 26.45679 | 95 | 0.527765 |
import os
import re
from typing import NoReturn
class Filer(object):
def __init__(self):
self._aria2_path = "/.aria2/"
self._file_name = "aria2.conf"
self._file_path = f"{self._get_home}{self._aria2_path}"
@property
def _get_home(self) -> str:
return os.path.expanduser("~")
@property
def get_path(self) -> str:
return f"{self._file_path}{self._file_name}"
def _create_dir(self) -> NoReturn:
try:
os.mkdir(f"{self._file_path}")
except FileExistsError as why:
print(f"Create directory failed. {why}")
def check_dirctory(self) -> bool:
if os.path.exists(f"{self._file_path}"):
return True
else:
self._create_dir()
return False
class Updater(object):
def __init__(self, path: str, trackers: str):
self._path = path
self._trackers = trackers
def start(self) -> NoReturn:
check = False
with open(self._path, "r+") as file:
lines = file.readlines()
file.seek(0)
file.truncate()
for line in lines:
if re.search(r"bt-tracker=.*", line):
line = line.replace(line, f"bt-tracker={self._trackers}\n")
file.write(line)
check = True
else:
file.write(line)
else:
if check:
return
else:
file.write(f"bt-tracker={self._trackers}\n")
| true | true |
f7176bc8b9827c81ae4f1e4df6897e9563ad218f | 2,628 | py | Python | source/boundaryconds.py | agstub/sglake-detectability | 5556250a59d7f500bcee86899dd9a497a368faca | [
"MIT"
] | 1 | 2021-05-27T12:24:35.000Z | 2021-05-27T12:24:35.000Z | source/boundaryconds.py | ldeo-glaciology/sglake-detectability | 5556250a59d7f500bcee86899dd9a497a368faca | [
"MIT"
] | null | null | null | source/boundaryconds.py | ldeo-glaciology/sglake-detectability | 5556250a59d7f500bcee86899dd9a497a368faca | [
"MIT"
] | null | null | null | #-------------------------------------------------------------------------------
# This file contains functions that:
# (1) define the boundaries (ice-air,ice-water,ice-bed) of the mesh, AND...
# (2) mark the boundaries of the mesh
#-------------------------------------------------------------------------------
from params import tol,Lngth,Hght
from geometry import bed
import numpy as np
from dolfin import *
#-------------------------------------------------------------------------------
# Define SubDomains for ice-water boundary, ice-bed boundary, inflow (x=0) and
# outflow (x=Length of domain). The parameter 'tol' is a minimal water depth
# used to distinguish the ice-water and ice-bed surfaces.
class WaterBoundary(SubDomain):
    """Ice-water boundary: any boundary point in the lower half of the domain.

    Marked first; the other markers later overwrite the portions that are
    not actually ice-water contact.
    """

    def inside(self, x, on_boundary):
        return on_boundary and x[1] < 0.5 * Hght
class BedBoundary(SubDomain):
    """Ice-bed boundary: boundary points within `tol` of the bed elevation.

    Lifting of ice from the bed is not allowed on this boundary; `tol` is
    the minimal water depth separating bed from water surfaces.
    """

    def inside(self, x, on_boundary):
        return on_boundary and x[1] - bed(x[0]) <= tol
class LeftBoundary(SubDomain):
    """Inflow boundary at x = 0 (within `tol`)."""

    def inside(self, x, on_boundary):
        return on_boundary and np.abs(x[0]) < tol
class RightBoundary(SubDomain):
    """Outflow boundary at x = Lngth (within `tol`)."""

    def inside(self, x, on_boundary):
        return on_boundary and np.abs(x[0] - Lngth) < tol
#-------------------------------------------------------------------------------
def mark_boundary(mesh):
    """Assign an integer marker to each boundary facet of *mesh*.

    Marker convention (the upper surface keeps the default 0):
      1 - left (inflow) boundary
      2 - right (outflow) boundary
      3 - ice-bed boundary
      4 - ice-water boundary

    Called at each time step; the returned markers are used to define the
    boundary integrals and Dirichlet conditions.

    NOTE: the marking order matters — WaterBoundary is marked first and
    the later markers overwrite the portions it claimed too broadly.
    """
    boundary_markers = MeshFunction('size_t', mesh,dim=1)
    boundary_markers.set_all(0)
    # Ice-water boundary (marked first, partially overwritten below).
    bdryWater = WaterBoundary()
    bdryWater.mark(boundary_markers, 4)
    # Ice-bed boundary away from the lake.
    bdryBed = BedBoundary()
    bdryBed.mark(boundary_markers, 3)
    # Inflow boundary at x = 0.
    bdryLeft = LeftBoundary()
    bdryLeft.mark(boundary_markers, 1)
    # Outflow boundary at x = Lngth.
    bdryRight = RightBoundary()
    bdryRight.mark(boundary_markers, 2)
    return boundary_markers
| 35.04 | 85 | 0.614916 |
from params import tol,Lngth,Hght
from geometry import bed
import numpy as np
from dolfin import *
class WaterBoundary(SubDomain):
def inside(self, x, on_boundary):
return (on_boundary and (x[1]<0.5*Hght))
class BedBoundary(SubDomain):
def inside(self, x, on_boundary):
return (on_boundary and ((x[1]-bed(x[0]))<=tol))
class LeftBoundary(SubDomain):
def inside(self, x, on_boundary):
return (on_boundary and np.abs(x[0])<tol)
class RightBoundary(SubDomain):
def inside(self, x, on_boundary):
return (on_boundary and np.abs(x[0]-Lngth)<tol)
def mark_boundary(mesh):
boundary_markers = MeshFunction('size_t', mesh,dim=1)
boundary_markers.set_all(0)
bdryWater = WaterBoundary()
bdryWater.mark(boundary_markers, 4)
bdryBed = BedBoundary()
bdryBed.mark(boundary_markers, 3)
bdryLeft = LeftBoundary()
bdryLeft.mark(boundary_markers, 1)
bdryRight = RightBoundary()
bdryRight.mark(boundary_markers, 2)
return boundary_markers
| true | true |
f7176c29d3c8975aef635b4e3270b662412a46af | 1,825 | py | Python | data_mining/dataset/main.py | basantbhandari/LaptopPricePrediction | 086cfaf99b7c625345d5d383ba7f7e2109821c43 | [
"MIT"
] | null | null | null | data_mining/dataset/main.py | basantbhandari/LaptopPricePrediction | 086cfaf99b7c625345d5d383ba7f7e2109821c43 | [
"MIT"
] | null | null | null | data_mining/dataset/main.py | basantbhandari/LaptopPricePrediction | 086cfaf99b7c625345d5d383ba7f7e2109821c43 | [
"MIT"
] | null | null | null | print("Scrape the dataset from...")
# import the necessary library
from bs4 import BeautifulSoup
import requests
import pandas as pd
# Request to website and download HTML contents
url='https://www.gadgetbytenepal.com/category/laptop-price-in-nepal/'
# write data in a file.
file1 = open("alldata.txt","w")
req=requests.get(url)
htmlcontent=req.content
# print(content)
# Format the downloadable content
soup=BeautifulSoup(htmlcontent, 'html.parser')
# print(soup.prettify())
desired_content = soup.find(class_='td-category-description')
print("############################################################")
# print(desired_content.prettify())
print("############################################################")
data_header = desired_content.find_all('h2')
print("############################################################")
#print(data_header)
print("############################################################")
print("############################################################")
#for item in data_header:
#print(item.get_text())
print("############################################################")
data_items = desired_content.find_all('div', class_ = 'su-table su-table-alternate')
print("############################################################")
# print(data_items)
print("############################################################")
print("############################################################")
i=0
for item in data_items:
print("############################################################")
# print(item)
eachrow = item.find_all('tr')
for tabledata in eachrow:
print(tabledata.get_text())
file1.writelines(tabledata.get_text())
i=i+1
print("\n",i)
print("############################################################")
file1.close()
| 19.623656 | 84 | 0.447671 | print("Scrape the dataset from...")
from bs4 import BeautifulSoup
import requests
import pandas as pd
url='https://www.gadgetbytenepal.com/category/laptop-price-in-nepal/'
file1 = open("alldata.txt","w")
req=requests.get(url)
htmlcontent=req.content
soup=BeautifulSoup(htmlcontent, 'html.parser')
desired_content = soup.find(class_='td-category-description')
print("############################################################")
print("############################################################")
data_header = desired_content.find_all('h2')
print("############################################################")
print("############################################################")
print("############################################################")
print("############################################################")
data_items = desired_content.find_all('div', class_ = 'su-table su-table-alternate')
print("############################################################")
print("############################################################")
print("############################################################")
i=0
for item in data_items:
print("############################################################")
eachrow = item.find_all('tr')
for tabledata in eachrow:
print(tabledata.get_text())
file1.writelines(tabledata.get_text())
i=i+1
print("\n",i)
print("############################################################")
file1.close()
| true | true |
f7176c890702cb23e4dd9472f56d2732e8d22b76 | 7,927 | py | Python | tests_python/tests_008/test_basic.py | arvidj/tezos | 9d9e75425ebd603e9e6b9158d573424cd74e9a30 | [
"MIT"
] | null | null | null | tests_python/tests_008/test_basic.py | arvidj/tezos | 9d9e75425ebd603e9e6b9158d573424cd74e9a30 | [
"MIT"
] | null | null | null | tests_python/tests_008/test_basic.py | arvidj/tezos | 9d9e75425ebd603e9e6b9158d573424cd74e9a30 | [
"MIT"
] | null | null | null | from os import path
import pytest
from client.client import Client
from tools import utils
from tools.paths import ACCOUNT_PATH
from tools.utils import assert_run_failure
from .contract_paths import CONTRACT_PATH
TRANSFER_ARGS = ['--burn-cap', '0.257']
@pytest.mark.incremental
class TestRawContext:
    """End-to-end sandbox scenario run step by step.

    The ``incremental`` marker makes pytest skip the remaining methods as
    soon as one fails, because later steps depend on the chain and wallet
    state built up by earlier ones.
    """

    def test_delegates(self, client: Client):
        # Raw context listing of delegate key prefixes, truncated at depth 3.
        path = '/chains/main/blocks/head/context/raw/bytes/delegates/?depth=3'
        res = client.rpc('get', path)
        expected = {
            "ed25519": {
                "02": {"29": None},
                "a9": {"ce": None},
                "c5": {"5c": None},
                "da": {"c9": None},
                "e7": {"67": None},
            }
        }
        assert res == expected

    def test_no_service_1(self, client: Client):
        # Unknown raw-context path must be rejected.
        path = '/chains/main/blocks/head/context/raw/bytes/non-existent'
        with assert_run_failure('No service found at this URL'):
            client.rpc('get', path)

    def test_no_service_2(self, client: Client):
        # Negative extraction depth is invalid regardless of the path.
        path = (
            '/chains/main/blocks/head/context/raw/bytes/'
            'non-existent?depth=-1'
        )
        expected = 'Command failed: Extraction depth -1 is invalid'
        with assert_run_failure(expected):
            client.rpc('get', path)

    def test_no_service_3(self, client: Client):
        path = '/chains/main/blocks/head/context/raw/bytes/non-existent?depth=0'
        with assert_run_failure('No service found at this URL'):
            client.rpc('get', path)

    def test_bake(self, client: Client):
        utils.bake(client, 'bootstrap4')

    def test_gen_keys(self, client: Client, session):
        # Generate three keys; None selects the client's default scheme.
        session['keys'] = ['foo', 'bar', 'boo']
        sigs = [None, 'secp256k1', 'ed25519']
        for key, sig in zip(session['keys'], sigs):
            args = [] if sig is None else ['--sig', sig]
            client.gen_key(key, args)

    def test_transfers(self, client: Client, session):
        # Fund the fresh accounts; the burn cap pays the origination burn.
        client.transfer(1000, 'bootstrap1', session['keys'][0], TRANSFER_ARGS)
        utils.bake(client)
        client.transfer(2000, 'bootstrap1', session['keys'][1], TRANSFER_ARGS)
        utils.bake(client)
        client.transfer(3000, 'bootstrap1', session['keys'][2], TRANSFER_ARGS)
        utils.bake(client)

    def test_balances(self, client: Client, session):
        assert client.get_balance(session['keys'][0]) == 1000
        assert client.get_balance(session['keys'][1]) == 2000
        assert client.get_balance(session['keys'][2]) == 3000

    def test_transfer_bar_foo(self, client: Client, session):
        # Zero-fee transfer needs --force-low-fee to bypass the fee check.
        client.transfer(
            1000,
            session['keys'][1],
            session['keys'][0],
            ['--fee', '0', '--force-low-fee'],
        )
        utils.bake(client)

    def test_balances_bar_foo(self, client: Client, session):
        assert client.get_balance(session['keys'][0]) == 2000
        assert client.get_balance(session['keys'][1]) == 1000

    def test_transfer_foo_bar(self, client: Client, session):
        client.transfer(
            1000, session['keys'][0], session['keys'][1], ['--fee', '0.05']
        )
        utils.bake(client)

    def test_balances_foo_bar(self, client: Client, session):
        # foo paid the 0.05 fee on top of the transferred amount.
        assert client.get_balance(session['keys'][0]) == 999.95
        assert client.get_balance(session['keys'][1]) == 2000

    def test_transfer_failure(self, client: Client, session):
        # foo cannot send its full balance (no funds left for the fee).
        with pytest.raises(Exception):
            client.transfer(999.95, session['keys'][0], session['keys'][1])

    def test_originate_contract_noop(self, client: Client):
        contract = path.join(CONTRACT_PATH, 'opcodes', 'noop.tz')
        client.remember('noop', contract)
        client.typecheck(contract)
        client.originate(
            'noop', 1000, 'bootstrap1', contract, ['--burn-cap', '0.295']
        )
        utils.bake(client)

    def test_transfer_to_noop(self, client: Client):
        client.transfer(10, 'bootstrap1', 'noop', ['--arg', 'Unit'])
        utils.bake(client)

    def test_contract_hardlimit(self, client: Client):
        # hardlimit.tz only accepts a limited number of calls (init = 3).
        contract = path.join(CONTRACT_PATH, 'mini_scenarios', 'hardlimit.tz')
        client.originate(
            'hardlimit',
            1000,
            'bootstrap1',
            contract,
            ['--init', '3', '--burn-cap', '0.341'],
        )
        utils.bake(client)
        client.transfer(10, 'bootstrap1', 'hardlimit', ['--arg', 'Unit'])
        utils.bake(client)
        client.transfer(10, 'bootstrap1', 'hardlimit', ['--arg', 'Unit'])
        utils.bake(client)

    def test_transfers_bootstraps5_bootstrap1(self, client: Client):
        # Round-trip a zero-fee transfer; the balance must be unchanged.
        assert client.get_balance('bootstrap5') == 4000000
        client.transfer(
            400000,
            'bootstrap5',
            'bootstrap1',
            ['--fee', '0', '--force-low-fee'],
        )
        utils.bake(client)
        client.transfer(
            400000,
            'bootstrap1',
            'bootstrap5',
            ['--fee', '0', '--force-low-fee'],
        )
        utils.bake(client)
        assert client.get_balance('bootstrap5') == 4000000

    def test_activate_accounts(self, client: Client, session):
        # Activate two commitment (fundraiser) accounts from JSON files.
        account = f"{ACCOUNT_PATH}/king_commitment.json"
        session['keys'] += ['king', 'queen']
        client.activate_account(session['keys'][3], account)
        utils.bake(client)
        account = f"{ACCOUNT_PATH}/queen_commitment.json"
        client.activate_account(session['keys'][4], account)
        utils.bake(client)
        assert client.get_balance(session['keys'][3]) == 23932454.669343
        assert client.get_balance(session['keys'][4]) == 72954577.464032

    def test_transfer_king_queen(self, client: Client, session):
        keys = session['keys']
        client.transfer(10, keys[3], keys[4], TRANSFER_ARGS)
        utils.bake(client)

    def test_duplicate_alias(self, client: Client):
        # Re-aliasing an existing key must keep its secret key visible.
        client.add_address("baz", "foo", force=True)
        show_foo = client.show_address("foo", show_secret=True)
        assert show_foo.secret_key is not None
class TestRememberContract:
    """Checks `remember contract` aliasing of non-originated KT1 addresses.

    All three tests run against the same client, so the aliases saved by
    the first test are already present for the later ones.
    """

    @pytest.mark.parametrize(
        "contract_name,non_originated_contract_address",
        [
            ("test", "KT1BuEZtb68c1Q4yjtckcNjGELqWt56Xyesc"),
            ("test-2", "KT1TZCh8fmUbuDqFxetPWC2fsQanAHzLx4W9"),
        ],
    )
    def test_non_originated_contract_no_forcing_not_saved_before(
        self,
        client,
        contract_name,
        non_originated_contract_address,
    ):
        # First save: no force needed since the alias does not exist yet.
        client.remember_contract(contract_name, non_originated_contract_address)

    # As it is always the same client, the contracts have been saved
    # before
    @pytest.mark.parametrize(
        "contract_name,non_originated_contract_address",
        [
            ("test", "KT1BuEZtb68c1Q4yjtckcNjGELqWt56Xyesc"),
            ("test-2", "KT1TZCh8fmUbuDqFxetPWC2fsQanAHzLx4W9"),
        ],
    )
    def test_non_originated_contract_with_forcing_and_saved_before(
        self,
        client,
        contract_name,
        non_originated_contract_address,
    ):
        # Saving again with force=True overwrites the existing alias.
        client.remember_contract(
            contract_name, non_originated_contract_address, force=True
        )

    # As it is always the same client, the contracts have been saved
    # before
    @pytest.mark.parametrize(
        "contract_name,non_originated_contract_address",
        [
            ("test", "KT1BuEZtb68c1Q4yjtckcNjGELqWt56Xyesc"),
            ("test-2", "KT1TZCh8fmUbuDqFxetPWC2fsQanAHzLx4W9"),
        ],
    )
    def test_non_originated_contract_no_forcing_and_saved_before(
        self,
        client,
        contract_name,
        non_originated_contract_address,
    ):
        # Saving again without force must fail with a clear error.
        expected_error = f"The contract alias {contract_name} already exists"
        with assert_run_failure(expected_error):
            client.remember_contract(
                contract_name, non_originated_contract_address, force=False
            )
| 35.707207 | 80 | 0.608931 | from os import path
import pytest
from client.client import Client
from tools import utils
from tools.paths import ACCOUNT_PATH
from tools.utils import assert_run_failure
from .contract_paths import CONTRACT_PATH
TRANSFER_ARGS = ['--burn-cap', '0.257']
@pytest.mark.incremental
class TestRawContext:
def test_delegates(self, client: Client):
path = '/chains/main/blocks/head/context/raw/bytes/delegates/?depth=3'
res = client.rpc('get', path)
expected = {
"ed25519": {
"02": {"29": None},
"a9": {"ce": None},
"c5": {"5c": None},
"da": {"c9": None},
"e7": {"67": None},
}
}
assert res == expected
def test_no_service_1(self, client: Client):
path = '/chains/main/blocks/head/context/raw/bytes/non-existent'
with assert_run_failure('No service found at this URL'):
client.rpc('get', path)
def test_no_service_2(self, client: Client):
path = (
'/chains/main/blocks/head/context/raw/bytes/'
'non-existent?depth=-1'
)
expected = 'Command failed: Extraction depth -1 is invalid'
with assert_run_failure(expected):
client.rpc('get', path)
def test_no_service_3(self, client: Client):
path = '/chains/main/blocks/head/context/raw/bytes/non-existent?depth=0'
with assert_run_failure('No service found at this URL'):
client.rpc('get', path)
def test_bake(self, client: Client):
utils.bake(client, 'bootstrap4')
def test_gen_keys(self, client: Client, session):
session['keys'] = ['foo', 'bar', 'boo']
sigs = [None, 'secp256k1', 'ed25519']
for key, sig in zip(session['keys'], sigs):
args = [] if sig is None else ['--sig', sig]
client.gen_key(key, args)
def test_transfers(self, client: Client, session):
client.transfer(1000, 'bootstrap1', session['keys'][0], TRANSFER_ARGS)
utils.bake(client)
client.transfer(2000, 'bootstrap1', session['keys'][1], TRANSFER_ARGS)
utils.bake(client)
client.transfer(3000, 'bootstrap1', session['keys'][2], TRANSFER_ARGS)
utils.bake(client)
def test_balances(self, client: Client, session):
assert client.get_balance(session['keys'][0]) == 1000
assert client.get_balance(session['keys'][1]) == 2000
assert client.get_balance(session['keys'][2]) == 3000
def test_transfer_bar_foo(self, client: Client, session):
client.transfer(
1000,
session['keys'][1],
session['keys'][0],
['--fee', '0', '--force-low-fee'],
)
utils.bake(client)
def test_balances_bar_foo(self, client: Client, session):
assert client.get_balance(session['keys'][0]) == 2000
assert client.get_balance(session['keys'][1]) == 1000
def test_transfer_foo_bar(self, client: Client, session):
client.transfer(
1000, session['keys'][0], session['keys'][1], ['--fee', '0.05']
)
utils.bake(client)
def test_balances_foo_bar(self, client: Client, session):
assert client.get_balance(session['keys'][0]) == 999.95
assert client.get_balance(session['keys'][1]) == 2000
def test_transfer_failure(self, client: Client, session):
with pytest.raises(Exception):
client.transfer(999.95, session['keys'][0], session['keys'][1])
def test_originate_contract_noop(self, client: Client):
contract = path.join(CONTRACT_PATH, 'opcodes', 'noop.tz')
client.remember('noop', contract)
client.typecheck(contract)
client.originate(
'noop', 1000, 'bootstrap1', contract, ['--burn-cap', '0.295']
)
utils.bake(client)
def test_transfer_to_noop(self, client: Client):
client.transfer(10, 'bootstrap1', 'noop', ['--arg', 'Unit'])
utils.bake(client)
def test_contract_hardlimit(self, client: Client):
contract = path.join(CONTRACT_PATH, 'mini_scenarios', 'hardlimit.tz')
client.originate(
'hardlimit',
1000,
'bootstrap1',
contract,
['--init', '3', '--burn-cap', '0.341'],
)
utils.bake(client)
client.transfer(10, 'bootstrap1', 'hardlimit', ['--arg', 'Unit'])
utils.bake(client)
client.transfer(10, 'bootstrap1', 'hardlimit', ['--arg', 'Unit'])
utils.bake(client)
def test_transfers_bootstraps5_bootstrap1(self, client: Client):
assert client.get_balance('bootstrap5') == 4000000
client.transfer(
400000,
'bootstrap5',
'bootstrap1',
['--fee', '0', '--force-low-fee'],
)
utils.bake(client)
client.transfer(
400000,
'bootstrap1',
'bootstrap5',
['--fee', '0', '--force-low-fee'],
)
utils.bake(client)
assert client.get_balance('bootstrap5') == 4000000
def test_activate_accounts(self, client: Client, session):
account = f"{ACCOUNT_PATH}/king_commitment.json"
session['keys'] += ['king', 'queen']
client.activate_account(session['keys'][3], account)
utils.bake(client)
account = f"{ACCOUNT_PATH}/queen_commitment.json"
client.activate_account(session['keys'][4], account)
utils.bake(client)
assert client.get_balance(session['keys'][3]) == 23932454.669343
assert client.get_balance(session['keys'][4]) == 72954577.464032
def test_transfer_king_queen(self, client: Client, session):
keys = session['keys']
client.transfer(10, keys[3], keys[4], TRANSFER_ARGS)
utils.bake(client)
def test_duplicate_alias(self, client: Client):
client.add_address("baz", "foo", force=True)
show_foo = client.show_address("foo", show_secret=True)
assert show_foo.secret_key is not None
class TestRememberContract:
@pytest.mark.parametrize(
"contract_name,non_originated_contract_address",
[
("test", "KT1BuEZtb68c1Q4yjtckcNjGELqWt56Xyesc"),
("test-2", "KT1TZCh8fmUbuDqFxetPWC2fsQanAHzLx4W9"),
],
)
def test_non_originated_contract_no_forcing_not_saved_before(
self,
client,
contract_name,
non_originated_contract_address,
):
client.remember_contract(contract_name, non_originated_contract_address)
@pytest.mark.parametrize(
"contract_name,non_originated_contract_address",
[
("test", "KT1BuEZtb68c1Q4yjtckcNjGELqWt56Xyesc"),
("test-2", "KT1TZCh8fmUbuDqFxetPWC2fsQanAHzLx4W9"),
],
)
def test_non_originated_contract_with_forcing_and_saved_before(
self,
client,
contract_name,
non_originated_contract_address,
):
client.remember_contract(
contract_name, non_originated_contract_address, force=True
)
@pytest.mark.parametrize(
"contract_name,non_originated_contract_address",
[
("test", "KT1BuEZtb68c1Q4yjtckcNjGELqWt56Xyesc"),
("test-2", "KT1TZCh8fmUbuDqFxetPWC2fsQanAHzLx4W9"),
],
)
def test_non_originated_contract_no_forcing_and_saved_before(
self,
client,
contract_name,
non_originated_contract_address,
):
expected_error = f"The contract alias {contract_name} already exists"
with assert_run_failure(expected_error):
client.remember_contract(
contract_name, non_originated_contract_address, force=False
)
| true | true |
f7176e98b7a02c6157bf7280dc45d7ede12e9f2b | 2,915 | py | Python | Bio/Geo/Record.py | bneron/biopython | 2c52e57661c8f6cdf4a191850b2f6871f8582af7 | [
"PostgreSQL"
] | 1 | 2019-07-29T02:53:51.000Z | 2019-07-29T02:53:51.000Z | Bio/Geo/Record.py | bneron/biopython | 2c52e57661c8f6cdf4a191850b2f6871f8582af7 | [
"PostgreSQL"
] | 1 | 2021-09-11T14:30:32.000Z | 2021-09-11T14:30:32.000Z | Bio/Geo/Record.py | bneron/biopython | 2c52e57661c8f6cdf4a191850b2f6871f8582af7 | [
"PostgreSQL"
] | 2 | 2016-12-19T02:27:46.000Z | 2019-07-29T02:53:54.000Z | # Copyright 2001 by Katharine Lindner. All rights reserved.
# Copyright 2006 by PeterC. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""Hold GEO data in a straightforward format.
classes:
o Record - All of the information in an GEO record.
See http://www.ncbi.nlm.nih.gov/geo/
"""
from __future__ import print_function
class Record(object):
    """Hold GEO information in a format similar to the original record.

    The Record class is meant to make data easy to get to when you are
    just interested in looking at GEO data.

    Attributes:
     - entity_type
     - entity_id
     - entity_attributes
     - col_defs
     - table_rows
    """

    def __init__(self):
        self.entity_type = ''
        self.entity_id = ''
        self.entity_attributes = {}
        self.col_defs = {}
        self.table_rows = []

    def __str__(self):
        """Render the record as indented, line-wrapped text.

        Values are shown 40 characters on their header line with the rest
        wrapped by out_block(); at most the first 21 table rows (header
        included) are shown, followed by '...' and the last row when the
        table is longer.
        """
        output = ''
        output += 'GEO Type: %s\n' % self.entity_type
        output += 'GEO Id: %s\n' % self.entity_id
        att_keys = sorted(self.entity_attributes)
        for key in att_keys:
            contents = self.entity_attributes[key]
            if isinstance(contents, list):
                for item in contents:
                    try:
                        output += '%s: %s\n' % (key, item[:40])
                        output += out_block(item[40:])
                    except Exception:
                        # Narrowed from a bare `except:`: skip only items
                        # that cannot be sliced/formatted, without
                        # swallowing KeyboardInterrupt/SystemExit.
                        pass
            elif isinstance(contents, str):
                output += '%s: %s\n' % (key, contents[:40])
                output += out_block(contents[40:])
            else:
                # Non-list, non-str values use the same generic formatting;
                # the slice will raise for unsliceable types.  (A stray
                # debug print() was removed here: __str__ must not write
                # to stdout.)
                output += '%s: %s\n' % (key, contents[:40])
                output += out_block(contents[40:])
        col_keys = sorted(self.col_defs)
        output += 'Column Header Definitions\n'
        for key in col_keys:
            val = self.col_defs[key]
            output += '    %s: %s\n' % (key, val[:40])
            output += out_block(val[40:], '    ')
        # May have to display VERY large tables, so only show the first
        # 20 lines of data.  enumerate() (rather than list.index, which
        # returns the first match) keeps row numbers correct even when
        # duplicate rows are present.
        MAX_ROWS = 20 + 1  # include header in count
        for row_num, row in enumerate(self.table_rows[0:MAX_ROWS]):
            output += '%s: ' % row_num
            for col in row:
                output += '%s\t' % col
            output += '\n'
        if len(self.table_rows) > MAX_ROWS:
            output += '...\n'
            row = self.table_rows[-1]
            output += '%s: ' % (len(self.table_rows) - 1)
            for col in row:
                output += '%s\t' % col
            output += '\n'
        return output


def out_block(text, prefix=''):
    """Wrap *text* into 80-character lines, each preceded by *prefix*.

    Always ends with one extra newline (also for empty input).
    """
    output = ''
    for j in range(0, len(text), 80):
        output += '%s%s\n' % (prefix, text[j:j + 80])
    output += '\n'
    return output
| 32.032967 | 71 | 0.540995 |
from __future__ import print_function
class Record(object):
def __init__(self):
self.entity_type = ''
self.entity_id = ''
self.entity_attributes = {}
self.col_defs = {}
self.table_rows = []
def __str__(self):
output = ''
output += 'GEO Type: %s\n' % self.entity_type
output += 'GEO Id: %s\n' % self.entity_id
att_keys = sorted(self.entity_attributes)
for key in att_keys:
contents = self.entity_attributes[key]
if isinstance(contents, list):
for item in contents:
try:
output += '%s: %s\n' % (key, item[:40])
output += out_block(item[40:])
except:
pass
elif isinstance(contents, str):
output += '%s: %s\n' % (key, contents[:40])
output += out_block(contents[40:])
else:
print(contents)
output += '%s: %s\n' % (key, contents[:40])
output += out_block(contents[40:])
col_keys = sorted(self.col_defs)
output += 'Column Header Definitions\n'
for key in col_keys:
val = self.col_defs[key]
output += ' %s: %s\n' % (key, val[:40])
output += out_block(val[40:], ' ')
MAX_ROWS = 20 + 1
for row in self.table_rows[0:MAX_ROWS]:
output += '%s: ' % self.table_rows.index(row)
for col in row:
output += '%s\t' % col
output += '\n'
if len(self.table_rows) > MAX_ROWS:
output += '...\n'
row = self.table_rows[-1]
output += '%s: ' % self.table_rows.index(row)
for col in row:
output += '%s\t' % col
output += '\n'
return output
def out_block(text, prefix=''):
output = ''
for j in range(0, len(text), 80):
output += '%s%s\n' % (prefix, text[j:j + 80])
output += '\n'
return output
| true | true |
f7176eb2dd972a167ede03275af13333e556edee | 829 | py | Python | setup.py | brandjon/iast | 23961536c3bfb5d8fce39c28214ea88b8072450c | [
"PSF-2.0"
] | 11 | 2015-01-04T08:40:09.000Z | 2021-03-24T03:56:34.000Z | setup.py | brandjon/iast | 23961536c3bfb5d8fce39c28214ea88b8072450c | [
"PSF-2.0"
] | null | null | null | setup.py | brandjon/iast | 23961536c3bfb5d8fce39c28214ea88b8072450c | [
"PSF-2.0"
] | null | null | null | from setuptools import setup
setup(
name = 'iAST',
version = '0.2.1',
url = 'https://github.com/brandjon/iast',
author = 'Jon Brandvein',
author_email = 'jon.brandvein@gmail.com',
license = 'MIT License',
description = 'A library for defining and manipulating ASTs',
classifiers = [
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3',
'Topic :: Software Development :: Libraries :: Python Modules',
],
packages = ['iast', 'iast.asdl', 'iast.python'],
package_data = {'iast.asdl': ['*.asdl']},
test_suite = 'tests',
install_requires = ['simplestruct >=0.2.1'],
)
| 29.607143 | 71 | 0.546441 | from setuptools import setup
setup(
name = 'iAST',
version = '0.2.1',
url = 'https://github.com/brandjon/iast',
author = 'Jon Brandvein',
author_email = 'jon.brandvein@gmail.com',
license = 'MIT License',
description = 'A library for defining and manipulating ASTs',
classifiers = [
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3',
'Topic :: Software Development :: Libraries :: Python Modules',
],
packages = ['iast', 'iast.asdl', 'iast.python'],
package_data = {'iast.asdl': ['*.asdl']},
test_suite = 'tests',
install_requires = ['simplestruct >=0.2.1'],
)
| true | true |
f717706b17d80b336156b8f3eb5d703f5a5b7596 | 3,082 | py | Python | airbyte-integrations/connectors/source-paypal-transaction/bin/fixture_helper.py | luizgribeiro/airbyte | 71a96f5417b678c39b34e2e92234d8a51529e086 | [
"MIT"
] | 2 | 2021-08-04T03:17:38.000Z | 2021-11-15T10:16:08.000Z | airbyte-integrations/connectors/source-paypal-transaction/bin/fixture_helper.py | luizgribeiro/airbyte | 71a96f5417b678c39b34e2e92234d8a51529e086 | [
"MIT"
] | 52 | 2021-06-11T12:39:05.000Z | 2022-03-30T04:59:35.000Z | airbyte-integrations/connectors/source-paypal-transaction/bin/fixture_helper.py | luizgribeiro/airbyte | 71a96f5417b678c39b34e2e92234d8a51529e086 | [
"MIT"
] | 2 | 2021-12-14T17:15:40.000Z | 2021-12-14T17:18:03.000Z | #
# MIT License
#
# Copyright (c) 2020 Airbyte
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
import logging
from pprint import pprint
# %%
import requests
logging.basicConfig(level=logging.DEBUG)
# %%
specification = {
"client_id": "REPLACE_ME",
"secret": "REPLACE_ME",
"start_date": "2021-06-01T00:00:00+00:00",
"end_date": "2021-06-30T00:00:00+00:00",
"is_sandbox": True,
}
# %% READ <client_id> and <secret>
client_id = specification.get("client_id")
secret = specification.get("secret")
# %% GET API_TOKEN
token_refresh_endpoint = "https://api-m.sandbox.paypal.com/v1/oauth2/token"
data = "grant_type=client_credentials"
headers = {
"Accept": "application/json",
"Accept-Language": "en_US",
}
response = requests.request(
method="POST",
url=token_refresh_endpoint,
data=data,
headers=headers,
auth=(client_id, secret),
)
response_json = response.json()
print(response_json)
API_TOKEN = response_json["access_token"]
# CREATE TRANSACTIONS
# for i in range(1000):
# create_response = requests.post(
# "https://api-m.sandbox.paypal.com/v2/checkout/orders",
# headers={'content-type': 'application/json', 'authorization': f'Bearer {API_TOKEN}', "prefer": "return=representation"},
# json={
# "intent": "CAPTURE",
# "purchase_units": [
# {
# "amount": {
# "currency_code": "USD",
# "value": f"{float(i)}"
# }
# }
# ]
# }
# )
#
# print(create_response.json())
# %% LIST TRANSACTIONS
url = "https://api-m.sandbox.paypal.com/v1/reporting/transactions"
params = {
"start_date": "2021-06-20T00:00:00+00:00",
"end_date": "2021-07-10T07:19:45Z",
"fields": "all",
"page_size": "100",
"page": "1",
}
headers = {
"Authorization": f"Bearer {API_TOKEN}",
"Content-Type": "application/json",
}
response = requests.get(
url,
headers=headers,
params=params,
)
pprint(response.json())
| 28.018182 | 130 | 0.658014 |
import logging
from pprint import pprint
import requests
logging.basicConfig(level=logging.DEBUG)
specification = {
"client_id": "REPLACE_ME",
"secret": "REPLACE_ME",
"start_date": "2021-06-01T00:00:00+00:00",
"end_date": "2021-06-30T00:00:00+00:00",
"is_sandbox": True,
}
client_id = specification.get("client_id")
secret = specification.get("secret")
token_refresh_endpoint = "https://api-m.sandbox.paypal.com/v1/oauth2/token"
data = "grant_type=client_credentials"
headers = {
"Accept": "application/json",
"Accept-Language": "en_US",
}
response = requests.request(
method="POST",
url=token_refresh_endpoint,
data=data,
headers=headers,
auth=(client_id, secret),
)
response_json = response.json()
print(response_json)
API_TOKEN = response_json["access_token"]
url = "https://api-m.sandbox.paypal.com/v1/reporting/transactions"
params = {
"start_date": "2021-06-20T00:00:00+00:00",
"end_date": "2021-07-10T07:19:45Z",
"fields": "all",
"page_size": "100",
"page": "1",
}
headers = {
"Authorization": f"Bearer {API_TOKEN}",
"Content-Type": "application/json",
}
response = requests.get(
url,
headers=headers,
params=params,
)
pprint(response.json())
| true | true |
f71770a671bd24aaba89e7db742a94eb08713f1f | 2,095 | py | Python | test/main_test.py | vaughn-johnson/talkspace-public-api | 20eca278e8ac651f610d6afaff8fdc3fce2918fc | [
"MIT"
] | null | null | null | test/main_test.py | vaughn-johnson/talkspace-public-api | 20eca278e8ac651f610d6afaff8fdc3fce2918fc | [
"MIT"
] | 6 | 2020-11-19T04:25:05.000Z | 2020-11-20T20:53:32.000Z | test/main_test.py | vaughn-johnson/talkspace-public-api | 20eca278e8ac651f610d6afaff8fdc3fce2918fc | [
"MIT"
] | null | null | null | from unittest.mock import MagicMock, Mock, patch
from .mock_mongo import MockPyMongo
import json
import sys
import os
### TEST SETUP ###
EXPECTED_DATA_FRAME_FILENAME = os.path.join(os.path.dirname(__file__),
'expected_response.json')
EXPECTED_DATA_FRAME = open(EXPECTED_DATA_FRAME_FILENAME).read()
sys.modules['pymongo'] = MockPyMongo
cloud_mock = MagicMock()
sys.modules['google.cloud'] = cloud_mock
##################
from src.main import _get_data, _refresh_data # noqa: E402
def test_snapshot():
expected = _pretty_print(EXPECTED_DATA_FRAME)
observed = _pretty_print(f'{_get_data().to_json()}')
assert expected == observed
def test_cold_cache_json():
_blob().return_value.exists = lambda: False
_blob().return_value.download_as_string.return_value = '{}'
find = MockPyMongo.MongoClient().talkspace.messages.find
find.reset_mock()
_refresh_data('json')
find.assert_called_once()
def test_cold_cache_csv():
_blob().return_value.exists = lambda: False
_blob().return_value.download_as_string.return_value = '{}'
find = MockPyMongo.MongoClient().talkspace.messages.find
find.reset_mock()
_refresh_data('csv')
find.assert_called_once()
def test_warm_cache_json():
_blob().return_value.exists = lambda: True
_blob().return_value.download_as_string.return_value = '{}'
find = MockPyMongo.MongoClient().talkspace.messages.find
find.reset_mock()
_refresh_data('json')
find.assert_not_called()
def test_warm_cache_csv():
_blob().return_value.exists = lambda: True
_blob().return_value.download_as_string.return_value = '{}'
find = MockPyMongo.MongoClient().talkspace.messages.find
find.reset_mock()
_refresh_data('csv')
find.assert_not_called()
def _pretty_print(json_string):
json.dumps(
json.loads(json_string),
indent=2,
sort_keys=True
)
def _blob():
return cloud_mock.storage\
.Client.return_value\
.bucket.return_value\
.blob
| 26.858974 | 70 | 0.686396 | from unittest.mock import MagicMock, Mock, patch
from .mock_mongo import MockPyMongo
import json
import sys
import os
.join(os.path.dirname(__file__),
'expected_response.json')
EXPECTED_DATA_FRAME = open(EXPECTED_DATA_FRAME_FILENAME).read()
sys.modules['pymongo'] = MockPyMongo
cloud_mock = MagicMock()
sys.modules['google.cloud'] = cloud_mock
{_get_data().to_json()}')
assert expected == observed
def test_cold_cache_json():
_blob().return_value.exists = lambda: False
_blob().return_value.download_as_string.return_value = '{}'
find = MockPyMongo.MongoClient().talkspace.messages.find
find.reset_mock()
_refresh_data('json')
find.assert_called_once()
def test_cold_cache_csv():
_blob().return_value.exists = lambda: False
_blob().return_value.download_as_string.return_value = '{}'
find = MockPyMongo.MongoClient().talkspace.messages.find
find.reset_mock()
_refresh_data('csv')
find.assert_called_once()
def test_warm_cache_json():
_blob().return_value.exists = lambda: True
_blob().return_value.download_as_string.return_value = '{}'
find = MockPyMongo.MongoClient().talkspace.messages.find
find.reset_mock()
_refresh_data('json')
find.assert_not_called()
def test_warm_cache_csv():
_blob().return_value.exists = lambda: True
_blob().return_value.download_as_string.return_value = '{}'
find = MockPyMongo.MongoClient().talkspace.messages.find
find.reset_mock()
_refresh_data('csv')
find.assert_not_called()
def _pretty_print(json_string):
json.dumps(
json.loads(json_string),
indent=2,
sort_keys=True
)
def _blob():
return cloud_mock.storage\
.Client.return_value\
.bucket.return_value\
.blob
| true | true |
f7177131572435ea6057a138ae45fca472fca8a1 | 50,438 | py | Python | train/comms/pt/comms.py | caogao/param | 9de2602c894df264a004c352ee16abc14f93da76 | [
"MIT"
] | null | null | null | train/comms/pt/comms.py | caogao/param | 9de2602c894df264a004c352ee16abc14f93da76 | [
"MIT"
] | null | null | null | train/comms/pt/comms.py | caogao/param | 9de2602c894df264a004c352ee16abc14f93da76 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import absolute_import, division, print_function, unicode_literals
import argparse
import logging
import time
import comms_utils
import numpy as np
# pytorch
import torch
from comms_utils import paramCommsBench, ensureTensorFlush
### TODO: add these to class variables?
supportedCollectives = [
"reduce",
"all_reduce",
"all_to_all",
"all_to_allv",
"all_gather",
"broadcast",
"reduce_scatter",
"reduce_scatter_base",
"all_gather_base",
"incast",
"multicast",
] # , "scatter", "gather"]
pt2ptPatterns = [
"one2one",
"pairwise",
]
logger = logging.getLogger(__name__)
class MultilineFormatter(argparse.ArgumentDefaultsHelpFormatter):
def _split_lines(self, text, width):
if text.startswith("R|"):
return text[2:].splitlines()
# this is the RawTextHelpFormatter._split_lines
return argparse.ArgumentDefaultsHelpFormatter._split_lines(self, text, width)
# define the collective benchmark
class commsCollBench(paramCommsBench):
def __init__(self):
super().__init__(supportedNwstacks=["pytorch-dist", "pytorch-xla-tpu"])
# def readCollArgs(self, parser):
def readArgs(self, parser):
# read the common/basic arguments
super().readArgs(parser)
parser.add_argument(
"--w", type=int, default=5, help="number of warmup iterations"
) # number of warmup-iterations
parser.add_argument(
"--n", type=int, default=5, help="number of iterations"
) # number of iterations
# experiment related parameters
parser.add_argument(
"--mode",
type=str,
default="comms",
help="benchmark mode",
choices=["comms", "compute", "dlrm", "comms-compute"],
) # alternative is DLRM mode or comm-compute mode
parser.add_argument(
"--b", type=str, default="8", help="minimum size, in bytes, to start with"
) # COMMS mode, begin the sweep at.
parser.add_argument(
"--e", type=str, default="64", help="maximum size, in bytes, to end at"
) # COMMS mode, end the sweep at.
parser.add_argument(
"--f", type=int, default=2, help="multiplication factor between sizes"
) # COMMS mode, multiplication factor.
parser.add_argument(
"--collective",
type=str,
default="all_reduce",
help="Collective operation to be evaluated",
choices=supportedCollectives,
) # collective op to benchmark
# For comm-compute or compute mode
parser.add_argument(
"--kernel",
type=str,
default="gemm",
help="Compute kernel, used for comms-compute or compute mode",
choices=["gemm", "emb_lookup"],
) # Compute kernel: "gemm"
parser.add_argument(
"--num-compute",
type=int,
default=100,
help="one collective for every NUM_COMPUTE compute kernels",
) # Launch one coll for every n compute kernels
# For GEMM
parser.add_argument(
"--mm-dim",
type=int,
default=100,
help="dimension size for GEMM compute kernel",
) # Matrix multiplication dim n, A[n,n] * B [n,n]
# For emb lookup
parser.add_argument(
"--emb-dim",
type=int,
default=128,
help="dimension size for Embedding table compute kernel",
) # Embedding table dimension
parser.add_argument(
"--num-embs",
type=int,
default=100000,
help="Embedding table hash size for Embedding table compute kernel",
) # Embedding table hash size
parser.add_argument(
"--avg-len",
type=int,
default=28,
help="Average lookup operations per sample",
) # Average #lookup per sample
parser.add_argument(
"--batch-size",
type=int,
default=512,
help="number of samples reading the table concurrently",
) # #Samples reading the table concurrently
parser.add_argument(
"--root", type=int, default=0, help="root process for reduce benchmark"
) # root process for reduce and bcast (and gather, scatter, etc., if support in the future)
# TODO: check the correctness of root, should be between 0 to [world_size -1]
parser.add_argument(
"--src-ranks",
type=str,
nargs="?",
help="R|src ranks for many-to-one incast pattern or pt2pt.\n"
"List of ranks separated by comma or a range specified by start:end.\n"
"Pt2pt one2one should set only one rank.\n"
"The default value of incast includes all ranks, pt2pt includes rank 0.",
) # optional: group of src ranks in many-to-one incast or pt2pt
parser.add_argument(
"--dst-ranks",
type=str,
nargs="?",
help="R|dst ranks for one-to-many multicast pattern or pt2pt.\n"
"List of ranks separated by comma or a range specified by start:end.\n"
"Pt2pt one2one should set only one rank\n"
"The default value of multicast includes all ranks, pt2pt includes rank 1.",
) # optional: group of dst ranks in one-to-many multicast or pt2pt
parser.add_argument(
"--pair",
action="store_true",
default=False,
help="Toggle to enable collective pair mode",
)
parser.add_argument(
"--collective-pair",
type=str,
default="all_reduce",
help="Collective pair operation to be evaluated",
choices=supportedCollectives,
) # collective op to pair with the other collective, --collective should be non-empty
parser.add_argument(
"--overlap-pair-pgs",
action="store_true",
default=False,
help="Toggle to enable overlapping collective pair with two pgs",
) # overlap collective pair with two pgs
parser.add_argument(
"--pt2pt",
type=str,
default=None,
help="point to point pattern",
choices=pt2ptPatterns,
) # point to point mode
parser.add_argument(
"--window",
type=int,
default=100,
help="window size for pt2pt throughput test",
) # optional: point to point throughput test window size
return parser.parse_known_args()
def checkArgs(self, args):
super().checkArgs(args)
if args.pt2pt is not None:
args.collective = "pt2pt"
if args.pt2pt not in pt2ptPatterns:
logger.error(
f"Specified pt2pt pattern: {args.pt2pt} is not one of the supported pt2pt patterns: {str(pt2ptPatterns)}"
)
comms_utils.gracefulExit()
args.b = comms_utils.parsesize(args.b)
args.e = comms_utils.parsesize(args.e)
args.dtype = self.dtypeMap[args.data_type]
if args.b < 1:
logger.warning(
f"Starting size (--b {args.b}) should be greater than 1 byte...fix and continue"
)
args.b = 1
if args.e < args.b:
logger.warning(
f"the begin-size (--b {args.b}) is larger than the end-size (--e {args.e})"
)
if args.device == "cpu" and args.backend == "nccl":
raise ValueError(f"NCCL is not supported for device type {args.device}")
if args.c == 1 and args.z == 0 and args.collective in ("all_reduce", "reduce", "reduce_scatter"):
logger.warning(
f"Data validation is not supported for {args.collective} in non-blocking mode, disabled and continue"
)
args.c = 0
# run a few sanity checks
if args.bitwidth < 32:
if args.device != "cuda":
logger.error(
f"collective quantization may not be fully supported for {args.device}"
)
comms_utils.checkQuantArgs(
args.collective,
args.dtype,
args.b,
args.quant_a2a_embedding_dim,
args.z,
)
def runColl(self, comm_fn=None, compute_fn=None, comm_fn_pair=None):
self.backendFuncs.complete_accel_ops(self.collectiveArgs, initOp=True)
self.backendFuncs.sync_barrier(self.collectiveArgs, desc="runColl_begin")
elapsedTimeNS = 0.0
is_blocking = not self.collectiveArgs.asyncOp
enable_comms = False if (comm_fn is None or comm_fn == self.backendFuncs.noop) else True
enable_compute = False if (compute_fn is None or compute_fn == self.backendFuncs.noop) else True
enable_comms_pair = False if (comm_fn_pair is None or comm_fn_pair == self.backendFuncs.noop) else True
# for comms pair mode, force async comms for overlapping evaluation
if enable_comms_pair:
self.collectiveArgs.asyncOp = True
for nIter in range(
self.collectiveArgs.numWarmupIters + self.collectiveArgs.numIters
):
if nIter == self.collectiveArgs.numWarmupIters:
# Flush non-blocking ops to ensure warmup is really complete
self.backendFuncs.complete_accel_ops(self.collectiveArgs)
ensureTensorFlush(self.collectiveArgs.opTensor)
if enable_comms_pair:
ensureTensorFlush(self.collectiveArgs.opTensor_pair)
# Start measuring time after warmup iterations
elapsedTimeNS = 0.0
self.collectiveArgs.quant_time.reset()
self.collectiveArgs.dequant_time.reset()
# reset tensor values for data validation check
if enable_comms:
self.setTensorVal(self.collectiveArgs.opTensor)
# for blocking mode, do barrier before starting collective
if is_blocking:
self.backendFuncs.sync_barrier(self.collectiveArgs)
start = time.monotonic() # available only in py3
self.collectiveArgs.group = self.backendFuncs.get_next_group()
comm_fn(self.collectiveArgs)
# post another collecitve if on comms pair mode, otherwise it's noop
self.collectiveArgs.group = self.backendFuncs.get_next_group()
comm_fn_pair(self.collectiveArgs, pair=enable_comms_pair)
if enable_compute:
for _ in range(self.collectiveArgs.numComputePerColl):
# TODO: investigate the cache effect
# Flush the cache
# _ = torch.rand(6 * 1024 * 1024 // 4).float() * 2 # V100 6MB L2 cache
compute_fn(self.collectiveArgs)
if is_blocking: # should be sychronous, wait for the collective
self.backendFuncs.complete_accel_ops(self.collectiveArgs)
# Measuring time.
elapsedTimeNS += (
time.monotonic() - start
) * 1e9 # keeping time in NS, helps in divising data by nanosecond
start = time.monotonic() # available only in py3
self.backendFuncs.complete_accel_ops(self.collectiveArgs)
end = time.monotonic() # available only in py3
ensureTensorFlush(self.collectiveArgs.opTensor)
if enable_comms_pair:
ensureTensorFlush(self.collectiveArgs.opTensor_pair)
elapsedTimeNS += (
end - start
) * 1e9 # keeping time in NS, helps in divising data by nanoseconds
memSize = self.backendFuncs.get_mem_size(self.collectiveArgs)
avgIterNS, algBW = comms_utils.getAlgBW(
elapsedTimeNS, memSize, self.collectiveArgs.numIters
)
busBW = self.backendFuncs.getBusBW(
self.collectiveArgs.collective,
algBW,
self.collectiveArgs,
)
if enable_comms_pair:
memSize_pair = self.backendFuncs.get_mem_size(
self.collectiveArgs, pair=enable_comms_pair
)
memSize += memSize_pair
_, algBW_pair = comms_utils.getAlgBW(
elapsedTimeNS, memSize_pair, self.collectiveArgs.numIters
)
algBW += algBW_pair
busBW += self.backendFuncs.getBusBW(
self.collectiveArgs.collective_pair,
algBW_pair,
self.collectiveArgs,
)
self.backendFuncs.sync_barrier(self.collectiveArgs, desc="runColl_end")
results = {
"timeUS": avgIterNS / 1e3,
"algBW": algBW,
"busBW": busBW,
"memSize": memSize,
}
return results
def runPt2Pt(self):
self.backendFuncs.complete_accel_ops(self.collectiveArgs, initOp=True)
# warm-up
memSize = self.backendFuncs.get_mem_size(self.collectiveArgs)
self.getPingLatency(self.collectiveArgs.numWarmupIters)
self.getPingPongLatency(self.collectiveArgs.numWarmupIters)
self.getUniBW(self.collectiveArgs.numWarmupIters, memSize)
self.getBiBW(self.collectiveArgs.numWarmupIters, memSize)
self.backendFuncs.sync_barrier(self.collectiveArgs, "runpt2pt_begin")
# pt2pt benchmark
pingPerIterNS = self.getPingLatency(self.collectiveArgs.numIters)
pingPongPerIterNS = self.getPingPongLatency(self.collectiveArgs.numIters)
avgUniBW = self.getUniBW(self.collectiveArgs.numIters, memSize)
avgBiBW = self.getBiBW(self.collectiveArgs.numIters, memSize)
self.backendFuncs.sync_barrier(self.collectiveArgs, "runpt2pt")
results = {
"pingPerIterNS": pingPerIterNS,
"pingPongPerIterNS": pingPongPerIterNS,
"avgUniBW": avgUniBW,
"avgBiBW": avgBiBW,
"memSize": memSize,
}
return results
def getPingLatency(self, numIters):
logger.debug(
"STATUS: begin ping test with src_ranks=%s, dst_ranks=%s."
% (self.collectiveArgs.src_ranks, self.collectiveArgs.dst_ranks)
)
self.collectiveArgs.asyncOp = False
# get one-way latency
pingLatencyNS = []
for _ in range(numIters):
self.backendFuncs.sync_barrier(self.collectiveArgs)
start = time.monotonic()
if self.collectiveArgs.global_rank in self.collectiveArgs.src_ranks:
idx = self.collectiveArgs.src_ranks.index(
self.collectiveArgs.global_rank
)
self.backendFuncs.send(
self.collectiveArgs, self.collectiveArgs.dst_ranks[idx]
)
elif self.collectiveArgs.global_rank in self.collectiveArgs.dst_ranks:
idx = self.collectiveArgs.dst_ranks.index(
self.collectiveArgs.global_rank
)
self.backendFuncs.recv(
self.collectiveArgs, self.collectiveArgs.src_ranks[idx]
)
self.backendFuncs.complete_accel_ops(self.collectiveArgs)
pingLatencyNS.append(
(time.monotonic() - start) * 1e9
) # keeping time in NS, helps in divising data by nanosecond
logger.debug("STATUS: end ping test.")
return pingLatencyNS
def getPingPongLatency(self, numIters):
logger.debug(
"STATUS: begin ping-pong with src_ranks=%s, dst_ranks=%s."
% (self.collectiveArgs.src_ranks, self.collectiveArgs.dst_ranks)
)
self.collectiveArgs.asyncOp = False
# get round-trip latency
pingPongLatencyNS = []
for _ in range(numIters):
self.backendFuncs.sync_barrier(self.collectiveArgs)
start = time.monotonic()
if self.collectiveArgs.global_rank in self.collectiveArgs.src_ranks:
idx = self.collectiveArgs.src_ranks.index(
self.collectiveArgs.global_rank
)
self.backendFuncs.send(
self.collectiveArgs, self.collectiveArgs.dst_ranks[idx]
)
self.backendFuncs.recv(
self.collectiveArgs, self.collectiveArgs.dst_ranks[idx]
)
elif self.collectiveArgs.global_rank in self.collectiveArgs.dst_ranks:
idx = self.collectiveArgs.dst_ranks.index(
self.collectiveArgs.global_rank
)
self.backendFuncs.recv(
self.collectiveArgs, self.collectiveArgs.src_ranks[idx]
)
self.backendFuncs.send(
self.collectiveArgs, self.collectiveArgs.src_ranks[idx]
)
self.backendFuncs.complete_accel_ops(self.collectiveArgs)
pingPongLatencyNS.append(
(time.monotonic() - start) * 1e9
) # keeping time in NS, helps in divising data by nanosecond
logger.debug("STATUS: end ping-pong test.")
return pingPongLatencyNS
def getUniBW(self, numIters, memSize):
logger.debug(
"STATUS: begin UniBW test with src_ranks=%s, dst_ranks=%s."
% (self.collectiveArgs.src_ranks, self.collectiveArgs.dst_ranks)
)
self.collectiveArgs.asyncOp = True
# get unidirectional bandwidth
uniLatencyNS = []
for _ in range(numIters):
self.backendFuncs.sync_barrier(self.collectiveArgs)
start = time.monotonic()
for w in range(self.collectiveArgs.window):
if self.collectiveArgs.global_rank in self.collectiveArgs.src_ranks:
idx = self.collectiveArgs.src_ranks.index(
self.collectiveArgs.global_rank
)
self.backendFuncs.isend(
self.collectiveArgs, self.collectiveArgs.dst_ranks[idx], tag=w
)
elif self.collectiveArgs.global_rank in self.collectiveArgs.dst_ranks:
idx = self.collectiveArgs.dst_ranks.index(
self.collectiveArgs.global_rank
)
self.backendFuncs.irecv(
self.collectiveArgs, self.collectiveArgs.src_ranks[idx], tag=w
)
self.backendFuncs.complete_accel_ops(self.collectiveArgs)
uniLatencyNS.append(
(time.monotonic() - start) * 1e9
) # keeping time in NS, helps in divising data by nanosecond
uniLatencyNS = [lat / self.collectiveArgs.window for lat in uniLatencyNS]
uniLatencyNS = np.mean(np.array(uniLatencyNS))
_, avgUniBW = comms_utils.getAlgBW(uniLatencyNS, memSize, 1)
logger.debug("STATUS: end UniBW test.")
return avgUniBW
def getBiBW(self, numIters, memSize):
logger.debug(
"STATUS: begin BiBW test with src_ranks=%s, dst_ranks=%s."
% (self.collectiveArgs.src_ranks, self.collectiveArgs.dst_ranks)
)
self.collectiveArgs.asyncOp = True
# get bidirectional bandwidth
biLatencyNS = []
for _ in range(numIters):
self.backendFuncs.sync_barrier(self.collectiveArgs)
start = time.monotonic()
for w in range(self.collectiveArgs.window):
if self.collectiveArgs.global_rank in self.collectiveArgs.src_ranks:
idx = self.collectiveArgs.src_ranks.index(
self.collectiveArgs.global_rank
)
self.backendFuncs.isend(
self.collectiveArgs, self.collectiveArgs.dst_ranks[idx], tag=w
)
self.backendFuncs.irecv(
self.collectiveArgs,
self.collectiveArgs.dst_ranks[idx],
tag=w + self.collectiveArgs.window,
)
elif self.collectiveArgs.global_rank in self.collectiveArgs.dst_ranks:
idx = self.collectiveArgs.dst_ranks.index(
self.collectiveArgs.global_rank
)
self.backendFuncs.irecv(
self.collectiveArgs, self.collectiveArgs.src_ranks[idx], tag=w
)
self.backendFuncs.isend(
self.collectiveArgs,
self.collectiveArgs.src_ranks[idx],
tag=w + self.collectiveArgs.window,
)
self.backendFuncs.complete_accel_ops(self.collectiveArgs)
biLatencyNS.append(
(time.monotonic() - start) * 1e9
) # keeping time in NS, helps in divising data by nanosecond
biLatencyNS = [lat / self.collectiveArgs.window for lat in biLatencyNS]
biLatencyNS = np.mean(np.array(biLatencyNS))
_, avgBiBW = comms_utils.getAlgBW(biLatencyNS, 2 * memSize, 1)
logger.debug("STATUS: end UniBW test.")
return avgBiBW
def checkPt2PtRanks(self):
# set default values
if not self.collectiveArgs.src_ranks:
self.collectiveArgs.src_ranks = [0]
if not self.collectiveArgs.dst_ranks:
self.collectiveArgs.dst_ranks = [1]
# sanity check
if self.collectiveArgs.pt2pt == "one2one":
if (
len(self.collectiveArgs.src_ranks) > 1
or len(self.collectiveArgs.dst_ranks) > 1
):
if self.global_rank == 0:
logger.error(
"One2one Pt2Pt requires only a single rank is specified in src_ranks and dst_ranks! "
)
comms_utils.gracefulExit()
elif self.collectiveArgs.pt2pt == "pairwise":
# pairwise pt2pt requires identical number of ranks in src_ranks and dst_ranks.
if len(self.collectiveArgs.src_ranks) != len(self.collectiveArgs.dst_ranks):
if self.global_rank == 0:
logger.error(
"Pairwise Pt2Pt requires identical number of members in src_ranks and dst_ranks! "
)
comms_utils.gracefulExit()
# pairwise pt2pt does not allow same rank to exist in both groups
if bool(
set(self.collectiveArgs.src_ranks).intersection(
self.collectiveArgs.dst_ranks
)
):
if self.global_rank == 0:
logger.error(
"Pairwise Pt2Pt requires distinct members in src_ranks and dst_ranks! "
)
comms_utils.gracefulExit()
if self.global_rank == 0:
print(
f"\t collective={self.collectiveArgs.collective}\t{self.collectiveArgs.pt2pt}, src_ranks={self.collectiveArgs.src_ranks}, dst_ranks={self.collectiveArgs.dst_ranks}"
)
def checkCollectiveRanks(self):
if self.collectiveArgs.collective == "incast":
# incast: set default value and exclude root
if not self.collectiveArgs.src_ranks:
self.collectiveArgs.src_ranks = [*range(self.comm_size)]
if self.collectiveArgs.srcOrDst in self.collectiveArgs.src_ranks:
self.collectiveArgs.src_ranks.remove(self.collectiveArgs.srcOrDst)
elif self.collectiveArgs.collective == "multicast":
# multicast: set default value and exclude root
if not self.collectiveArgs.dst_ranks:
self.collectiveArgs.dst_ranks = [*range(self.comm_size)]
if self.collectiveArgs.srcOrDst in self.collectiveArgs.dst_ranks:
self.collectiveArgs.dst_ranks.remove(self.collectiveArgs.srcOrDst)
if self.global_rank == 0:
print(
f"\t collective={self.collectiveArgs.collective}, src_ranks={self.collectiveArgs.src_ranks}, dst_ranks={self.collectiveArgs.dst_ranks}"
)
    def initCollectiveArgs(self, commsParams):
        """Initialize ``self.collectiveArgs`` from *commsParams* and the backend.

        Queries rank/device details from the backend, computes the sweep of
        message sizes, wires collective parameters (ranks, ops, quantization),
        and sets up compute-kernel tensors when not in comms-only mode.

        Returns a tuple:
        (local_rank, global_rank, world_size, group, curDevice, curHwDevice,
         allSizes, computeFunc).
        """
        # lint was complaining that benchTime was too complex!
        (
            local_rank,
            global_rank,
            world_size,
            group,
            curDevice,
            curHwDevice,
        ) = comms_utils.get_rank_details(
            self.backendFuncs
        )  # Getting ranks from the backendFuncs object, since we cannot use MPI (e.g.: TPU) to launch all the processes.
        self.backendFuncs.sayHello()  # Informs us where each process is running.
        groups = self.backendFuncs.get_groups()
        num_pgs = len(groups)

        self.comm_size = world_size
        self.global_rank = global_rank

        comms_utils.fixBeginSize(
            commsParams, world_size
        )  # Ensuring that all-reduce and all-to-all has at least one member per rank.
        allSizes = comms_utils.getSizes(
            commsParams.beginSize, commsParams.endSize, commsParams.stepFactor
        )  # Given the begin-size, end-size, step-factor what are the message sizes to iterate on.

        if global_rank == 0:
            print(
                f"[Rank {global_rank:>3}] allSizes: {allSizes} local_rank: {local_rank} element_size: {commsParams.element_size}"
            )

        # Copy experiment parameters onto the shared collectiveArgs holder.
        self.collectiveArgs.group = group
        self.collectiveArgs.groups = groups
        self.collectiveArgs.num_pgs = num_pgs
        self.collectiveArgs.device = curDevice
        self.collectiveArgs.world_size = world_size
        self.collectiveArgs.numIters = commsParams.numIters
        self.collectiveArgs.numWarmupIters = commsParams.numWarmupIters
        self.collectiveArgs.global_rank = global_rank
        self.collectiveArgs.backendFuncs = self.backendFuncs
        self.collectiveArgs.collective = commsParams.collective
        op = self.backendFuncs.get_reduce_op("sum")
        self.collectiveArgs.op = op
        self.collectiveArgs.srcOrDst = commsParams.srcOrDst
        self.collectiveArgs.src_ranks = commsParams.src_ranks
        self.collectiveArgs.dst_ranks = commsParams.dst_ranks
        self.collectiveArgs.pair = commsParams.pair
        self.collectiveArgs.collective_pair = commsParams.collective_pair
        self.collectiveArgs.pt2pt = commsParams.pt2pt
        self.collectiveArgs.window = commsParams.window
        # blockingFlag == 1 means synchronous (blocking) collectives.
        self.collectiveArgs.asyncOp = False if commsParams.blockingFlag == 1 else True

        # Quantized collectives need extra context (see comms_utils).
        if commsParams.bitwidth < 32:
            comms_utils.initQuantCommCtx(self.collectiveArgs, commsParams)

        # Validate rank configuration for the selected pattern.
        if self.collectiveArgs.collective == "pt2pt":
            self.checkPt2PtRanks()
        else:
            self.checkCollectiveRanks()

        computeFunc = self.backendFuncs.noop
        if (
            commsParams.mode != "comms"
        ):  # Compute mode related initialization if not in comms-only mode
            if commsParams.kernel == "gemm":
                computeFunc = self.backendFuncs.gemm

                mm_dim = commsParams.mm_dim
                # Three random square operands plus an output buffer for GEMM.
                in1 = np.random.rand(mm_dim, mm_dim)
                MMin1 = torch.FloatTensor(in1).to(curDevice)
                in2 = np.random.rand(mm_dim, mm_dim)
                MMin2 = torch.FloatTensor(in2).to(curDevice)
                in3 = np.random.rand(mm_dim, mm_dim)
                MMin3 = torch.FloatTensor(in3).to(curDevice)
                MMout = self.backendFuncs.alloc_empty(
                    [mm_dim, mm_dim], commsParams.dtype, curDevice
                )
                self.collectiveArgs.MMout = MMout
                self.collectiveArgs.MMin1 = MMin1
                self.collectiveArgs.MMin2 = MMin2
                self.collectiveArgs.MMin3 = MMin3
                self.collectiveArgs.numComputePerColl = commsParams.num_compute
            elif commsParams.kernel == "emb_lookup":
                computeFunc = self.backendFuncs.emb_lookup

                emb_dim = commsParams.emb_dim
                num_embeddings = commsParams.num_embs
                avg_length = commsParams.avg_len
                batch_size = commsParams.batch_size
                print(
                    f"emb_dim {emb_dim} num_embs {num_embeddings} avg_len {avg_length} bs {batch_size}"
                )
                self.collectiveArgs.EmbWeights = self.backendFuncs.alloc_empty(
                    [num_embeddings, emb_dim], torch.double, curDevice
                )
                self.collectiveArgs.TableOffsets = torch.LongTensor(
                    [0, num_embeddings]
                ).to(curDevice)
                self.collectiveArgs.Indices = torch.LongTensor(
                    np.random.randint(0, num_embeddings - 1, avg_length * batch_size)
                ).to(curDevice)
                # Every sample performs exactly avg_length lookups.
                lengths = np.ones((1, batch_size)) * avg_length
                flat_lengths = lengths.flatten()
                self.collectiveArgs.Offsets = torch.LongTensor(
                    [0] + np.cumsum(flat_lengths).tolist()
                ).to(curDevice)
                self.collectiveArgs.LookupOut = self.backendFuncs.alloc_empty(
                    [batch_size, emb_dim], torch.double, curDevice
                )
                self.collectiveArgs.AvgLengths = avg_length
                self.collectiveArgs.numComputePerColl = commsParams.num_compute

        return (
            local_rank,
            global_rank,
            world_size,
            group,
            curDevice,
            curHwDevice,
            allSizes,
            computeFunc,
        )
    def gatherBenchTime(self, collectiveArgs, commsParams, timeUsElapsedList):
        """All-gather each rank's measured times so rank 0 can report them.

        *timeUsElapsedList* is this rank's list of measurements (microseconds);
        the return value is a list of per-rank tensors produced by all_gather.
        """
        # Push the list to device, then do an all-gather.
        timeElapsedTensor = torch.tensor(
            timeUsElapsedList, device=self.backendFuncs.get_device()
        )
        collectiveArgs.opTensor = None
        if commsParams.backend != "xla":
            # Pre-allocate one output slot per rank for the gather.
            timeList = list(torch.ones(
                (self.comm_size,) + timeElapsedTensor.shape,
                dtype=timeElapsedTensor.dtype,
                device=timeElapsedTensor.device,
            ).unbind(0))
            collectiveArgs.opTensor = timeList
        # NOTE(review): when backend == "xla", `timeList` is never assigned and
        # the `return timeList` below would raise UnboundLocalError — confirm
        # the intended xla return value against the upstream benchmark.

        collectiveArgs.ipTensor = timeElapsedTensor
        collectiveArgs.asyncOp = False
        collectiveArgs.dataSize = (
            timeElapsedTensor.nelement() * timeElapsedTensor.element_size()
        )
        collectiveArgs.numElements = timeElapsedTensor.nelement()

        # use allgather as all process group should support it
        self.backendFuncs.all_gather(collectiveArgs)
        self.backendFuncs.complete_accel_ops(collectiveArgs)

        return timeList
def printPreamble(self, commsParams):
logger.debug(f"\tcommsParams: {str(commsParams.__dict__)}")
header = "\n\tCOMMS-RES"
if self.collectiveArgs.collective == "pt2pt":
header += "{:>15}{:>20}{:>10}{:>10}{:>25}{:>10}{:>10}{:>15}{:>15}{:>18}{:>18}".format(
"size (B)",
"pingLatency(us):p50",
"p75",
"p95",
"pingPongLatency(us):p50",
"p75",
"p95",
"avgUniBW(GB/s)",
"avgBiBW(GB/s)",
"totalUniBW(GB/s)",
"totalBiBW(GB/s)",
)
else:
if commsParams.bitwidth < 32:
header += "-QUANT\t{:>15}{:>18}{:>25}{:>15}{:>15}{:>15}".format(
"size (B)",
"nElementsPerRank",
"P95 Latency(us): Quant",
"Comms",
"De-Quant",
"Overall",
)
elif not self.collectiveArgs.pair:
header += (
"{:>15}{:>18}{:>18}{:>12}{:>12}{:>12}{:>12}{:>15}{:>12}".format(
"size (B)",
"nElementsPerRank",
"Latency(us):p50",
"p75",
"p95",
"Min",
"Max",
"AlgBW(GB/s)",
"BusBW(GB/s)",
)
)
else:
header += "{:>15}{:>18}{:>22}{:>18}{:>12}{:>12}{:>12}{:>12}{:>15}{:>12}".format(
"total-size (B)",
"nElementsPerRank",
"nElementsPairPerRank",
"Latency(us):p50",
"p75",
"p95",
"Min",
"Max",
"AlgBW(GB/s)",
"BusBW(GB/s)",
)
print(header)
    def reportBenchTimeCollWithQuant(
        self,
        commsParams,
        results,
        tensorList,
        quantTimeTensorList,
        dequantTimeTensorList,
    ):
        """Print one result row for a quantized collective (bitwidth < 32).

        Reports p95 latency split into quantization, communication, and
        de-quantization components, gathered across all ranks.
        """
        if commsParams.backend == "xla":
            # xla gathers into a single tensor; flatten it to a 1-D numpy array.
            latencyAcrossRanks = torch.transpose(tensorList.view(-1, 1), 0, 1)[0]
            latencyAcrossRanks = latencyAcrossRanks.cpu().detach().numpy()
            # quant tensor
            quantLatencyAcrossRanks = torch.transpose(
                quantTimeTensorList.view(-1, 1), 0, 1
            )[0]
            quantLatencyAcrossRanks = quantLatencyAcrossRanks.cpu().detach().numpy()
            # dequant tensor
            dequantLatencyAcrossRanks = torch.transpose(
                dequantTimeTensorList.view(-1, 1), 0, 1
            )[0]
            dequantLatencyAcrossRanks = dequantLatencyAcrossRanks.cpu().detach().numpy()
        else:
            if isinstance(tensorList, list):
                tensorList = [t.cpu().detach().numpy() for t in tensorList]
            latencyAcrossRanks = np.array(tensorList)
            # quant tensor
            quantLatencyAcrossRanks = np.array(quantTimeTensorList)
            # dequant tensor
            dequantLatencyAcrossRanks = np.array(dequantTimeTensorList)

        p95 = np.percentile(latencyAcrossRanks, 95)

        quant_p95 = np.percentile(quantLatencyAcrossRanks, 95)
        dequant_p95 = np.percentile(dequantLatencyAcrossRanks, 95)

        # Comms time is the overall p95 minus the (de-)quantization overhead.
        print(
            "\tCOMMS-RES-QUANT\t{:>15}{:>18}{:>25}{:>15}{:>15}{:>15}".format(
                results["memSize"],
                str("%d" % (results["numElements"])),
                str("%.1f" % (quant_p95)),
                str("%.1f" % (p95 - quant_p95 - dequant_p95)),
                str("%.1f" % (dequant_p95)),
                str("%.1f" % (p95)),
                # str("%.3f" % (algBW)),
                # str("%.3f" % (busBW)),
            )
        )
def reportBenchTime(
self,
commsParams,
results,
tensorList,
quantTimeTensorList,
dequantTimeTensorList,
):
# convernt num_elements to # of elements per rank
if commsParams.collective in ("all_to_all", "all_to_allv"):
results["numElements"] = int(
results["numElements"] // commsParams.comms_world_info.world_size
)
if commsParams.collective == "pt2pt":
self.reportBenchTimePt2Pt(commsParams, tensorList, results)
elif commsParams.bitwidth < 32:
self.reportBenchTimeCollWithQuant(
commsParams,
results,
tensorList,
quantTimeTensorList,
dequantTimeTensorList,
)
else:
self.reportBenchTimeColl(commsParams, results, tensorList)
    def reportBenchTimeColl(self, commsParams, results, tensorList):
        """Print one result row for a (non-quantized) collective.

        Computes latency percentiles over the ranks that actually participate
        in the collective and prints them with algorithmic/bus bandwidth.
        """
        if commsParams.backend == "xla":
            latencyAcrossRanks = torch.transpose(tensorList.view(-1, 1), 0, 1)[0]
            latencyAcrossRanks = latencyAcrossRanks.cpu().detach().numpy()
        else:
            if isinstance(tensorList, list):
                tensorList = [t.cpu().detach().numpy() for t in tensorList]
            latencyAcrossRanks = np.array(tensorList)

        logger.debug(f"Latency across all ranks: {latencyAcrossRanks}")

        # Include only communicating ranks
        if self.collectiveArgs.collective == "multicast":
            commRanks = [self.collectiveArgs.srcOrDst] + self.collectiveArgs.dst_ranks
        elif self.collectiveArgs.collective == "incast":
            commRanks = [self.collectiveArgs.srcOrDst] + self.collectiveArgs.src_ranks
        else:
            commRanks = range(self.collectiveArgs.world_size)

        latencyAcrossCommRanks = latencyAcrossRanks[commRanks]
        logger.debug(
            "Latency across communicating ranks (%s): %s"
            % (commRanks, latencyAcrossCommRanks)
        )

        p50 = np.percentile(latencyAcrossCommRanks, 50)
        p75 = np.percentile(latencyAcrossCommRanks, 75)
        p95 = np.percentile(latencyAcrossCommRanks, 95)
        minlat = np.amin(latencyAcrossCommRanks)
        maxlat = np.amax(latencyAcrossCommRanks)

        # adjust busBW: quantization shrinks the bytes actually on the wire
        busBW = results["busBW"] * (commsParams.bitwidth / 32.0)

        if not self.collectiveArgs.pair:
            print(
                "\tCOMMS-RES{:>15}{:>18}{:>18}{:>12}{:>12}{:>12}{:>12}{:>15}{:>12}".format(
                    results["memSize"],
                    str("%d" % (results["numElements"])),
                    str("%.1f" % (p50)),
                    str("%.1f" % (p75)),
                    str("%.1f" % (p95)),
                    str("%.1f" % (minlat)),
                    str("%.1f" % (maxlat)),
                    str("%.3f" % (results["algBW"])),
                    str("%.3f" % (busBW)),
                )
            )
        else:
            # convert to # of elements per rank for the paired collective too
            if commsParams.collective_pair in ("all_to_all", "all_to_allv"):
                results["numElements_pair"] = int(
                    results["numElements_pair"]
                    // commsParams.comms_world_info.world_size
                )
            print(
                "\tCOMMS-RES{:>15}{:>18}{:>22}{:>18}{:>12}{:>12}{:>12}{:>12}{:>15}{:>12}".format(
                    results["memSize"],
                    str("%d" % (results["numElements"])),
                    str("%d" % (results["numElements_pair"])),
                    str("%.1f" % (p50)),
                    str("%.1f" % (p75)),
                    str("%.1f" % (p95)),
                    str("%.1f" % (minlat)),
                    str("%.1f" % (maxlat)),
                    str("%.3f" % (results["algBW"])),
                    str("%.3f" % (busBW)),
                )
            )
    def reportBenchTimePt2Pt(self, commsParams, resultsAcrossRanks, results):
        """Print one result row for the pt2pt benchmark.

        Each rank contributes a 4-element record (ping latency, ping-pong
        latency, unidirectional BW, bidirectional BW); percentiles and
        aggregate bandwidths are computed over communicating ranks only.
        """
        pingLatencyAcrossRanks = []
        pingPongLatencyAcrossRanks = []
        uniBWAcrossRanks = []
        biBWAcrossRanks = []
        # idx = 0
        # Unpack the per-rank 4-tuples gathered by gatherBenchTime.
        for curRankTensor in resultsAcrossRanks:
            pingLatencyAcrossRanks.append(curRankTensor[0].item())
            pingPongLatencyAcrossRanks.append(curRankTensor[1].item())
            uniBWAcrossRanks.append(curRankTensor[2].item())
            biBWAcrossRanks.append(curRankTensor[3].item())

        pingLatencyAcrossRanks = np.array(pingLatencyAcrossRanks)
        pingPongLatencyAcrossRanks = np.array(pingPongLatencyAcrossRanks)
        uniBWAcrossRanks = np.array(uniBWAcrossRanks)
        biBWAcrossRanks = np.array(biBWAcrossRanks)

        # Include only communicating ranks
        commRanks = self.collectiveArgs.src_ranks + self.collectiveArgs.dst_ranks
        pingLatencyAcrossCommRanks = pingLatencyAcrossRanks[commRanks]
        pingPongLatencyAcrossCommRanks = pingPongLatencyAcrossRanks[commRanks]
        uniBWAcrossCommRanks = uniBWAcrossRanks[commRanks]
        biBWAcrossCommRanks = biBWAcrossRanks[commRanks]

        logger.debug(
            "Ping latency across communicating ranks (%s): %s"
            % (commRanks, pingLatencyAcrossCommRanks)
        )
        logger.debug(
            "PingPong latency across communicating ranks (%s): %s"
            % (commRanks, pingPongLatencyAcrossCommRanks)
        )
        logger.debug(
            "UniBW across all communicating ranks (%s): %s"
            % (commRanks, uniBWAcrossCommRanks)
        )
        logger.debug(
            "BiBW across all communicating ranks (%s): %s"
            % (commRanks, biBWAcrossCommRanks)
        )

        avgUniBW = np.mean(uniBWAcrossCommRanks)
        avgBiBW = np.mean(biBWAcrossCommRanks)
        # Each transfer is counted by both endpoints, hence the division by 2.
        totalUniBW = np.sum(uniBWAcrossCommRanks) / 2
        totalBiBW = np.sum(biBWAcrossCommRanks) / 2

        ping_p50 = np.percentile(pingLatencyAcrossCommRanks, 50)
        ping_p75 = np.percentile(pingLatencyAcrossCommRanks, 75)
        ping_p95 = np.percentile(pingLatencyAcrossCommRanks, 95)

        ping_pong_p50 = np.percentile(pingPongLatencyAcrossCommRanks, 50)
        ping_pong_p75 = np.percentile(pingPongLatencyAcrossCommRanks, 75)
        ping_pong_p95 = np.percentile(pingPongLatencyAcrossCommRanks, 95)

        print(
            "\tCOMMS-RES{:>15}{:>20}{:>10}{:>10}{:>25}{:>10}{:>10}{:>15}{:>15}{:>18}{:>18}".format(
                results["memSize"],
                str("%.1f" % (ping_p50)),
                str("%.1f" % (ping_p75)),
                str("%.1f" % (ping_p95)),
                str("%.1f" % (ping_pong_p50)),
                str("%.1f" % (ping_pong_p75)),
                str("%.1f" % (ping_pong_p95)),
                str("%.3f" % (avgUniBW)),
                str("%.3f" % (avgBiBW)),
                str("%.3f" % (totalUniBW)),
                str("%.3f" % (totalBiBW)),
            )
        )
    def benchTime(self, index, commsParams, backendFuncs):
        """Main benchmark loop: sweep message sizes, run, and report.

        For every size in the sweep this prepares tensors, runs either the
        pt2pt benchmark or the configured collective (optionally overlapped
        with a paired collective and/or a compute kernel), gathers per-rank
        timings, and lets rank 0 print one result row.

        NOTE(review): *index* is unused in this body; presumably kept for the
        benchTime callback signature expected by commsParamsHolder — confirm.
        """
        # Get NW stack specific parameters
        (
            local_rank,
            global_rank,
            world_size,
            group,
            curDevice,
            curHwDevice,
            allSizes,
            computeFunc,
        ) = self.initCollectiveArgs(commsParams)

        backendFuncs.sync_barrier(self.collectiveArgs)
        if global_rank == 0:
            self.printPreamble(commsParams)

        for curSize in allSizes:
            results = {}
            timeUsElapsedList = []
            quantTimeElapsedList = []
            dequantTimeElapsedList = []
            numElements = int(curSize // commsParams.element_size)
            collectiveFunc = self.backendFuncs.noop
            collectiveFunc_pair = self.backendFuncs.noop

            if (
                commsParams.mode != "compute"
            ):  # comms specific initializations if not in compute-only mode
                # set corresponding function pointers
                if commsParams.collective != "pt2pt":
                    collectiveFunc = backendFuncs.collectiveFunc[commsParams.collective]

                (
                    self.collectiveArgs.ipTensor,
                    self.collectiveArgs.opTensor,
                ) = self.prepComm(
                    curComm={
                        "in_msg_size": numElements,
                        "out_msg_size": numElements,
                        "world_size": world_size,
                    },
                    commsParams=commsParams,
                )

            # Setup the arguments.
            self.collectiveArgs.dataSize = curSize
            self.collectiveArgs.numElements = numElements
            self.collectiveArgs.waitObj = []
            results["numElements"] = numElements

            if (
                commsParams.pair and commsParams.mode != "compute"
            ):  # comms-pair specific initializations if not in compute-only mode:
                # set corresponding function pointers
                collectiveFunc_pair = backendFuncs.collectiveFunc[
                    commsParams.collective_pair
                ]
                # TODO: allow user to set specific size
                # Setup the arguments.
                self.collectiveArgs.dataSize_pair = curSize
                self.collectiveArgs.numElements_pair = int(
                    self.collectiveArgs.dataSize_pair // commsParams.element_size
                )
                results["numElements_pair"] = self.collectiveArgs.numElements_pair

                (
                    self.collectiveArgs.ipTensor_pair,
                    self.collectiveArgs.opTensor_pair,
                ) = self.prepComm(
                    curComm={
                        "in_msg_size": self.collectiveArgs.numElements_pair,
                        "out_msg_size": self.collectiveArgs.numElements_pair,
                        "world_size": world_size,
                    },
                    commsParams=commsParams,
                )

            # self.collectiveArgs has all the information on the experiment.
            if commsParams.collective == "pt2pt":
                results.update(self.runPt2Pt())

                timeUsElapsedList = [
                    np.mean(np.array(results["pingPerIterNS"])) / 1e3,
                    np.mean(np.array(results["pingPongPerIterNS"])) / 1e3,
                    results["avgUniBW"],
                    results["avgBiBW"],
                ]  # time in US
                if (
                    global_rank in self.collectiveArgs.src_ranks
                    or global_rank in self.collectiveArgs.dst_ranks
                ):
                    logger.debug(timeUsElapsedList)
            else:
                results.update(
                    self.runColl(
                        comm_fn=collectiveFunc,
                        compute_fn=computeFunc,
                        comm_fn_pair=collectiveFunc_pair,
                    )
                )
                timeUsElapsedList = [results["timeUS"]]

            # perfom data validation check on the final opTensor
            if commsParams.dcheck == 1:
                self.dcheck(commsParams, curSize, self.collectiveArgs.opTensor)

            backendFuncs.clear_memory(self.collectiveArgs)

            # gather quantization overhead if enabled
            if commsParams.bitwidth < 32:
                # calculate average (de-)quantization overhead
                results["quantTimeUS"] = (
                    self.collectiveArgs.quant_time.getTimeUS()
                    / self.collectiveArgs.numIters
                )
                results["dequantTimeUS"] = (
                    self.collectiveArgs.dequant_time.getTimeUS()
                    / self.collectiveArgs.numIters
                )
                quantTimeElapsedList.append(results["quantTimeUS"])
                dequantTimeElapsedList.append(results["dequantTimeUS"])

                logger.debug(quantTimeElapsedList)
                quantTimeElapsedList = self.gatherBenchTime(
                    self.collectiveArgs, commsParams, quantTimeElapsedList
                )
                dequantTimeElapsedList = self.gatherBenchTime(
                    self.collectiveArgs, commsParams, dequantTimeElapsedList
                )

            # gather and report performance to stdout
            tensorList = self.gatherBenchTime(
                self.collectiveArgs, commsParams, timeUsElapsedList
            )
            if global_rank == 0:
                self.reportBenchTime(
                    commsParams,
                    results,
                    tensorList,
                    quantTimeElapsedList,
                    dequantTimeElapsedList,
                )

            self.backendFuncs.sync_barrier(
                self.collectiveArgs, desc=f"curSize_{curSize}"
            )

        comms_utils.clearQuantCommCtx(self.collectiveArgs)

        # wait rank 0 reports results to avoid other ranks mess up the output
        self.backendFuncs.sync_barrier(self.collectiveArgs, "benchtime")
    def runBench(self, comms_world_info, commsParams):
        """Instantiate the requested backend and run the benchmark.

        Exits via comms_utils.gracefulExit() on an unsupported network stack;
        re-raises ValueError from the backend (with a hint for UCC).
        """
        # Init the desired backend
        if commsParams.nw_stack == "pytorch-dist":
            from pytorch_dist_backend import PyTorchDistBackend

            backendObj = PyTorchDistBackend(comms_world_info, commsParams)
        elif commsParams.nw_stack == "pytorch-xla-tpu":
            from pytorch_tpu_backend import PyTorchTPUBackend

            backendObj = PyTorchTPUBackend(comms_world_info, commsParams)
        else:
            logger.error("Unsupported NW stack! ")
            comms_utils.gracefulExit()

        self.backendFuncs = backendObj
        try:
            backendObj.benchmark_comms()
        except ValueError as ve:
            if commsParams.backend == "ucc":
                logger.critical("PyTorch UCC not implemented? {}".format(repr(ve)))
            raise
def main():
    """Entry point: parse arguments, build parameter holders, run the benchmark."""
    collBenchObj = commsCollBench()

    ### parse arguments ###
    parser = argparse.ArgumentParser(
        description="PARAM-Comm Benchmark",
        formatter_class=MultilineFormatter,
    )
    args, leftovers = collBenchObj.readArgs(parser)

    collBenchObj.checkArgs(args)

    comms_env_params = comms_utils.read_comms_env_vars()
    if comms_env_params["global_rank"] == 0:
        print("\t MPI environment: %s " % (str(comms_env_params)))
        print(
            "\t backend: %s nw-stack: %s mode: %s args.b: %d args.e: %d args.f: %d args.z: %s args.master_ip: %s "
            % (
                args.backend,
                args.nw_stack,
                args.mode,
                args.b,
                args.e,
                args.f,
                args.z,
                args.master_ip,
            )
        )

    # Size in bytes of one element of the selected dtype.
    element_size = torch.ones([1], dtype=args.dtype).element_size()

    comms_world_info = comms_utils.comms_world_info_holder(
        args.master_ip, args.master_port, args.num_tpu_cores, comms_env_params
    )
    commsParams = comms_utils.commsParamsHolder(
        args, comms_world_info, element_size, collBenchObj.benchTime
    )

    # Overlapped pair mode uses two process groups to post both collectives.
    if args.pair and args.overlap_pair_pgs:
        commsParams.num_pgs = 2
    collBenchObj.runBench(comms_world_info, commsParams)
# Standard script entry point.
if __name__ == "__main__":
    main()
| 41.207516 | 180 | 0.567865 |
from __future__ import absolute_import, division, print_function, unicode_literals
import argparse
import logging
import time
import comms_utils
import numpy as np
import torch
from comms_utils import paramCommsBench, ensureTensorFlush
"all_to_allv",
"all_gather",
"broadcast",
"reduce_scatter",
"reduce_scatter_base",
"all_gather_base",
"incast",
"multicast",
]
# Point-to-point traffic patterns accepted by the --pt2pt option.
pt2ptPatterns = [
    "one2one",
    "pairwise",
]

# Module-level logger; configured by the hosting application.
logger = logging.getLogger(__name__)
class MultilineFormatter(argparse.ArgumentDefaultsHelpFormatter):
def _split_lines(self, text, width):
if text.startswith("R|"):
return text[2:].splitlines()
return argparse.ArgumentDefaultsHelpFormatter._split_lines(self, text, width)
class commsCollBench(paramCommsBench):
    def __init__(self):
        # Only PyTorch-based network stacks are supported by this benchmark.
        super().__init__(supportedNwstacks=["pytorch-dist", "pytorch-xla-tpu"])
def readArgs(self, parser):
super().readArgs(parser)
parser.add_argument(
"--w", type=int, default=5, help="number of warmup iterations"
)
parser.add_argument(
"--n", type=int, default=5, help="number of iterations"
)
parser.add_argument(
"--mode",
type=str,
default="comms",
help="benchmark mode",
choices=["comms", "compute", "dlrm", "comms-compute"],
)
parser.add_argument(
"--b", type=str, default="8", help="minimum size, in bytes, to start with"
)
parser.add_argument(
"--e", type=str, default="64", help="maximum size, in bytes, to end at"
)
parser.add_argument(
"--f", type=int, default=2, help="multiplication factor between sizes"
)
parser.add_argument(
"--collective",
type=str,
default="all_reduce",
help="Collective operation to be evaluated",
choices=supportedCollectives,
)
parser.add_argument(
"--kernel",
type=str,
default="gemm",
help="Compute kernel, used for comms-compute or compute mode",
choices=["gemm", "emb_lookup"],
)
parser.add_argument(
"--num-compute",
type=int,
default=100,
help="one collective for every NUM_COMPUTE compute kernels",
)
parser.add_argument(
"--mm-dim",
type=int,
default=100,
help="dimension size for GEMM compute kernel",
)
parser.add_argument(
"--emb-dim",
type=int,
default=128,
help="dimension size for Embedding table compute kernel",
)
parser.add_argument(
"--num-embs",
type=int,
default=100000,
help="Embedding table hash size for Embedding table compute kernel",
)
parser.add_argument(
"--avg-len",
type=int,
default=28,
help="Average lookup operations per sample",
) d_argument(
"--batch-size",
type=int,
default=512,
help="number of samples reading the table concurrently",
) "--root", type=int, default=0, help="root process for reduce benchmark"
)
parser.add_argument(
"--src-ranks",
type=str,
nargs="?",
help="R|src ranks for many-to-one incast pattern or pt2pt.\n"
"List of ranks separated by comma or a range specified by start:end.\n"
"Pt2pt one2one should set only one rank.\n"
"The default value of incast includes all ranks, pt2pt includes rank 0.",
)
parser.add_argument(
"--dst-ranks",
type=str,
nargs="?",
help="R|dst ranks for one-to-many multicast pattern or pt2pt.\n"
"List of ranks separated by comma or a range specified by start:end.\n"
"Pt2pt one2one should set only one rank\n"
"The default value of multicast includes all ranks, pt2pt includes rank 1.",
)
parser.add_argument(
"--pair",
action="store_true",
default=False,
help="Toggle to enable collective pair mode",
)
parser.add_argument(
"--collective-pair",
type=str,
default="all_reduce",
help="Collective pair operation to be evaluated",
choices=supportedCollectives,
)
parser.add_argument(
"--overlap-pair-pgs",
action="store_true",
default=False,
help="Toggle to enable overlapping collective pair with two pgs",
)
parser.add_argument(
"--pt2pt",
type=str,
default=None,
help="point to point pattern",
choices=pt2ptPatterns,
)
parser.add_argument(
"--window",
type=int,
default=100,
help="window size for pt2pt throughput test",
)
return parser.parse_known_args()
    def checkArgs(self, args):
        """Validate and normalize parsed arguments in place.

        Resolves size strings to byte counts, maps the dtype name, applies
        the --pt2pt override of --collective, and enforces backend/device and
        quantization constraints. Exits or raises on invalid combinations.
        """
        super().checkArgs(args)

        if args.pt2pt is not None:
            # --pt2pt implies the pt2pt "collective"; the pattern must be known.
            args.collective = "pt2pt"
            if args.pt2pt not in pt2ptPatterns:
                logger.error(
                    f"Specified pt2pt pattern: {args.pt2pt} is not one of the supported pt2pt patterns: {str(pt2ptPatterns)}"
                )
                comms_utils.gracefulExit()

        # --b/--e accept human-readable sizes (e.g. "4K"); convert to bytes.
        args.b = comms_utils.parsesize(args.b)
        args.e = comms_utils.parsesize(args.e)
        args.dtype = self.dtypeMap[args.data_type]

        if args.b < 1:
            logger.warning(
                f"Starting size (--b {args.b}) should be greater than 1 byte...fix and continue"
            )
            args.b = 1

        if args.e < args.b:
            logger.warning(
                f"the begin-size (--b {args.b}) is larger than the end-size (--e {args.e})"
            )

        if args.device == "cpu" and args.backend == "nccl":
            raise ValueError(f"NCCL is not supported for device type {args.device}")

        # Data validation only works for blocking reductions.
        if args.c == 1 and args.z == 0 and args.collective in ("all_reduce", "reduce", "reduce_scatter"):
            logger.warning(
                f"Data validation is not supported for {args.collective} in non-blocking mode, disabled and continue"
            )
            args.c = 0

        # Sanity-check quantization settings when a reduced bitwidth is used.
        if args.bitwidth < 32:
            if args.device != "cuda":
                logger.error(
                    f"collective quantization may not be fully supported for {args.device}"
                )
            comms_utils.checkQuantArgs(
                args.collective,
                args.dtype,
                args.b,
                args.quant_a2a_embedding_dim,
                args.z,
            )
    def runColl(self, comm_fn=None, compute_fn=None, comm_fn_pair=None):
        """Time the given collective (and optional pair/compute functions).

        Runs numWarmupIters + numIters iterations; only the post-warmup
        iterations are timed. Returns a dict with keys "timeUS" (average
        per-iteration time), "algBW", "busBW", and "memSize".
        """
        self.backendFuncs.complete_accel_ops(self.collectiveArgs, initOp=True)
        self.backendFuncs.sync_barrier(self.collectiveArgs, desc="runColl_begin")

        elapsedTimeNS = 0.0
        is_blocking = not self.collectiveArgs.asyncOp
        # noop function pointers disable the corresponding phase.
        enable_comms = False if (comm_fn is None or comm_fn == self.backendFuncs.noop) else True
        enable_compute = False if (compute_fn is None or compute_fn == self.backendFuncs.noop) else True
        enable_comms_pair = False if (comm_fn_pair is None or comm_fn_pair == self.backendFuncs.noop) else True

        # Pair mode forces async ops so the two collectives can overlap.
        if enable_comms_pair:
            self.collectiveArgs.asyncOp = True
        for nIter in range(
            self.collectiveArgs.numWarmupIters + self.collectiveArgs.numIters
        ):
            if nIter == self.collectiveArgs.numWarmupIters:
                # Warmup finished: drain outstanding ops and reset all timers.
                self.backendFuncs.complete_accel_ops(self.collectiveArgs)
                ensureTensorFlush(self.collectiveArgs.opTensor)
                if enable_comms_pair:
                    ensureTensorFlush(self.collectiveArgs.opTensor_pair)
                elapsedTimeNS = 0.0
                self.collectiveArgs.quant_time.reset()
                self.collectiveArgs.dequant_time.reset()
            if enable_comms:
                # Reset output values so a later data check is meaningful.
                self.setTensorVal(self.collectiveArgs.opTensor)
            if is_blocking:
                # Align ranks before each timed blocking iteration.
                self.backendFuncs.sync_barrier(self.collectiveArgs)

            start = time.monotonic()
            # Alternate process groups between the main and paired collective.
            self.collectiveArgs.group = self.backendFuncs.get_next_group()
            comm_fn(self.collectiveArgs)
            self.collectiveArgs.group = self.backendFuncs.get_next_group()
            comm_fn_pair(self.collectiveArgs, pair=enable_comms_pair)

            if enable_compute:
                for _ in range(self.collectiveArgs.numComputePerColl):
                    # TODO: investigate the cache effect
                    # Flush the cache
                    # _ = torch.rand(6 * 1024 * 1024 // 4).float() * 2  # V100 6MB L2 cache
                    compute_fn(self.collectiveArgs)
            if is_blocking:  # should be sychronous, wait for the collective
                self.backendFuncs.complete_accel_ops(self.collectiveArgs)
            # Measuring time.
            elapsedTimeNS += (
                time.monotonic() - start
            ) * 1e9  # keeping time in NS, helps in divising data by nanosecond

        # Drain any remaining async ops and include that time in the total.
        start = time.monotonic()  # available only in py3
        self.backendFuncs.complete_accel_ops(self.collectiveArgs)
        end = time.monotonic()  # available only in py3

        ensureTensorFlush(self.collectiveArgs.opTensor)
        if enable_comms_pair:
            ensureTensorFlush(self.collectiveArgs.opTensor_pair)

        elapsedTimeNS += (
            end - start
        ) * 1e9  # keeping time in NS, helps in divising data by nanoseconds

        memSize = self.backendFuncs.get_mem_size(self.collectiveArgs)

        avgIterNS, algBW = comms_utils.getAlgBW(
            elapsedTimeNS, memSize, self.collectiveArgs.numIters
        )
        busBW = self.backendFuncs.getBusBW(
            self.collectiveArgs.collective,
            algBW,
            self.collectiveArgs,
        )
        if enable_comms_pair:
            # Account for the paired collective's bytes and bandwidth too.
            memSize_pair = self.backendFuncs.get_mem_size(
                self.collectiveArgs, pair=enable_comms_pair
            )
            memSize += memSize_pair

            _, algBW_pair = comms_utils.getAlgBW(
                elapsedTimeNS, memSize_pair, self.collectiveArgs.numIters
            )
            algBW += algBW_pair

            busBW += self.backendFuncs.getBusBW(
                self.collectiveArgs.collective_pair,
                algBW_pair,
                self.collectiveArgs,
            )

        self.backendFuncs.sync_barrier(self.collectiveArgs, desc="runColl_end")

        results = {
            "timeUS": avgIterNS / 1e3,
            "algBW": algBW,
            "busBW": busBW,
            "memSize": memSize,
        }
        return results
    def runPt2Pt(self):
        """Run the full pt2pt benchmark suite and return its metrics.

        Warms up, then measures ping latency, ping-pong latency, and
        uni-/bidirectional bandwidth. Returns a dict with keys
        "pingPerIterNS", "pingPongPerIterNS", "avgUniBW", "avgBiBW",
        and "memSize".
        """
        self.backendFuncs.complete_accel_ops(self.collectiveArgs, initOp=True)
        # warm-up
        memSize = self.backendFuncs.get_mem_size(self.collectiveArgs)
        self.getPingLatency(self.collectiveArgs.numWarmupIters)
        self.getPingPongLatency(self.collectiveArgs.numWarmupIters)
        self.getUniBW(self.collectiveArgs.numWarmupIters, memSize)
        self.getBiBW(self.collectiveArgs.numWarmupIters, memSize)
        self.backendFuncs.sync_barrier(self.collectiveArgs, "runpt2pt_begin")
        # pt2pt benchmark
        pingPerIterNS = self.getPingLatency(self.collectiveArgs.numIters)
        pingPongPerIterNS = self.getPingPongLatency(self.collectiveArgs.numIters)
        avgUniBW = self.getUniBW(self.collectiveArgs.numIters, memSize)
        avgBiBW = self.getBiBW(self.collectiveArgs.numIters, memSize)
        self.backendFuncs.sync_barrier(self.collectiveArgs, "runpt2pt")

        results = {
            "pingPerIterNS": pingPerIterNS,
            "pingPongPerIterNS": pingPongPerIterNS,
            "avgUniBW": avgUniBW,
            "avgBiBW": avgBiBW,
            "memSize": memSize,
        }
        return results
    def getPingLatency(self, numIters):
        """Measure blocking one-way (ping) latency per iteration.

        Each src rank sends to its paired dst rank; returns a list of
        per-iteration latencies in nanoseconds (includes the final drain of
        accelerator ops).
        """
        logger.debug(
            "STATUS: begin ping test with src_ranks=%s, dst_ranks=%s."
            % (self.collectiveArgs.src_ranks, self.collectiveArgs.dst_ranks)
        )
        self.collectiveArgs.asyncOp = False
        # get one-way latency
        pingLatencyNS = []
        for _ in range(numIters):
            self.backendFuncs.sync_barrier(self.collectiveArgs)
            start = time.monotonic()
            # Ranks are paired by position: src_ranks[i] <-> dst_ranks[i].
            if self.collectiveArgs.global_rank in self.collectiveArgs.src_ranks:
                idx = self.collectiveArgs.src_ranks.index(
                    self.collectiveArgs.global_rank
                )
                self.backendFuncs.send(
                    self.collectiveArgs, self.collectiveArgs.dst_ranks[idx]
                )
            elif self.collectiveArgs.global_rank in self.collectiveArgs.dst_ranks:
                idx = self.collectiveArgs.dst_ranks.index(
                    self.collectiveArgs.global_rank
                )
                self.backendFuncs.recv(
                    self.collectiveArgs, self.collectiveArgs.src_ranks[idx]
                )
            self.backendFuncs.complete_accel_ops(self.collectiveArgs)
            pingLatencyNS.append(
                (time.monotonic() - start) * 1e9
            )  # keeping time in NS, helps in divising data by nanosecond
        logger.debug("STATUS: end ping test.")
        return pingLatencyNS
    def getPingPongLatency(self, numIters):
        """Measure blocking round-trip (ping-pong) latency per iteration.

        src sends then receives; dst receives then sends back. Returns a list
        of per-iteration round-trip latencies in nanoseconds.
        """
        logger.debug(
            "STATUS: begin ping-pong with src_ranks=%s, dst_ranks=%s."
            % (self.collectiveArgs.src_ranks, self.collectiveArgs.dst_ranks)
        )
        self.collectiveArgs.asyncOp = False
        # get round-trip latency
        pingPongLatencyNS = []
        for _ in range(numIters):
            self.backendFuncs.sync_barrier(self.collectiveArgs)
            start = time.monotonic()
            # Ranks are paired by position: src_ranks[i] <-> dst_ranks[i].
            if self.collectiveArgs.global_rank in self.collectiveArgs.src_ranks:
                idx = self.collectiveArgs.src_ranks.index(
                    self.collectiveArgs.global_rank
                )
                self.backendFuncs.send(
                    self.collectiveArgs, self.collectiveArgs.dst_ranks[idx]
                )
                self.backendFuncs.recv(
                    self.collectiveArgs, self.collectiveArgs.dst_ranks[idx]
                )
            elif self.collectiveArgs.global_rank in self.collectiveArgs.dst_ranks:
                idx = self.collectiveArgs.dst_ranks.index(
                    self.collectiveArgs.global_rank
                )
                self.backendFuncs.recv(
                    self.collectiveArgs, self.collectiveArgs.src_ranks[idx]
                )
                self.backendFuncs.send(
                    self.collectiveArgs, self.collectiveArgs.src_ranks[idx]
                )
            self.backendFuncs.complete_accel_ops(self.collectiveArgs)
            pingPongLatencyNS.append(
                (time.monotonic() - start) * 1e9
            )  # keeping time in NS, helps in divising data by nanosecond
        logger.debug("STATUS: end ping-pong test.")
        return pingPongLatencyNS
    def getUniBW(self, numIters, memSize):
        """Measure unidirectional point-to-point bandwidth.

        Posts a window of async isend/irecv operations per iteration,
        normalizes latency by the window size, and converts the mean latency
        for *memSize* bytes into bandwidth via comms_utils.getAlgBW.
        """
        logger.debug(
            "STATUS: begin UniBW test with src_ranks=%s, dst_ranks=%s."
            % (self.collectiveArgs.src_ranks, self.collectiveArgs.dst_ranks)
        )
        self.collectiveArgs.asyncOp = True
        # get unidirectional bandwidth
        uniLatencyNS = []
        for _ in range(numIters):
            self.backendFuncs.sync_barrier(self.collectiveArgs)
            start = time.monotonic()
            # Post `window` overlapping transfers, then wait for all of them.
            for w in range(self.collectiveArgs.window):
                if self.collectiveArgs.global_rank in self.collectiveArgs.src_ranks:
                    idx = self.collectiveArgs.src_ranks.index(
                        self.collectiveArgs.global_rank
                    )
                    self.backendFuncs.isend(
                        self.collectiveArgs, self.collectiveArgs.dst_ranks[idx], tag=w
                    )
                elif self.collectiveArgs.global_rank in self.collectiveArgs.dst_ranks:
                    idx = self.collectiveArgs.dst_ranks.index(
                        self.collectiveArgs.global_rank
                    )
                    self.backendFuncs.irecv(
                        self.collectiveArgs, self.collectiveArgs.src_ranks[idx], tag=w
                    )
            self.backendFuncs.complete_accel_ops(self.collectiveArgs)
            uniLatencyNS.append(
                (time.monotonic() - start) * 1e9
            )  # keeping time in NS, helps in divising data by nanosecond
        # Normalize to per-message latency, then average across iterations.
        uniLatencyNS = [lat / self.collectiveArgs.window for lat in uniLatencyNS]
        uniLatencyNS = np.mean(np.array(uniLatencyNS))
        _, avgUniBW = comms_utils.getAlgBW(uniLatencyNS, memSize, 1)
        logger.debug("STATUS: end UniBW test.")
        return avgUniBW
def getBiBW(self, numIters, memSize):
logger.debug(
"STATUS: begin BiBW test with src_ranks=%s, dst_ranks=%s."
% (self.collectiveArgs.src_ranks, self.collectiveArgs.dst_ranks)
)
self.collectiveArgs.asyncOp = True
# get bidirectional bandwidth
biLatencyNS = []
for _ in range(numIters):
self.backendFuncs.sync_barrier(self.collectiveArgs)
start = time.monotonic()
for w in range(self.collectiveArgs.window):
if self.collectiveArgs.global_rank in self.collectiveArgs.src_ranks:
idx = self.collectiveArgs.src_ranks.index(
self.collectiveArgs.global_rank
)
self.backendFuncs.isend(
self.collectiveArgs, self.collectiveArgs.dst_ranks[idx], tag=w
)
self.backendFuncs.irecv(
self.collectiveArgs,
self.collectiveArgs.dst_ranks[idx],
tag=w + self.collectiveArgs.window,
)
elif self.collectiveArgs.global_rank in self.collectiveArgs.dst_ranks:
idx = self.collectiveArgs.dst_ranks.index(
self.collectiveArgs.global_rank
)
self.backendFuncs.irecv(
self.collectiveArgs, self.collectiveArgs.src_ranks[idx], tag=w
)
self.backendFuncs.isend(
self.collectiveArgs,
self.collectiveArgs.src_ranks[idx],
tag=w + self.collectiveArgs.window,
)
self.backendFuncs.complete_accel_ops(self.collectiveArgs)
biLatencyNS.append(
(time.monotonic() - start) * 1e9
) # keeping time in NS, helps in divising data by nanosecond
biLatencyNS = [lat / self.collectiveArgs.window for lat in biLatencyNS]
biLatencyNS = np.mean(np.array(biLatencyNS))
_, avgBiBW = comms_utils.getAlgBW(biLatencyNS, 2 * memSize, 1)
logger.debug("STATUS: end UniBW test.")
return avgBiBW
def checkPt2PtRanks(self):
    """Validate and default-fill the src/dst rank lists for pt2pt runs.

    Falls back to src_ranks=[0] and dst_ranks=[1] when unset, then enforces
    the constraints of the selected pt2pt mode; any violation aborts the
    whole run. Rank 0 echoes the final configuration to stdout.
    """
    args = self.collectiveArgs
    # Fill in defaults for unset (empty) rank lists.
    if not args.src_ranks:
        args.src_ranks = [0]
    if not args.dst_ranks:
        args.dst_ranks = [1]
    # Mode-specific sanity checks.
    if args.pt2pt == "one2one":
        # Exactly one sender and one receiver are allowed.
        if len(args.src_ranks) > 1 or len(args.dst_ranks) > 1:
            if self.global_rank == 0:
                logger.error(
                    "One2one Pt2Pt requires only a single rank is specified in src_ranks and dst_ranks! "
                )
            comms_utils.gracefulExit()
    elif args.pt2pt == "pairwise":
        # Pairwise needs a one-to-one pairing: equal list lengths...
        if len(args.src_ranks) != len(args.dst_ranks):
            if self.global_rank == 0:
                logger.error(
                    "Pairwise Pt2Pt requires identical number of members in src_ranks and dst_ranks! "
                )
            comms_utils.gracefulExit()
        # ...and no rank appearing on both sides.
        if set(args.src_ranks) & set(args.dst_ranks):
            if self.global_rank == 0:
                logger.error(
                    "Pairwise Pt2Pt requires distinct members in src_ranks and dst_ranks! "
                )
            comms_utils.gracefulExit()
    if self.global_rank == 0:
        print(
            f"\t collective={self.collectiveArgs.collective}\t{self.collectiveArgs.pt2pt}, src_ranks={self.collectiveArgs.src_ranks}, dst_ranks={self.collectiveArgs.dst_ranks}"
        )
def checkCollectiveRanks(self):
    """Default-fill member rank lists for incast/multicast collectives.

    For incast the root receives from every other rank, so src_ranks
    defaults to all ranks minus the root; multicast mirrors this for
    dst_ranks. Rank 0 echoes the final configuration to stdout.
    """
    collective = self.collectiveArgs.collective
    root = self.collectiveArgs.srcOrDst
    if collective == "incast":
        # Root gathers from everyone else; never list the root as a sender.
        if not self.collectiveArgs.src_ranks:
            self.collectiveArgs.src_ranks = list(range(self.comm_size))
        if root in self.collectiveArgs.src_ranks:
            self.collectiveArgs.src_ranks.remove(root)
    elif collective == "multicast":
        # Root fans out to everyone else; never list the root as a receiver.
        if not self.collectiveArgs.dst_ranks:
            self.collectiveArgs.dst_ranks = list(range(self.comm_size))
        if root in self.collectiveArgs.dst_ranks:
            self.collectiveArgs.dst_ranks.remove(root)
    if self.global_rank == 0:
        print(
            f"\t collective={self.collectiveArgs.collective}, src_ranks={self.collectiveArgs.src_ranks}, dst_ranks={self.collectiveArgs.dst_ranks}"
        )
def initCollectiveArgs(self, commsParams):
    """Populate self.collectiveArgs from commsParams and the backend.

    Resolves rank/device topology, the list of message sizes to sweep,
    the collective's reduce op, pt2pt/pair settings, optional quantization
    context, and (in compute mode) pre-allocated GEMM or embedding-lookup
    operands.

    :param commsParams: parsed benchmark configuration holder.
    :return: tuple (local_rank, global_rank, world_size, group, curDevice,
        curHwDevice, allSizes, computeFunc) consumed by benchTime().
    """
    # lint was complaining that benchTime was too complex!
    (
        local_rank,
        global_rank,
        world_size,
        group,
        curDevice,
        curHwDevice,
    ) = comms_utils.get_rank_details(
        self.backendFuncs
    )  # Getting ranks from backednFuncs object, since we cannot use MPI (e.g.: TPU) to launch all the processes.
    self.backendFuncs.sayHello()  # Informs us where each process is running.
    groups = self.backendFuncs.get_groups()
    num_pgs = len(groups)
    self.comm_size = world_size
    self.global_rank = global_rank
    comms_utils.fixBeginSize(
        commsParams, world_size
    )  # Ensuring that all-reduce and all-to-all has atleast one member per rank.
    allSizes = comms_utils.getSizes(
        commsParams.beginSize, commsParams.endSize, commsParams.stepFactor
    )  # Given the begin-size, end-size, step-factor what are the message sizes to iterate on.
    if global_rank == 0:
        print(
            f"[Rank {global_rank:>3}] allSizes: {allSizes} local_rank: {local_rank} element_size: {commsParams.element_size}"
        )
    # Copy run configuration into the shared collectiveArgs holder.
    self.collectiveArgs.group = group
    self.collectiveArgs.groups = groups
    self.collectiveArgs.num_pgs = num_pgs
    self.collectiveArgs.device = curDevice
    self.collectiveArgs.world_size = world_size
    self.collectiveArgs.numIters = commsParams.numIters
    self.collectiveArgs.numWarmupIters = commsParams.numWarmupIters
    self.collectiveArgs.global_rank = global_rank
    self.collectiveArgs.backendFuncs = self.backendFuncs
    self.collectiveArgs.collective = commsParams.collective
    op = self.backendFuncs.get_reduce_op("sum")
    self.collectiveArgs.op = op
    self.collectiveArgs.srcOrDst = commsParams.srcOrDst
    self.collectiveArgs.src_ranks = commsParams.src_ranks
    self.collectiveArgs.dst_ranks = commsParams.dst_ranks
    self.collectiveArgs.pair = commsParams.pair
    self.collectiveArgs.collective_pair = commsParams.collective_pair
    self.collectiveArgs.pt2pt = commsParams.pt2pt
    self.collectiveArgs.window = commsParams.window
    # blockingFlag == 1 means synchronous (blocking) collectives.
    self.collectiveArgs.asyncOp = False if commsParams.blockingFlag == 1 else True
    if commsParams.bitwidth < 32:
        # Sub-32-bit runs need a quantization context for the comms path.
        comms_utils.initQuantCommCtx(self.collectiveArgs, commsParams)
    if self.collectiveArgs.collective == "pt2pt":
        self.checkPt2PtRanks()
    else:
        self.checkCollectiveRanks()
    computeFunc = self.backendFuncs.noop
    if (
        commsParams.mode != "comms"
    ):  # Compute mode related initialization if not in comms-only mode
        if commsParams.kernel == "gemm":
            # Pre-allocate square GEMM operands once; reused every iteration.
            computeFunc = self.backendFuncs.gemm
            mm_dim = commsParams.mm_dim
            in1 = np.random.rand(mm_dim, mm_dim)
            MMin1 = torch.FloatTensor(in1).to(curDevice)
            in2 = np.random.rand(mm_dim, mm_dim)
            MMin2 = torch.FloatTensor(in2).to(curDevice)
            in3 = np.random.rand(mm_dim, mm_dim)
            MMin3 = torch.FloatTensor(in3).to(curDevice)
            MMout = self.backendFuncs.alloc_empty(
                [mm_dim, mm_dim], commsParams.dtype, curDevice
            )
            self.collectiveArgs.MMout = MMout
            self.collectiveArgs.MMin1 = MMin1
            self.collectiveArgs.MMin2 = MMin2
            self.collectiveArgs.MMin3 = MMin3
            self.collectiveArgs.numComputePerColl = commsParams.num_compute
        elif commsParams.kernel == "emb_lookup":
            # Pre-build an embedding table plus random lookup indices/offsets.
            computeFunc = self.backendFuncs.emb_lookup
            emb_dim = commsParams.emb_dim
            num_embeddings = commsParams.num_embs
            avg_length = commsParams.avg_len
            batch_size = commsParams.batch_size
            print(
                f"emb_dim {emb_dim} num_embs {num_embeddings} avg_len {avg_length} bs {batch_size}"
            )
            self.collectiveArgs.EmbWeights = self.backendFuncs.alloc_empty(
                [num_embeddings, emb_dim], torch.double, curDevice
            )
            self.collectiveArgs.TableOffsets = torch.LongTensor(
                [0, num_embeddings]
            ).to(curDevice)
            self.collectiveArgs.Indices = torch.LongTensor(
                np.random.randint(0, num_embeddings - 1, avg_length * batch_size)
            ).to(curDevice)
            # Every sample has exactly avg_length lookups.
            lengths = np.ones((1, batch_size)) * avg_length
            flat_lengths = lengths.flatten()
            self.collectiveArgs.Offsets = torch.LongTensor(
                [0] + np.cumsum(flat_lengths).tolist()
            ).to(curDevice)
            self.collectiveArgs.LookupOut = self.backendFuncs.alloc_empty(
                [batch_size, emb_dim], torch.double, curDevice
            )
            self.collectiveArgs.AvgLengths = avg_length
            self.collectiveArgs.numComputePerColl = commsParams.num_compute
    return (
        local_rank,
        global_rank,
        world_size,
        group,
        curDevice,
        curHwDevice,
        allSizes,
        computeFunc,
    )
def gatherBenchTime(self, collectiveArgs, commsParams, timeUsElapsedList):
    """All-gather each rank's measurements so rank 0 can report them.

    :param collectiveArgs: shared argument holder; mutated in place to set
        up the all_gather (ipTensor/opTensor/dataSize/...).
    :param commsParams: benchmark parameters; only the backend name is read.
    :param timeUsElapsedList: this rank's measurements (list of floats, us).
    :return: list of per-rank tensors; entry i holds rank i's measurements.
    """
    # Push the list to device, then do an all-gather.
    timeElapsedTensor = torch.tensor(
        timeUsElapsedList, device=self.backendFuncs.get_device()
    )
    collectiveArgs.opTensor = None
    if commsParams.backend != "xla":
        # Pre-allocate one output slot per rank; unbind(0) yields views into
        # the same storage, which all_gather fills in.
        timeList = list(torch.ones(
            (self.comm_size,) + timeElapsedTensor.shape,
            dtype=timeElapsedTensor.dtype,
            device=timeElapsedTensor.device,
        ).unbind(0))
        collectiveArgs.opTensor = timeList
    collectiveArgs.ipTensor = timeElapsedTensor
    collectiveArgs.asyncOp = False
    collectiveArgs.dataSize = (
        timeElapsedTensor.nelement() * timeElapsedTensor.element_size()
    )
    collectiveArgs.numElements = timeElapsedTensor.nelement()
    # use allgather as all process group should support it
    self.backendFuncs.all_gather(collectiveArgs)
    self.backendFuncs.complete_accel_ops(collectiveArgs)
    # NOTE(review): timeList is only bound on the non-xla branch above; on
    # the "xla" backend this return would raise NameError — confirm whether
    # the xla path is handled elsewhere before relying on it.
    return timeList
def printPreamble(self, commsParams):
    """Print the column header matching the result rows printed later.

    Three layouts exist: pt2pt runs, quantized (<32-bit) collectives, and
    plain collectives (with an extra column set when --pair is active).
    Keep the widths in sync with the format strings in reportBenchTime*().
    """
    logger.debug(f"\tcommsParams: {str(commsParams.__dict__)}")
    header = "\n\tCOMMS-RES"
    if self.collectiveArgs.collective == "pt2pt":
        header += "{:>15}{:>20}{:>10}{:>10}{:>25}{:>10}{:>10}{:>15}{:>15}{:>18}{:>18}".format(
            "size (B)",
            "pingLatency(us):p50",
            "p75",
            "p95",
            "pingPongLatency(us):p50",
            "p75",
            "p95",
            "avgUniBW(GB/s)",
            "avgBiBW(GB/s)",
            "totalUniBW(GB/s)",
            "totalBiBW(GB/s)",
        )
    else:
        if commsParams.bitwidth < 32:
            # Quantized run: report (de-)quantization overheads separately.
            header += "-QUANT\t{:>15}{:>18}{:>25}{:>15}{:>15}{:>15}".format(
                "size (B)",
                "nElementsPerRank",
                "P95 Latency(us): Quant",
                "Comms",
                "De-Quant",
                "Overall",
            )
        elif not self.collectiveArgs.pair:
            header += (
                "{:>15}{:>18}{:>18}{:>12}{:>12}{:>12}{:>12}{:>15}{:>12}".format(
                    "size (B)",
                    "nElementsPerRank",
                    "Latency(us):p50",
                    "p75",
                    "p95",
                    "Min",
                    "Max",
                    "AlgBW(GB/s)",
                    "BusBW(GB/s)",
                )
            )
        else:
            # --pair run: add the paired collective's element count column.
            header += "{:>15}{:>18}{:>22}{:>18}{:>12}{:>12}{:>12}{:>12}{:>15}{:>12}".format(
                "total-size (B)",
                "nElementsPerRank",
                "nElementsPairPerRank",
                "Latency(us):p50",
                "p75",
                "p95",
                "Min",
                "Max",
                "AlgBW(GB/s)",
                "BusBW(GB/s)",
            )
    print(header)
def reportBenchTimeCollWithQuant(
    self,
    commsParams,
    results,
    tensorList,
    quantTimeTensorList,
    dequantTimeTensorList,
):
    """Print one result row for a quantized (<32-bit) collective.

    Splits the overall p95 latency into quantization, pure-comms and
    de-quantization components gathered from all ranks.
    """
    if commsParams.backend == "xla":
        # xla returns one stacked tensor; flatten to one value per rank.
        latencyAcrossRanks = torch.transpose(tensorList.view(-1, 1), 0, 1)[0]
        latencyAcrossRanks = latencyAcrossRanks.cpu().detach().numpy()
        # quant tensor
        quantLatencyAcrossRanks = torch.transpose(
            quantTimeTensorList.view(-1, 1), 0, 1
        )[0]
        quantLatencyAcrossRanks = quantLatencyAcrossRanks.cpu().detach().numpy()
        # dequant tensor
        dequantLatencyAcrossRanks = torch.transpose(
            dequantTimeTensorList.view(-1, 1), 0, 1
        )[0]
        dequantLatencyAcrossRanks = dequantLatencyAcrossRanks.cpu().detach().numpy()
    else:
        # Non-xla backends hand back a list of per-rank tensors.
        if isinstance(tensorList, list):
            tensorList = [t.cpu().detach().numpy() for t in tensorList]
        latencyAcrossRanks = np.array(tensorList)
        # quant tensor
        quantLatencyAcrossRanks = np.array(quantTimeTensorList)
        # dequant tensor
        dequantLatencyAcrossRanks = np.array(dequantTimeTensorList)
    p95 = np.percentile(latencyAcrossRanks, 95)
    quant_p95 = np.percentile(quantLatencyAcrossRanks, 95)
    dequant_p95 = np.percentile(dequantLatencyAcrossRanks, 95)
    # "Comms" column = overall p95 minus the two conversion overheads.
    print(
        "\tCOMMS-RES-QUANT\t{:>15}{:>18}{:>25}{:>15}{:>15}{:>15}".format(
            results["memSize"],
            str("%d" % (results["numElements"])),
            str("%.1f" % (quant_p95)),
            str("%.1f" % (p95 - quant_p95 - dequant_p95)),
            str("%.1f" % (dequant_p95)),
            str("%.1f" % (p95)),
            # str("%.3f" % (algBW)),
            # str("%.3f" % (busBW)),
        )
    )
def reportBenchTime(
    self,
    commsParams,
    results,
    tensorList,
    quantTimeTensorList,
    dequantTimeTensorList,
):
    """Dispatch result reporting to the reporter matching the run mode.

    For all-to-all style collectives the gathered element count is first
    normalized to elements *per rank*.
    """
    # Convert num_elements to # of elements per rank.
    if commsParams.collective in ("all_to_all", "all_to_allv"):
        results["numElements"] = int(
            results["numElements"] // commsParams.comms_world_info.world_size
        )
    # Early-return dispatch: pt2pt, quantized collective, plain collective.
    if commsParams.collective == "pt2pt":
        self.reportBenchTimePt2Pt(commsParams, tensorList, results)
        return
    if commsParams.bitwidth < 32:
        self.reportBenchTimeCollWithQuant(
            commsParams,
            results,
            tensorList,
            quantTimeTensorList,
            dequantTimeTensorList,
        )
        return
    self.reportBenchTimeColl(commsParams, results, tensorList)
def reportBenchTimeColl(self, commsParams, results, tensorList):
    """Print one result row for a regular (non-quantized) collective.

    Computes latency percentiles over the ranks that actually participate,
    then emits the row matching printPreamble()'s header (a wider row when
    --pair is active).
    """
    if commsParams.backend == "xla":
        # xla returns one stacked tensor; flatten to one value per rank.
        latencyAcrossRanks = torch.transpose(tensorList.view(-1, 1), 0, 1)[0]
        latencyAcrossRanks = latencyAcrossRanks.cpu().detach().numpy()
    else:
        if isinstance(tensorList, list):
            tensorList = [t.cpu().detach().numpy() for t in tensorList]
        latencyAcrossRanks = np.array(tensorList)
    logger.debug(f"Latency across all ranks: {latencyAcrossRanks}")
    # Include only communicating ranks
    if self.collectiveArgs.collective == "multicast":
        commRanks = [self.collectiveArgs.srcOrDst] + self.collectiveArgs.dst_ranks
    elif self.collectiveArgs.collective == "incast":
        commRanks = [self.collectiveArgs.srcOrDst] + self.collectiveArgs.src_ranks
    else:
        # All other collectives involve every rank.
        commRanks = range(self.collectiveArgs.world_size)
    latencyAcrossCommRanks = latencyAcrossRanks[commRanks]
    logger.debug(
        "Latency across communicating ranks (%s): %s"
        % (commRanks, latencyAcrossCommRanks)
    )
    p50 = np.percentile(latencyAcrossCommRanks, 50)
    p75 = np.percentile(latencyAcrossCommRanks, 75)
    p95 = np.percentile(latencyAcrossCommRanks, 95)
    minlat = np.amin(latencyAcrossCommRanks)
    maxlat = np.amax(latencyAcrossCommRanks)
    # Adjust busBW: quantization shrinks on-the-wire bytes proportionally.
    busBW = results["busBW"] * (commsParams.bitwidth / 32.0)
    if not self.collectiveArgs.pair:
        print(
            "\tCOMMS-RES{:>15}{:>18}{:>18}{:>12}{:>12}{:>12}{:>12}{:>15}{:>12}".format(
                results["memSize"],
                str("%d" % (results["numElements"])),
                str("%.1f" % (p50)),
                str("%.1f" % (p75)),
                str("%.1f" % (p95)),
                str("%.1f" % (minlat)),
                str("%.1f" % (maxlat)),
                str("%.3f" % (results["algBW"])),
                str("%.3f" % (busBW)),
            )
        )
    else:
        # convert to # of elements per rank (for the paired collective too)
        if commsParams.collective_pair in ("all_to_all", "all_to_allv"):
            results["numElements_pair"] = int(
                results["numElements_pair"]
                // commsParams.comms_world_info.world_size
            )
        print(
            "\tCOMMS-RES{:>15}{:>18}{:>22}{:>18}{:>12}{:>12}{:>12}{:>12}{:>15}{:>12}".format(
                results["memSize"],
                str("%d" % (results["numElements"])),
                str("%d" % (results["numElements_pair"])),
                str("%.1f" % (p50)),
                str("%.1f" % (p75)),
                str("%.1f" % (p95)),
                str("%.1f" % (minlat)),
                str("%.1f" % (maxlat)),
                str("%.3f" % (results["algBW"])),
                str("%.3f" % (busBW)),
            )
        )
def reportBenchTimePt2Pt(self, commsParams, resultsAcrossRanks, results):
    """Print one result row for a pt2pt run.

    resultsAcrossRanks holds one 4-element tensor per rank:
    [ping latency (us), ping-pong latency (us), avg uni-BW, avg bi-BW].
    Statistics are computed only over ranks that actually communicate.
    """
    pingLatencyAcrossRanks = []
    pingPongLatencyAcrossRanks = []
    uniBWAcrossRanks = []
    biBWAcrossRanks = []
    # idx = 0
    # Split the per-rank 4-tuples into one array per metric.
    for curRankTensor in resultsAcrossRanks:
        pingLatencyAcrossRanks.append(curRankTensor[0].item())
        pingPongLatencyAcrossRanks.append(curRankTensor[1].item())
        uniBWAcrossRanks.append(curRankTensor[2].item())
        biBWAcrossRanks.append(curRankTensor[3].item())
    pingLatencyAcrossRanks = np.array(pingLatencyAcrossRanks)
    pingPongLatencyAcrossRanks = np.array(pingPongLatencyAcrossRanks)
    uniBWAcrossRanks = np.array(uniBWAcrossRanks)
    biBWAcrossRanks = np.array(biBWAcrossRanks)
    # Include only communicating ranks
    commRanks = self.collectiveArgs.src_ranks + self.collectiveArgs.dst_ranks
    pingLatencyAcrossCommRanks = pingLatencyAcrossRanks[commRanks]
    pingPongLatencyAcrossCommRanks = pingPongLatencyAcrossRanks[commRanks]
    uniBWAcrossCommRanks = uniBWAcrossRanks[commRanks]
    biBWAcrossCommRanks = biBWAcrossRanks[commRanks]
    logger.debug(
        "Ping latency across communicating ranks (%s): %s"
        % (commRanks, pingLatencyAcrossCommRanks)
    )
    logger.debug(
        "PingPong latency across communicating ranks (%s): %s"
        % (commRanks, pingPongLatencyAcrossCommRanks)
    )
    logger.debug(
        "UniBW across all communicating ranks (%s): %s"
        % (commRanks, uniBWAcrossCommRanks)
    )
    logger.debug(
        "BiBW across all communicating ranks (%s): %s"
        % (commRanks, biBWAcrossCommRanks)
    )
    avgUniBW = np.mean(uniBWAcrossCommRanks)
    avgBiBW = np.mean(biBWAcrossCommRanks)
    # Halved sums: presumably each transfer is reported by both endpoints,
    # so summing over src+dst ranks double-counts — confirm against
    # getUniBW/getBiBW semantics.
    totalUniBW = np.sum(uniBWAcrossCommRanks) / 2
    totalBiBW = np.sum(biBWAcrossCommRanks) / 2
    ping_p50 = np.percentile(pingLatencyAcrossCommRanks, 50)
    ping_p75 = np.percentile(pingLatencyAcrossCommRanks, 75)
    ping_p95 = np.percentile(pingLatencyAcrossCommRanks, 95)
    ping_pong_p50 = np.percentile(pingPongLatencyAcrossCommRanks, 50)
    ping_pong_p75 = np.percentile(pingPongLatencyAcrossCommRanks, 75)
    ping_pong_p95 = np.percentile(pingPongLatencyAcrossCommRanks, 95)
    print(
        "\tCOMMS-RES{:>15}{:>20}{:>10}{:>10}{:>25}{:>10}{:>10}{:>15}{:>15}{:>18}{:>18}".format(
            results["memSize"],
            str("%.1f" % (ping_p50)),
            str("%.1f" % (ping_p75)),
            str("%.1f" % (ping_p95)),
            str("%.1f" % (ping_pong_p50)),
            str("%.1f" % (ping_pong_p75)),
            str("%.1f" % (ping_pong_p95)),
            str("%.3f" % (avgUniBW)),
            str("%.3f" % (avgBiBW)),
            str("%.3f" % (totalUniBW)),
            str("%.3f" % (totalBiBW)),
        )
    )
def benchTime(self, index, commsParams, backendFuncs):
    """Main measurement loop: sweep all message sizes and report results.

    For each size: prepare input/output tensors (and the optional paired
    collective), run either the pt2pt suite or the configured collective
    (optionally overlapped with a compute kernel), gather timings from all
    ranks, and have rank 0 print one result row.

    :param index: process index (unused here; part of the callback
        signature registered with commsParamsHolder).
    :param commsParams: parsed benchmark configuration.
    :param backendFuncs: backend object (same as self.backendFuncs).
    """
    # Get NW stack specific parameters
    (
        local_rank,
        global_rank,
        world_size,
        group,
        curDevice,
        curHwDevice,
        allSizes,
        computeFunc,
    ) = self.initCollectiveArgs(commsParams)
    backendFuncs.sync_barrier(self.collectiveArgs)
    if global_rank == 0:
        self.printPreamble(commsParams)
    for curSize in allSizes:
        results = {}
        timeUsElapsedList = []
        quantTimeElapsedList = []
        dequantTimeElapsedList = []
        numElements = int(curSize // commsParams.element_size)
        collectiveFunc = self.backendFuncs.noop
        collectiveFunc_pair = self.backendFuncs.noop
        if (
            commsParams.mode != "compute"
        ):  # comms specific initializations if not in compute-only mode
            # set corresponding function pointers
            if commsParams.collective != "pt2pt":
                collectiveFunc = backendFuncs.collectiveFunc[commsParams.collective]
            (
                self.collectiveArgs.ipTensor,
                self.collectiveArgs.opTensor,
            ) = self.prepComm(
                curComm={
                    "in_msg_size": numElements,
                    "out_msg_size": numElements,
                    "world_size": world_size,
                },
                commsParams=commsParams,
            )
        # Setup the arguments.
        self.collectiveArgs.dataSize = curSize
        self.collectiveArgs.numElements = numElements
        self.collectiveArgs.waitObj = []
        results["numElements"] = numElements
        if (
            commsParams.pair and commsParams.mode != "compute"
        ):  # comms-pair specific initializations if not in compute-only mode:
            # set corresponding function pointers
            collectiveFunc_pair = backendFuncs.collectiveFunc[
                commsParams.collective_pair
            ]
            # TODO: allow user to set specific size
            # Setup the arguments.
            self.collectiveArgs.dataSize_pair = curSize
            self.collectiveArgs.numElements_pair = int(
                self.collectiveArgs.dataSize_pair // commsParams.element_size
            )
            results["numElements_pair"] = self.collectiveArgs.numElements_pair
            (
                self.collectiveArgs.ipTensor_pair,
                self.collectiveArgs.opTensor_pair,
            ) = self.prepComm(
                curComm={
                    "in_msg_size": self.collectiveArgs.numElements_pair,
                    "out_msg_size": self.collectiveArgs.numElements_pair,
                    "world_size": world_size,
                },
                commsParams=commsParams,
            )
        # self.collectiveArgs has all the information on the experiment.
        if commsParams.collective == "pt2pt":
            results.update(self.runPt2Pt())
            timeUsElapsedList = [
                np.mean(np.array(results["pingPerIterNS"])) / 1e3,
                np.mean(np.array(results["pingPongPerIterNS"])) / 1e3,
                results["avgUniBW"],
                results["avgBiBW"],
            ]  # time in US
            if (
                global_rank in self.collectiveArgs.src_ranks
                or global_rank in self.collectiveArgs.dst_ranks
            ):
                logger.debug(timeUsElapsedList)
        else:
            results.update(
                self.runColl(
                    comm_fn=collectiveFunc,
                    compute_fn=computeFunc,
                    comm_fn_pair=collectiveFunc_pair,
                )
            )
            timeUsElapsedList = [results["timeUS"]]
        # perfom data validation check on the final opTensor
        if commsParams.dcheck == 1:
            self.dcheck(commsParams, curSize, self.collectiveArgs.opTensor)
        backendFuncs.clear_memory(self.collectiveArgs)
        # gather quantization overhead if enabled
        if commsParams.bitwidth < 32:
            # calculate average (de-)quantization overhead
            results["quantTimeUS"] = (
                self.collectiveArgs.quant_time.getTimeUS()
                / self.collectiveArgs.numIters
            )
            results["dequantTimeUS"] = (
                self.collectiveArgs.dequant_time.getTimeUS()
                / self.collectiveArgs.numIters
            )
            quantTimeElapsedList.append(results["quantTimeUS"])
            dequantTimeElapsedList.append(results["dequantTimeUS"])
            logger.debug(quantTimeElapsedList)
            quantTimeElapsedList = self.gatherBenchTime(
                self.collectiveArgs, commsParams, quantTimeElapsedList
            )
            dequantTimeElapsedList = self.gatherBenchTime(
                self.collectiveArgs, commsParams, dequantTimeElapsedList
            )
        # gather and report performance to stdout
        tensorList = self.gatherBenchTime(
            self.collectiveArgs, commsParams, timeUsElapsedList
        )
        if global_rank == 0:
            self.reportBenchTime(
                commsParams,
                results,
                tensorList,
                quantTimeElapsedList,
                dequantTimeElapsedList,
            )
        self.backendFuncs.sync_barrier(
            self.collectiveArgs, desc=f"curSize_{curSize}"
        )
    comms_utils.clearQuantCommCtx(self.collectiveArgs)
    # wait rank 0 reports results to avoid other ranks mess up the output
    self.backendFuncs.sync_barrier(self.collectiveArgs, "benchtime")
def runBench(self, comms_world_info, commsParams):
    """Instantiate the backend for the configured NW stack and run it.

    Aborts on an unknown stack; a ValueError from the benchmark is
    re-raised after flagging the likely missing PyTorch-UCC support.
    """
    # Init the desired backend
    nw_stack = commsParams.nw_stack
    if nw_stack == "pytorch-dist":
        from pytorch_dist_backend import PyTorchDistBackend

        backendObj = PyTorchDistBackend(comms_world_info, commsParams)
    elif nw_stack == "pytorch-xla-tpu":
        from pytorch_tpu_backend import PyTorchTPUBackend

        backendObj = PyTorchTPUBackend(comms_world_info, commsParams)
    else:
        logger.error("Unsupported NW stack! ")
        comms_utils.gracefulExit()
    self.backendFuncs = backendObj
    try:
        backendObj.benchmark_comms()
    except ValueError as ve:
        if commsParams.backend == "ucc":
            logger.critical("PyTorch UCC not implemented? {}".format(repr(ve)))
        raise
def main():
    """Entry point: parse CLI args, build config holders, run the benchmark."""
    collBenchObj = commsCollBench()
    ### parse arguments ###
    parser = argparse.ArgumentParser(
        description="PARAM-Comm Benchmark",
        formatter_class=MultilineFormatter,
    )
    # NOTE(review): `leftovers` (unrecognized args) is never inspected here.
    args, leftovers = collBenchObj.readArgs(parser)
    collBenchObj.checkArgs(args)
    comms_env_params = comms_utils.read_comms_env_vars()
    if comms_env_params["global_rank"] == 0:
        print("\t MPI environment: %s " % (str(comms_env_params)))
        print(
            "\t backend: %s nw-stack: %s mode: %s args.b: %d args.e: %d args.f: %d args.z: %s args.master_ip: %s "
            % (
                args.backend,
                args.nw_stack,
                args.mode,
                args.b,
                args.e,
                args.f,
                args.z,
                args.master_ip,
            )
        )
    # Bytes per element for the chosen dtype (drives size <-> count math).
    element_size = torch.ones([1], dtype=args.dtype).element_size()
    comms_world_info = comms_utils.comms_world_info_holder(
        args.master_ip, args.master_port, args.num_tpu_cores, comms_env_params
    )
    commsParams = comms_utils.commsParamsHolder(
        args, comms_world_info, element_size, collBenchObj.benchTime
    )
    # Overlapped pair mode needs a second process group.
    if args.pair and args.overlap_pair_pgs:
        commsParams.num_pgs = 2
    collBenchObj.runBench(comms_world_info, commsParams)


if __name__ == "__main__":
    main()
| true | true |
f717719b096d0afc9e9656a8a3d67b6ed3d90cd0 | 3,338 | py | Python | mime/agent/script_agent_augmented.py | rjgpinel/mime-release | 26a850c4ba5b702b86d068995614163338fb01df | [
"MIT"
] | null | null | null | mime/agent/script_agent_augmented.py | rjgpinel/mime-release | 26a850c4ba5b702b86d068995614163338fb01df | [
"MIT"
] | null | null | null | mime/agent/script_agent_augmented.py | rjgpinel/mime-release | 26a850c4ba5b702b86d068995614163338fb01df | [
"MIT"
] | null | null | null | import itertools
import types
import numpy as np
import torch
import click
import gym
import time
import yaml
from robos2r.model import build_model
from .agent import Agent
from .script_agent import ScriptAgent, make_noised
from .utils import Rate
from PIL import Image
from pathlib import Path
from einops import rearrange
from torchvision import transforms as T
@click.command(help="script_agent env_name [options]")
@click.argument("env_name", type=str)
@click.option("-s", "--seed", default=0, help="seed")
@click.option("-t", "--times-repeat", default=1, help="times to repeat the script")
@click.option("-n", "--add-noise", is_flag=True, help="adding noise to actions or not")
@click.option(
    "-sc",
    "--skill-collection/--no-skill-collection",
    is_flag=True,
    help="whether to show the skills collection",
)
def main(env_name, seed, times_repeat, add_noise, skill_collection):
    """Replay a scripted agent in ``env_name``, pass every RGB frame (and
    mask) through a pretrained differentiable-augmentation model, and dump
    the augmented frames to ``0/output<n>.jpeg``.

    Cleanup vs. the previous version: removed dead locals (``i``, ``rate``,
    ``frames``, an unused in-loop matplotlib import) and the redundant
    double ``.detach()`` on the augmented image tensor.
    """
    print("Loading Augmentor model...")
    # NOTE(review): hard-coded, machine-specific checkpoint location —
    # consider promoting it to a CLI option.
    diffaug_model_path = "/home/rgarciap/Remote/models/diffs2r_new/resnet_adam_lr_1e-3_lraug0.01_bs_64_L8/"
    diffaug_model_path = Path(diffaug_model_path)
    diffaug_cfg_path = diffaug_model_path / "config.yml"
    with open(str(diffaug_cfg_path), "rb") as f:
        diffaug_cfg = yaml.load(f, Loader=yaml.FullLoader)
    model_cfg = dict(
        name="diffaug",
        reg_output_size=3,
        aug_pipeline=diffaug_cfg["aug_pipeline"],
        multi=diffaug_cfg["multi_pipeline"],
        num_layers=diffaug_cfg["num_layers"],
        gumbel=diffaug_cfg["gumbel"],
        backbone_name=diffaug_cfg["backbone_name"],
    )
    diffaug_model = build_model(model_cfg)
    diffaug_ckp_path = diffaug_model_path / "best_checkpoint.pth"
    checkpoint = torch.load(str(diffaug_ckp_path), map_location="cpu")
    diffaug_model.load_state_dict(checkpoint["model"])
    # Only the augmentation sub-module is used for inference here.
    augmentor = diffaug_model.augmentor
    augmentor.to("cpu")
    augmentor.eval()
    print("Model loaded")
    env = gym.make(env_name)
    scene = env.unwrapped.scene
    scene.renders(True)
    if skill_collection:
        scene.skill_data_collection = True
    env.seed(seed)
    for _ in range(times_repeat):
        env.reset()
        agent = ScriptAgent(env)
        done = False
        action = agent.get_action()
        if add_noise:
            make_noised(action)
        frame_idx = 0
        while not done and action is not None:
            obs, reward, done, info = env.step(action)
            # Augment the RGB frame together with its segmentation mask.
            im = T.ToTensor()(obs["rgb0"]).unsqueeze(0)
            mask = torch.tensor(obs["mask0"]).unsqueeze(0)
            im, mask = augmentor((im, mask))
            # Single detach is sufficient (was .detach().detach()).
            im = rearrange(im.detach().squeeze(0).numpy(), "c h w -> h w c")
            im = Image.fromarray((im * 255).astype(np.uint8))
            # NOTE(review): assumes the output directory "0/" already exists.
            im.save(f"0/output{frame_idx}.jpeg")
            frame_idx += 1
            action = agent.get_action()
            if add_noise and action is not None:
                make_noised(action)
        if action is None:
            info["failure_message"] = "End of Script."
        if not info["success"]:
            click.secho(
                "Failure Seed {}: {}".format(seed, info["failure_message"]), fg="red"
            )
        print("Success", info["success"])


if __name__ == "__main__":
    main()
| 31.490566 | 107 | 0.641402 | import itertools
import types
import numpy as np
import torch
import click
import gym
import time
import yaml
from robos2r.model import build_model
from .agent import Agent
from .script_agent import ScriptAgent, make_noised
from .utils import Rate
from PIL import Image
from pathlib import Path
from einops import rearrange
from torchvision import transforms as T
@click.command(help="script_agent env_name [options]")
@click.argument("env_name", type=str)
@click.option("-s", "--seed", default=0, help="seed")
@click.option("-t", "--times-repeat", default=1, help="times to repeat the script")
@click.option("-n", "--add-noise", is_flag=True, help="adding noise to actions or not")
@click.option(
    "-sc",
    "--skill-collection/--no-skill-collection",
    is_flag=True,
    help="whether to show the skills collection",
)
def main(env_name, seed, times_repeat, add_noise, skill_collection):
    """Replay a scripted agent in ``env_name``, pass every RGB frame (and
    mask) through a pretrained differentiable-augmentation model, and dump
    the augmented frames to ``0/output<n>.jpeg``.

    Cleanup vs. the previous version: removed dead locals (``i``, ``rate``,
    ``frames``, an unused in-loop matplotlib import) and the redundant
    double ``.detach()`` on the augmented image tensor.
    """
    print("Loading Augmentor model...")
    # NOTE(review): hard-coded, machine-specific checkpoint location —
    # consider promoting it to a CLI option.
    diffaug_model_path = "/home/rgarciap/Remote/models/diffs2r_new/resnet_adam_lr_1e-3_lraug0.01_bs_64_L8/"
    diffaug_model_path = Path(diffaug_model_path)
    diffaug_cfg_path = diffaug_model_path / "config.yml"
    with open(str(diffaug_cfg_path), "rb") as f:
        diffaug_cfg = yaml.load(f, Loader=yaml.FullLoader)
    model_cfg = dict(
        name="diffaug",
        reg_output_size=3,
        aug_pipeline=diffaug_cfg["aug_pipeline"],
        multi=diffaug_cfg["multi_pipeline"],
        num_layers=diffaug_cfg["num_layers"],
        gumbel=diffaug_cfg["gumbel"],
        backbone_name=diffaug_cfg["backbone_name"],
    )
    diffaug_model = build_model(model_cfg)
    diffaug_ckp_path = diffaug_model_path / "best_checkpoint.pth"
    checkpoint = torch.load(str(diffaug_ckp_path), map_location="cpu")
    diffaug_model.load_state_dict(checkpoint["model"])
    # Only the augmentation sub-module is used for inference here.
    augmentor = diffaug_model.augmentor
    augmentor.to("cpu")
    augmentor.eval()
    print("Model loaded")
    env = gym.make(env_name)
    scene = env.unwrapped.scene
    scene.renders(True)
    if skill_collection:
        scene.skill_data_collection = True
    env.seed(seed)
    for _ in range(times_repeat):
        env.reset()
        agent = ScriptAgent(env)
        done = False
        action = agent.get_action()
        if add_noise:
            make_noised(action)
        frame_idx = 0
        while not done and action is not None:
            obs, reward, done, info = env.step(action)
            # Augment the RGB frame together with its segmentation mask.
            im = T.ToTensor()(obs["rgb0"]).unsqueeze(0)
            mask = torch.tensor(obs["mask0"]).unsqueeze(0)
            im, mask = augmentor((im, mask))
            # Single detach is sufficient (was .detach().detach()).
            im = rearrange(im.detach().squeeze(0).numpy(), "c h w -> h w c")
            im = Image.fromarray((im * 255).astype(np.uint8))
            # NOTE(review): assumes the output directory "0/" already exists.
            im.save(f"0/output{frame_idx}.jpeg")
            frame_idx += 1
            action = agent.get_action()
            if add_noise and action is not None:
                make_noised(action)
        if action is None:
            info["failure_message"] = "End of Script."
        if not info["success"]:
            click.secho(
                "Failure Seed {}: {}".format(seed, info["failure_message"]), fg="red"
            )
        print("Success", info["success"])


if __name__ == "__main__":
    main()
| true | true |
f71771ce6064be5fc44ff24790cce8db6106923c | 8,762 | py | Python | benchmark_runner/common/clouds/shared/s3/s3_operations.py | kpouget/benchmark-runner | eecdb57d12f8c17268800632722af8fe8046185a | [
"Apache-2.0"
] | 10 | 2021-07-21T21:44:20.000Z | 2022-02-24T22:01:13.000Z | benchmark_runner/common/clouds/shared/s3/s3_operations.py | kpouget/benchmark-runner | eecdb57d12f8c17268800632722af8fe8046185a | [
"Apache-2.0"
] | 83 | 2021-07-20T14:37:44.000Z | 2022-03-24T13:48:04.000Z | benchmark_runner/common/clouds/shared/s3/s3_operations.py | kpouget/benchmark-runner | eecdb57d12f8c17268800632722af8fe8046185a | [
"Apache-2.0"
] | 6 | 2021-07-14T21:12:48.000Z | 2022-02-15T12:48:27.000Z |
import os
import boto3
import typeguard
from botocore.exceptions import ClientError
from os import listdir
from os.path import isfile, join
from benchmark_runner.common.clouds.shared.s3.s3_operations_exceptions import S3FileNotUploaded, S3FileNotDownloaded, S3FileNotDeleted, S3KeyNotCreated, S3FileNotExist, S3FailedCreatePresingedURL
from benchmark_runner.main.environment_variables import environment_variables
class S3Operations:
""" This class is responsible for S3 operations """
def __init__(self, region_name: str = '', endpoint_url: str = None, aws_access_key_id: str = None, aws_secret_access_key: str = None):
    """Build the boto3 S3 client for this helper.

    Explicitly passed region/credentials (e.g. from pytest) take
    precedence; otherwise everything is read from the shared
    environment-variables dict.
    """
    # environment variables
    self.__environment_variables_dict = environment_variables.environment_variables_dict
    if region_name:
        # must add region for pytest
        self.__region = region_name
        self.__endpoint_url = endpoint_url
        self.__aws_access_key_id = aws_access_key_id
        self.__aws_secret_access_key = aws_secret_access_key
    else:
        env = self.__environment_variables_dict
        self.__region = env.get('region_name', '')
        # must be None for pytest
        self.__endpoint_url = env.get('endpoint_url', None)
        self.__aws_access_key_id = env.get('access_key_id', '')
        self.__aws_secret_access_key = env.get('secret_access_key', '')
    self.__s3_client = boto3.client(
        service_name='s3',
        region_name=self.__region,
        endpoint_url=self.__endpoint_url,
        aws_access_key_id=self.__aws_access_key_id,
        aws_secret_access_key=self.__aws_secret_access_key,
    )
@typeguard.typechecked
def upload_file(self, file_name_path: str, bucket: str, key: str, upload_file: str):
    """
    Upload a local file to s3 as <key>/<upload_file>, with AES256
    server-side encryption enforced.
    :param file_name_path: local path, e.g. '/home/user/test.txt'
    :param bucket: target bucket, e.g. 'benchmark'
    :param key: destination prefix, e.g. 'test-data'
    :param upload_file: object name, e.g. 'test.txt'
    :return:
    """
    destination_key = f'{key}/{upload_file}'
    try:
        self.__s3_client.upload_file(
            Filename=file_name_path,
            Bucket=bucket,
            Key=destination_key,
            ExtraArgs={'ServerSideEncryption': 'AES256'},
        )
    except ClientError:
        # AWS-level failures propagate untouched.
        raise
    except Exception:
        raise S3FileNotUploaded
@typeguard.typechecked
def download_file(self, bucket: str, key: str, download_file: str, file_name_path: str):
    """
    Download an object from s3 into a local file.
    :param bucket: e.g. 'benchmark'
    :param key: prefix, e.g. 'logs/ec2-idle/2021/01/19/18'; when
        download_file is empty, key is used verbatim as the object key
    :param download_file: object name, e.g. 'test.txt' (may be '')
    :param file_name_path: local destination path
    :return:
    """
    object_key = f'{key}/{download_file}' if download_file else key
    try:
        self.__s3_client.download_file(Bucket=bucket, Key=object_key, Filename=file_name_path)
    except ClientError:
        raise
    except Exception:
        raise S3FileNotDownloaded
@typeguard.typechecked
def delete_file(self, bucket: str, key: str, file_name: str):
    """
    Delete the single object <key>/<file_name> from s3.
    :param bucket: e.g. 'benchmark'
    :param key: prefix, e.g. 'test-data'
    :param file_name: e.g. 'test.txt'
    :return:
    """
    object_key = f'{key}/{file_name}'
    try:
        self.__s3_client.delete_object(Bucket=bucket, Key=object_key)
    except ClientError:
        raise
    except Exception:
        raise S3FileNotDeleted
@typeguard.typechecked
def delete_folder(self, bucket: str, key: str):
    """
    Delete every object under the given prefix in one batched call.
    :param bucket: e.g. 'benchmark'
    :param key: prefix, e.g. 'framework/test'
    :return:
    """
    try:
        listing = self.__s3_client.list_objects(Bucket=bucket, Prefix=key)
        # Collect the keys of all objects under the prefix (may be empty).
        keys_to_delete = [{'Key': obj['Key']} for obj in listing.get('Contents', [])]
        if keys_to_delete:
            self.__s3_client.delete_objects(Bucket=bucket, Delete={'Objects': keys_to_delete})
    except ClientError:
        raise
    except Exception:
        raise S3FileNotDeleted
@typeguard.typechecked
def create_folder(self, bucket: str, key: str):
    """
    Create an empty object at `key`, acting as a folder marker in s3.
    (S3 has no real directories; the original docstring's "download file"
    wording was a copy-paste mistake.)
    :param bucket: bucket name, e.g. 'benchmark'
    :param key: folder key, e.g. 'framework/test'
    :return:
    """
    try:
        self.__s3_client.put_object(Bucket=bucket, Key=key)
    except ClientError:
        # AWS-level failures propagate untouched.
        raise
    except Exception:
        raise S3KeyNotCreated
@typeguard.typechecked
def file_exist(self, bucket: str, key: str, file_name: str):
    """
    Return True when any object under the prefix contains file_name in
    its key.
    NOTE(review): this is a substring match, so 'file.txt' also matches
    'myfile.txt' — confirm callers expect that.
    :param bucket: e.g. 'benchmark'
    :param key: prefix, e.g. 'framework/test'
    :param file_name: e.g. 'file.txt'
    :return: bool
    """
    try:
        response = self.__s3_client.list_objects_v2(Bucket=bucket, Prefix=key)
        contents = response.get('Contents')
        if not contents:
            return False
        return any(file_name in item['Key'] for item in contents)
    # Todo add custom error
    except ClientError:
        raise
    except Exception:
        raise S3FileNotExist
@typeguard.typechecked
def upload_objects(self, local_source: str, s3_target: str):
    """
    Upload every regular file in a local folder to an s3 target path.
    :param local_source: local data folder, e.g. '/home/user/'
    :param s3_target: '<bucket>' or '<bucket>/<prefix>', e.g.
        'data_store/calc_image_data/'
    :return:
    """
    try:
        # Split '<bucket>/<prefix...>' once; key is '' when no '/' present.
        bucket, _, key = s3_target.partition('/')
        for file in listdir(local_source):
            filename = join(local_source, file)
            if isfile(filename):
                self.upload_file(file_name_path=filename, bucket=bucket, key=key, upload_file=file)
    except ClientError:
        raise
    except Exception:
        raise S3FileNotUploaded
@typeguard.typechecked
def download_objects(self, s3_target: str, local_source: str):
"""
This method download from s3 target to local data folder
:param local_source: local data folder i.e. '/home/user/
:param s3_target: target s3 path i.e. 'data_store/calc_image_data/'
:return:
"""
files = []
try:
if '/' in s3_target:
targets = s3_target.split('/')
bucket = targets[0]
key = '/'.join(targets[1:])
else:
bucket = s3_target
key = ''
response = self.__s3_client.list_objects_v2(Bucket=bucket, Prefix=key)
if response.get('Contents'):
for item in response['Contents']:
if item['Key'].split('/')[-1]:
files.append(item['Key'].split('/')[-1])
else:
files.append(item['Key'])
for file in files:
file_name = os.path.join(local_source, file)
self.download_file(bucket=bucket, key=key, download_file=file, file_name_path=file_name)
except ClientError as err:
raise
except Exception:
raise S3FileNotDownloaded
@typeguard.typechecked
def generate_presigned_url(self, bucket: str, key: str, file_name: str):
"""
This method generate presigned url for specific uploaded object, default 7 days
:param bucket:'benchmark'
:param key:'logs/test-data'
:param file_name:'file.txt'
:return:
"""
try:
return self.__s3_client.generate_presigned_url('get_object',
Params={'Bucket': bucket, 'Key': f'{key}/{file_name}'},
ExpiresIn=604800)
# Todo add custom error
except ClientError:
raise
except Exception:
raise S3FailedCreatePresingedURL
| 37.127119 | 195 | 0.57738 |
import os
import boto3
import typeguard
from botocore.exceptions import ClientError
from os import listdir
from os.path import isfile, join
from benchmark_runner.common.clouds.shared.s3.s3_operations_exceptions import S3FileNotUploaded, S3FileNotDownloaded, S3FileNotDeleted, S3KeyNotCreated, S3FileNotExist, S3FailedCreatePresingedURL
from benchmark_runner.main.environment_variables import environment_variables
class S3Operations:
def __init__(self, region_name: str = '', endpoint_url: str = None, aws_access_key_id: str = None, aws_secret_access_key: str = None):
self.__environment_variables_dict = environment_variables.environment_variables_dict
if region_name:
self.__region = region_name
self.__endpoint_url = endpoint_url
self.__aws_access_key_id = aws_access_key_id
self.__aws_secret_access_key = aws_secret_access_key
else:
self.__region = self.__environment_variables_dict.get('region_name', '')
self.__endpoint_url = self.__environment_variables_dict.get('endpoint_url', None)
self.__aws_access_key_id = self.__environment_variables_dict.get('access_key_id', '')
self.__aws_secret_access_key = self.__environment_variables_dict.get('secret_access_key', '')
self.__s3_client = boto3.client(service_name='s3',
region_name=self.__region,
endpoint_url=self.__endpoint_url,
aws_access_key_id=self.__aws_access_key_id,
aws_secret_access_key=self.__aws_secret_access_key)
@typeguard.typechecked
def upload_file(self, file_name_path: str, bucket: str, key: str, upload_file: str):
try:
self.__s3_client.upload_file(Filename=file_name_path,
Bucket=bucket,
Key=f'{key}/{upload_file}',
ExtraArgs={'ServerSideEncryption': 'AES256'})
except ClientError:
raise
except Exception:
raise S3FileNotUploaded
@typeguard.typechecked
def download_file(self, bucket: str, key: str, download_file: str, file_name_path: str):
try:
if download_file:
self.__s3_client.download_file(Bucket=bucket, Key=f'{key}/{download_file}', Filename=file_name_path)
else:
self.__s3_client.download_file(Bucket=bucket, Key=key, Filename=file_name_path)
except ClientError:
raise
except Exception:
raise S3FileNotDownloaded
@typeguard.typechecked
def delete_file(self, bucket: str, key: str, file_name: str):
try:
self.__s3_client.delete_object(Bucket=bucket, Key=f'{key}/{file_name}')
except ClientError:
raise
except Exception:
raise S3FileNotDeleted
@typeguard.typechecked
def delete_folder(self, bucket: str, key: str):
try:
objects_to_delete = self.__s3_client.list_objects(Bucket=bucket, Prefix=key)
delete_keys = {
'Objects': [{'Key': k} for k in [obj['Key'] for obj in objects_to_delete.get('Contents', [])]]}
if delete_keys['Objects']:
self.__s3_client.delete_objects(Bucket=bucket, Delete=delete_keys)
except ClientError:
raise
except Exception:
raise S3FileNotDeleted
@typeguard.typechecked
def create_folder(self, bucket: str, key: str):
try:
self.__s3_client.put_object(Bucket=bucket, Key=key)
except ClientError:
raise
except Exception:
raise S3KeyNotCreated
@typeguard.typechecked
def file_exist(self, bucket: str, key: str, file_name: str):
try:
response = self.__s3_client.list_objects_v2(Bucket=bucket, Prefix=key)
if response.get('Contents'):
for item in response['Contents']:
if file_name in item['Key']:
return True
return False
except ClientError:
raise
except Exception:
raise S3FileNotExist
@typeguard.typechecked
def upload_objects(self, local_source: str, s3_target: str):
try:
if '/' in s3_target:
targets = s3_target.split('/')
bucket = targets[0]
key = '/'.join(targets[1:])
else:
bucket = s3_target
key = ''
files = [f for f in listdir(local_source) if isfile(join(local_source, f))]
for file in files:
filename = os.path.join(local_source, file)
self.upload_file(file_name_path=filename, bucket=bucket, key=key, upload_file=file)
except ClientError as err:
raise
except Exception:
raise S3FileNotUploaded
@typeguard.typechecked
def download_objects(self, s3_target: str, local_source: str):
files = []
try:
if '/' in s3_target:
targets = s3_target.split('/')
bucket = targets[0]
key = '/'.join(targets[1:])
else:
bucket = s3_target
key = ''
response = self.__s3_client.list_objects_v2(Bucket=bucket, Prefix=key)
if response.get('Contents'):
for item in response['Contents']:
if item['Key'].split('/')[-1]:
files.append(item['Key'].split('/')[-1])
else:
files.append(item['Key'])
for file in files:
file_name = os.path.join(local_source, file)
self.download_file(bucket=bucket, key=key, download_file=file, file_name_path=file_name)
except ClientError as err:
raise
except Exception:
raise S3FileNotDownloaded
@typeguard.typechecked
def generate_presigned_url(self, bucket: str, key: str, file_name: str):
try:
return self.__s3_client.generate_presigned_url('get_object',
Params={'Bucket': bucket, 'Key': f'{key}/{file_name}'},
ExpiresIn=604800)
except ClientError:
raise
except Exception:
raise S3FailedCreatePresingedURL
| true | true |
f71772584a21b6b069b86c34169c1b8debf5cb25 | 476 | py | Python | erinn/python/models/__init__.py | swcjack6931677/ERINN | a4f3d0ad213515bc86e2a18575537d6affd472ac | [
"MIT"
] | null | null | null | erinn/python/models/__init__.py | swcjack6931677/ERINN | a4f3d0ad213515bc86e2a18575537d6affd472ac | [
"MIT"
] | null | null | null | erinn/python/models/__init__.py | swcjack6931677/ERINN | a4f3d0ad213515bc86e2a18575537d6affd472ac | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from . import CNN
from . import CNN1D
from . import CNN1D_Rx
from . import CNN1D_Tx
from . import DFN
# Globally-importable models.
from .CNN import get_cnn_relu
from .CNN1D import get_cnn1d_relu
from .CNN1D_Rx import get_cnn1d_rx_relu
from .CNN1D_Tx import get_cnn1d_tx
from .CNN1D_Tx import get_cnn1d_tx_relu
from .DFN import get_dfn_relu
| 25.052632 | 39 | 0.813025 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from . import CNN
from . import CNN1D
from . import CNN1D_Rx
from . import CNN1D_Tx
from . import DFN
from .CNN import get_cnn_relu
from .CNN1D import get_cnn1d_relu
from .CNN1D_Rx import get_cnn1d_rx_relu
from .CNN1D_Tx import get_cnn1d_tx
from .CNN1D_Tx import get_cnn1d_tx_relu
from .DFN import get_dfn_relu
| true | true |
f717729d6712f5bbd8b1c9f44dde5e6c1bc8107e | 411 | py | Python | server/config.py | ahnaf-zamil/flask-react-session-authenticaton-tutorial | 88c454af9932435d0bd9ad1c16718beb6fc0e1c1 | [
"MIT"
] | 1 | 2021-11-01T10:46:16.000Z | 2021-11-01T10:46:16.000Z | server/config.py | ahnaf-zamil/flask-react-session-authenticaton-tutorial | 88c454af9932435d0bd9ad1c16718beb6fc0e1c1 | [
"MIT"
] | null | null | null | server/config.py | ahnaf-zamil/flask-react-session-authenticaton-tutorial | 88c454af9932435d0bd9ad1c16718beb6fc0e1c1 | [
"MIT"
] | null | null | null | from dotenv import load_dotenv
import os
import redis
load_dotenv()
class ApplicationConfig:
SECRET_KEY = os.environ["SECRET_KEY"]
SQLALCHEMY_TRACK_MODIFICATIONS = False
SQLALCHEMY_ECHO = True
SQLALCHEMY_DATABASE_URI = r"sqlite:///./db.sqlite"
SESSION_TYPE = "redis"
SESSION_PERMANENT = False
SESSION_USE_SIGNER = True
SESSION_REDIS = redis.from_url("redis://127.0.0.1:6379") | 24.176471 | 60 | 0.734793 | from dotenv import load_dotenv
import os
import redis
load_dotenv()
class ApplicationConfig:
SECRET_KEY = os.environ["SECRET_KEY"]
SQLALCHEMY_TRACK_MODIFICATIONS = False
SQLALCHEMY_ECHO = True
SQLALCHEMY_DATABASE_URI = r"sqlite:///./db.sqlite"
SESSION_TYPE = "redis"
SESSION_PERMANENT = False
SESSION_USE_SIGNER = True
SESSION_REDIS = redis.from_url("redis://127.0.0.1:6379") | true | true |
f71772cc3a746eb0432c3f9d0877b4f980c06a5f | 1,344 | py | Python | Embed.py | zhengxiawu/Transformer | 8cad013913254ea4e06c4a8d460d9f2cf42df086 | [
"Apache-2.0"
] | null | null | null | Embed.py | zhengxiawu/Transformer | 8cad013913254ea4e06c4a8d460d9f2cf42df086 | [
"Apache-2.0"
] | null | null | null | Embed.py | zhengxiawu/Transformer | 8cad013913254ea4e06c4a8d460d9f2cf42df086 | [
"Apache-2.0"
] | null | null | null | import torch
import torch.nn as nn
import math
from torch.autograd import Variable
class Embedder(nn.Module):
    """Token-id -> dense-vector lookup table (thin wrapper around nn.Embedding)."""

    def __init__(self, vocab_size, d_model):
        super().__init__()
        # keep the embedding width around for callers that read it
        self.d_model = d_model
        self.embed = nn.Embedding(vocab_size, d_model)

    def forward(self, x):
        """Map a tensor of token ids to their embedding vectors."""
        embedded = self.embed(x)
        return embedded
class PositionalEncoder(nn.Module):
    """Add fixed sinusoidal position encodings to a batch of embeddings.

    ``pe`` is registered as a buffer, so it follows the module across
    devices (``.cuda()`` / ``.to``) automatically.  The old forward wrapped
    it in the deprecated ``autograd.Variable`` and called ``pe.cuda()``
    without keeping the result (not in-place, so a no-op that broke GPU
    runs); both are removed here.
    """
    def __init__(self, d_model, max_seq_len=200, dropout=0.1):
        super().__init__()
        self.d_model = d_model
        self.dropout = nn.Dropout(dropout)
        # create constant 'pe' matrix with values dependant on pos and i;
        # assumes d_model is even (pe[pos, i + 1] would index out of range otherwise)
        pe = torch.zeros(max_seq_len, d_model)
        for pos in range(max_seq_len):
            for i in range(0, d_model, 2):
                pe[pos, i] = \
                math.sin(pos / (10000 ** ((2 * i)/d_model)))
                pe[pos, i + 1] = \
                math.cos(pos / (10000 ** ((2 * (i + 1))/d_model)))
        pe = pe.unsqueeze(0)  # (1, max_seq_len, d_model): broadcast over the batch
        self.register_buffer('pe', pe)

    def forward(self, x):
        # make embeddings relatively larger than the positional constants
        x = x * math.sqrt(self.d_model)
        # add the non-trainable positional constants for this sequence length;
        # the buffer already lives on the module's device
        seq_len = x.size(1)
        x = x + self.pe[:, :seq_len]
        return self.dropout(x)
| 30.545455 | 70 | 0.563244 | import torch
import torch.nn as nn
import math
from torch.autograd import Variable
class Embedder(nn.Module):
    """Token-id -> dense-vector lookup table (thin wrapper around nn.Embedding)."""
    def __init__(self, vocab_size, d_model):
        super().__init__()
        self.d_model = d_model  # embedding width, kept for external callers
        self.embed = nn.Embedding(vocab_size, d_model)
    def forward(self, x):
        """Map a tensor of token ids to their embedding vectors."""
        return self.embed(x)
class PositionalEncoder(nn.Module):
    """Add fixed sinusoidal position encodings to a batch of embeddings."""
    def __init__(self, d_model, max_seq_len=200, dropout=0.1):
        super().__init__()
        self.d_model = d_model
        self.dropout = nn.Dropout(dropout)
        # Precompute the constant positional-encoding matrix: even feature
        # columns use sin, odd columns cos, per the formulas below.
        # Assumes d_model is even (pe[pos, i + 1] would index out of range otherwise).
        pe = torch.zeros(max_seq_len, d_model)
        for pos in range(max_seq_len):
            for i in range(0, d_model, 2):
                pe[pos, i] = \
                math.sin(pos / (10000 ** ((2 * i)/d_model)))
                pe[pos, i + 1] = \
                math.cos(pos / (10000 ** ((2 * (i + 1))/d_model)))
        pe = pe.unsqueeze(0)  # (1, max_seq_len, d_model): broadcast over the batch
        # buffer: saved with the model, moves with .to()/.cuda(), not trainable
        self.register_buffer('pe', pe)
    def forward(self, x):
        # scale embeddings so they are not dwarfed by the positional constants
        x = x * math.sqrt(self.d_model)
        seq_len = x.size(1)
        # Variable(...) is a deprecated no-op wrapper on modern PyTorch
        pe = Variable(self.pe[:, :seq_len], requires_grad=False)
        if x.is_cuda:
            # NOTE(review): pe.cuda() is NOT in-place -- the moved tensor is
            # discarded, so pe stays where it was; on GPU the addition below
            # relies on the buffer having moved with the module. Confirm.
            pe.cuda()
        x = x + pe
        return self.dropout(x)
| true | true |
f71772d3409ea5f7a2383df7d2159cca0dc08add | 8,729 | py | Python | rl/train_a2c_mc.py | ds4dm/GraphRL | b5b1519f6dd92b401625d51add9ae5829004a30b | [
"MIT"
] | 2 | 2021-02-26T18:51:01.000Z | 2021-07-12T05:20:18.000Z | rl/train_a2c_mc.py | pandat8/GraphRL | b5b1519f6dd92b401625d51add9ae5829004a30b | [
"MIT"
] | 3 | 2019-05-09T20:59:10.000Z | 2020-05-13T14:03:50.000Z | rl/train_a2c_mc.py | pandat8/GraphRL | b5b1519f6dd92b401625d51add9ae5829004a30b | [
"MIT"
] | 3 | 2018-08-13T20:43:29.000Z | 2020-05-13T14:00:57.000Z | import torch
import torch.optim as optm
import torch.nn.functional as F
import numpy as np
from torch.autograd import Variable
from torch.utils.data import Dataset, DataLoader
from data.graph import Graph
from collections import namedtuple
# One record per environment step: the chosen action's log-probability and the
# critic's value estimate, consumed when the episode losses are assembled.
SavedAction = namedtuple('SavedAction', ['log_prob', 'value_current'])
# Monte Carlo (full-episode return) actor-critic trainer follows.
class TrainModel_MC:
    """Actor-critic trainer using full-episode (Monte Carlo) returns.

    Trains ``model`` to pick node-elimination orders on graphs and logs the
    ratio of the policy's cost to two baselines: the min-degree heuristic
    and uniformly random elimination (lower ratio is better).
    """
    def __init__(self, model, train_dataset, val_dataset, max_grad_norm=2, use_cuda=False):
        # model: expected to expose .actor / .critic submodules plus per-episode
        # buffers (rewards, actions, saved_actions) filled and cleared below.
        self.model = model
        self.train_dataset = train_dataset
        self.val_dataset = val_dataset
        self.max_grad_norm = max_grad_norm  # stored but not applied in this method
        self.use_cuda = use_cuda
        # batch_size=1 with identity collate: each batch X is a list of samples
        self.train_loader = DataLoader(train_dataset, shuffle=True, num_workers=1, batch_size=1, collate_fn=lambda x: x)
        self.val_loader = DataLoader(val_dataset, shuffle=True, num_workers=1, batch_size=1, collate_fn=lambda x: x)
        self.epochs = 0
        self.beta = 0.9
        # smallest float32 step; guards the return-normalisation divide below
        self.eps = np.finfo(np.float32).eps.item()
    def train_and_validate(self, n_epochs, lr_actor, lr_critic, gamma=0.99, use_critic=True):
        """Train the actor (and optionally the critic) with Monte Carlo returns.

        :param n_epochs: episodes to run per training graph
        :param lr_actor: Adam learning rate for the policy network
        :param lr_critic: Adam learning rate for the value network
        :param gamma: discount factor used when computing returns
        :param use_critic: if False, a constant zero baseline replaces the critic
        """
        self.actor_optim = optm.Adam(self.model.actor.parameters(), lr=lr_actor)
        print(use_critic)  # debug leftover: echoes the flag at training start
        if use_critic:
            self.critic_optim = optm.Adam(self.model.critic.parameters(), lr=lr_critic)
            self.critic_loss_criterion = torch.nn.MSELoss()
        else:
            # constant (zero) baseline used in place of the learned value function
            baseline = torch.zeros(1)
            if self.use_cuda:
                baseline = baseline.cuda()
        for epoch in range(1):
            n_graphs_proceed = 0
            for X in self.train_loader:
                for x in X:
                    self.model.train()
                    ratio_gcn2mind = []
                    ratio_gcn2rand = []
                    # NOTE(review): this loop variable shadows the outer `epoch`
                    for epoch in range(n_epochs):
                        rewards_mindegree = 0  # fill-in edges added by the min-degree baseline
                        rewards_random = 0
                        # three independent copies so every policy starts from the same graph
                        x_mind = Graph(x.M)
                        x_rand = Graph(x.M)
                        x_rl = Graph(x.M)
                        # eliminate nodes one at a time until only two remain
                        for i in range(x.n - 2):
                            if i % 100 == 0:
                                print('iterations {}'.format(i))
                            # baseline 1: min-degree heuristic elimination
                            node_mind, d_min = x_mind.min_degree(x_mind.M)
                            rewards_mindegree += x_mind.eliminate_node(node_mind, reduce=True)
                            # baseline 2: uniformly random elimination
                            rewards_random += x_rand.eliminate_node(np.random.randint(low=0, high=x_rand.n), reduce=True)
                            # policy step; reward = number of fill-in edges added
                            # (value_next is returned but unused here)
                            action, log_prob, reward, value_current, value_next, x_rl = self.model(x_rl)
                            self.model.rewards.append(reward)
                            self.model.actions.append(action)
                            self.model.saved_actions.append(SavedAction(log_prob, value_current))
                        R = 0
                        actor_losses = []
                        critic_losses = []
                        returns = []
                        # walk the episode backwards to build discounted returns per step
                        for r in self.model.rewards[::-1]:
                            R = r + gamma * R
                            returns.insert(0, R)
                        returns = torch.tensor(returns)
                        # normalise returns; eps avoids division by zero on constant rewards
                        returns = (returns - returns.mean()) / (returns.std() + self.eps)
                        saved_actions = self.model.saved_actions
                        # accumulate actor and critic losses over the whole episode
                        for (log_prob, value_current), R in zip(saved_actions, returns):
                            if use_critic:
                                advantage = R - value_current
                                critic_losses.append(-value_current* advantage)
                            else:
                                advantage = R - baseline
                            # detach: the policy gradient must not flow into the baseline
                            actor_losses.append(log_prob * advantage.detach())
                        # actor update
                        self.actor_optim.zero_grad()
                        actor_loss = torch.stack(actor_losses).sum()
                        # retain_graph=True: the critic backward below reuses this graph
                        actor_loss.backward(retain_graph=True)
                        self.actor_optim.step()
                        # critic update (or refresh the constant baseline)
                        if use_critic:
                            self.critic_optim.zero_grad()
                            critic_closs = torch.stack(critic_losses).sum()
                            critic_closs.backward()
                            self.critic_optim.step()
                        else:
                            baseline = baseline.detach()
                        # episode summary: policy cost relative to each baseline
                        rewards_gcn = sum(self.model.rewards)
                        _ratio_gcn2mind = rewards_gcn / rewards_mindegree
                        _ratio_gcn2rand = rewards_gcn / rewards_random
                        print('graph {:04d}'.format(n_graphs_proceed), 'epoch {:04d}'.format(epoch),
                              'gcn2mind ratio {}'.format(_ratio_gcn2mind),
                              'value {}'.format(saved_actions[0].value_current),
                              'R {}'.format(returns[0]))
                        print('graph {:04d}'.format(n_graphs_proceed), 'epoch {:04d}'.format(epoch),
                              'gcn2rand ratio {}'.format(_ratio_gcn2rand))
                        ratio_gcn2mind.append(_ratio_gcn2mind)
                        ratio_gcn2rand.append(_ratio_gcn2rand)
                        # clear the per-episode buffers before the next rollout
                        del self.model.rewards[:]
                        del self.model.actions[:]
                        del self.model.saved_actions[:]
                    # per-graph statistics across the n_epochs episodes
                    ratio_gcn2mind = np.array(ratio_gcn2mind).reshape(-1)
                    ratio_gcn2rand = np.array(ratio_gcn2rand).reshape(-1)
                    min_ratio_gcn2mind = np.min(ratio_gcn2mind)
                    max_ratio_gcn2mind = np.max(ratio_gcn2mind)
                    av_ratio_gcn2mind = np.sum(ratio_gcn2mind)/ n_epochs
                    min_ratio_gcn2rand = np.min(ratio_gcn2rand)
                    max_ratio_gcn2rand = np.max(ratio_gcn2rand)
                    av_ratio_gcn2rand = np.sum(ratio_gcn2rand) / n_epochs
                    print('graph {:04d}'.format(n_graphs_proceed), 'gcn2mind{:04d}',
                          'min_ratio {}'.format(min_ratio_gcn2mind),
                          'max_ratio {}'.format(max_ratio_gcn2mind),
                          'av_ratio {}'.format(av_ratio_gcn2mind))
                    print('graph {:04d}'.format(n_graphs_proceed), 'gcn2rand{:04d}',
                          'min_ratio {}'.format(min_ratio_gcn2rand),
                          'max_ratio {}'.format(max_ratio_gcn2rand),
                          'av_ratio {}'.format(av_ratio_gcn2rand),
                          'nb graph proceeded {}'.format(n_graphs_proceed))
                    n_graphs_proceed += len(X)
| 47.961538 | 172 | 0.520907 | import torch
import torch.optim as optm
import torch.nn.functional as F
import numpy as np
from torch.autograd import Variable
from torch.utils.data import Dataset, DataLoader
from data.graph import Graph
from collections import namedtuple
SavedAction = namedtuple('SavedAction', ['log_prob', 'value_current'])
class TrainModel_MC:
def __init__(self, model, train_dataset, val_dataset, max_grad_norm=2, use_cuda=False):
self.model = model
self.train_dataset = train_dataset
self.val_dataset = val_dataset
self.max_grad_norm = max_grad_norm
self.use_cuda = use_cuda
self.train_loader = DataLoader(train_dataset, shuffle=True, num_workers=1, batch_size=1, collate_fn=lambda x: x)
self.val_loader = DataLoader(val_dataset, shuffle=True, num_workers=1, batch_size=1, collate_fn=lambda x: x)
self.epochs = 0
self.beta = 0.9
self.eps = np.finfo(np.float32).eps.item()
def train_and_validate(self, n_epochs, lr_actor, lr_critic, gamma=0.99, use_critic=True):
self.actor_optim = optm.Adam(self.model.actor.parameters(), lr=lr_actor)
print(use_critic)
if use_critic:
self.critic_optim = optm.Adam(self.model.critic.parameters(), lr=lr_critic)
self.critic_loss_criterion = torch.nn.MSELoss()
else:
baseline = torch.zeros(1)
if self.use_cuda:
baseline = baseline.cuda()
for epoch in range(1):
n_graphs_proceed = 0
for X in self.train_loader:
for x in X:
self.model.train()
ratio_gcn2mind = []
ratio_gcn2rand = []
for epoch in range(n_epochs):
rewards_mindegree = 0
rewards_random = 0
x_mind = Graph(x.M)
x_rand = Graph(x.M)
x_rl = Graph(x.M)
for i in range(x.n - 2):
if i % 100 == 0:
print('iterations {}'.format(i))
node_mind, d_min = x_mind.min_degree(x_mind.M)
rewards_mindegree += x_mind.eliminate_node(node_mind, reduce=True)
rewards_random += x_rand.eliminate_node(np.random.randint(low=0, high=x_rand.n), reduce=True)
action, log_prob, reward, value_current, value_next, x_rl = self.model(x_rl)
self.model.rewards.append(reward)
self.model.actions.append(action)
self.model.saved_actions.append(SavedAction(log_prob, value_current))
R = 0
actor_losses = []
critic_losses = []
returns = []
for r in self.model.rewards[::-1]:
R = r + gamma * R
returns.insert(0, R)
returns = torch.tensor(returns)
returns = (returns - returns.mean()) / (returns.std() + self.eps)
saved_actions = self.model.saved_actions
for (log_prob, value_current), R in zip(saved_actions, returns):
if use_critic:
advantage = R - value_current
critic_losses.append(-value_current* advantage)
else:
advantage = R - baseline
actor_losses.append(log_prob * advantage.detach())
self.actor_optim.zero_grad()
actor_loss = torch.stack(actor_losses).sum()
actor_loss.backward(retain_graph=True)
self.actor_optim.step()
if use_critic:
self.critic_optim.zero_grad()
critic_closs = torch.stack(critic_losses).sum()
critic_closs.backward()
self.critic_optim.step()
else:
baseline = baseline.detach()
rewards_gcn = sum(self.model.rewards)
_ratio_gcn2mind = rewards_gcn / rewards_mindegree
_ratio_gcn2rand = rewards_gcn / rewards_random
print('graph {:04d}'.format(n_graphs_proceed), 'epoch {:04d}'.format(epoch),
'gcn2mind ratio {}'.format(_ratio_gcn2mind),
'value {}'.format(saved_actions[0].value_current),
'R {}'.format(returns[0]))
print('graph {:04d}'.format(n_graphs_proceed), 'epoch {:04d}'.format(epoch),
'gcn2rand ratio {}'.format(_ratio_gcn2rand))
ratio_gcn2mind.append(_ratio_gcn2mind)
ratio_gcn2rand.append(_ratio_gcn2rand)
del self.model.rewards[:]
del self.model.actions[:]
del self.model.saved_actions[:]
ratio_gcn2mind = np.array(ratio_gcn2mind).reshape(-1)
ratio_gcn2rand = np.array(ratio_gcn2rand).reshape(-1)
min_ratio_gcn2mind = np.min(ratio_gcn2mind)
max_ratio_gcn2mind = np.max(ratio_gcn2mind)
av_ratio_gcn2mind = np.sum(ratio_gcn2mind)/ n_epochs
min_ratio_gcn2rand = np.min(ratio_gcn2rand)
max_ratio_gcn2rand = np.max(ratio_gcn2rand)
av_ratio_gcn2rand = np.sum(ratio_gcn2rand) / n_epochs
print('graph {:04d}'.format(n_graphs_proceed), 'gcn2mind{:04d}',
'min_ratio {}'.format(min_ratio_gcn2mind),
'max_ratio {}'.format(max_ratio_gcn2mind),
'av_ratio {}'.format(av_ratio_gcn2mind))
print('graph {:04d}'.format(n_graphs_proceed), 'gcn2rand{:04d}',
'min_ratio {}'.format(min_ratio_gcn2rand),
'max_ratio {}'.format(max_ratio_gcn2rand),
'av_ratio {}'.format(av_ratio_gcn2rand),
'nb graph proceeded {}'.format(n_graphs_proceed))
n_graphs_proceed += len(X)
| true | true |
f71774d3bb18a2b3e70569ab8a10dcf140e26e81 | 5,989 | py | Python | submit_data.py | dimagi/submission_api_example | 266eb36c6ef6b331ea894298cbacfd2752410f80 | [
"Apache-2.0"
] | null | null | null | submit_data.py | dimagi/submission_api_example | 266eb36c6ef6b331ea894298cbacfd2752410f80 | [
"Apache-2.0"
] | 1 | 2021-12-06T20:29:54.000Z | 2021-12-13T20:32:37.000Z | submit_data.py | dimagi/submission_api_example | 266eb36c6ef6b331ea894298cbacfd2752410f80 | [
"Apache-2.0"
] | 1 | 2021-12-06T20:27:00.000Z | 2021-12-06T20:27:00.000Z | #!/usr/bin/env python3
"""
An example script to send data to CommCare using the Submission API
Usage:
$ export CCHQ_PROJECT_SPACE=my-project-space
$ export CCHQ_CASE_TYPE=person
$ export CCHQ_USERNAME=user@example.com
$ export CCHQ_PASSWORD=MijByG_se3EcKr.t
$ export CCHQ_USER_ID=c0ffeeeeeb574eb8b5d5036c9a61a483
$ export CCHQ_OWNER_ID=c0ffeeeee1e34b12bb5da0dc838e8406
$ ./submit_data.py sample_data.csv
"""
# (Optional) Configure the following settings with your values
# An XML namespace to identify your XForm submission
FORM_XMLNS = 'http://example.com/submission-api-example-form/'
# A string to identify the origin of your data
DEVICE_ID = "submission_api_example"
# End of configurable settings
import csv
import os
import sys
import uuid
from dataclasses import dataclass
from datetime import datetime, timezone
from http.client import responses as http_responses
from typing import Any, Iterable, List, Optional, Tuple
from xml.etree import ElementTree as ET
import requests
from jinja2 import Template
COMMCARE_URL = 'https://www.commcarehq.org/'
@dataclass
class CaseProperty:
    """One dynamic case property (a non-reserved CSV column) as a name/value pair."""
    name: str
    value: Any
@dataclass
class Case:
    """A CommCare case to be created or updated via the Submission API."""
    id: str  # A UUID. Generated if not given in the data.
    name: str  # Required
    type: str  # A name for the case type. e.g. "person" or "site"
    modified_on: str  # Generated if not given. e.g. "2020-06-08T18:41:33.207Z"
    owner_id: str  # ID of the user or location that cases must be assigned to
    properties: List[CaseProperty]  # All other given data
    server_modified_on: Optional[str]  # May be None; passed through from source data
def main(filename):
    """
    Sends data to CommCare HQ using the Submission API.
    """
    rows = get_data(filename)
    xform_str = render_xform(as_cases(rows))
    return submit_xform(xform_str)
def get_data(csv_filename) -> Iterable[dict]:
    """
    Read rows from the given CSV file and yield each one as a dict
    keyed by the header row.
    """
    with open(csv_filename) as csv_file:
        for row in csv.DictReader(csv_file):
            yield row
def as_cases(data: Iterable[dict]) -> Iterable[Case]:
    """
    Cast each input dict to a Case instance.

    Reserved keys map to Case fields; every other key/value pair becomes
    a CaseProperty. Missing ids and timestamps are generated.
    """
    reserved = ('id', 'name', 'case_type', 'modified_on', 'server_modified_on')
    for row in data:
        extras = [CaseProperty(name=key, value=value)
                  for key, value in row.items()
                  if key not in reserved]
        yield Case(
            id=row.get('id', str(uuid.uuid4())),
            name=row['name'],
            type=os.environ['CCHQ_CASE_TYPE'],
            modified_on=row.get('modified_on', now_utc()),
            owner_id=os.environ['CCHQ_OWNER_ID'],
            server_modified_on=row.get('server_modified_on'),
            properties=extras,
        )
def render_xform(cases: Iterable[Case]) -> str:
    """Render the submission XForm XML for *cases* from the Jinja2 template on disk."""
    template_context = {
        'form_xmlns': FORM_XMLNS,
        'device_id': DEVICE_ID,
        'now_utc': now_utc(),
        'cchq_username': os.environ['CCHQ_USERNAME'],
        'cchq_user_id': os.environ['CCHQ_USER_ID'],
        'submission_id': uuid.uuid4().hex,
        'cases': list(cases),
    }
    with open('xform.xml.j2') as template_file:
        template_source = template_file.read()
    return Template(template_source).render(**template_context)
def submit_xform(xform: str) -> Tuple[bool, str]:
    """
    POST the rendered XForm to the CommCare HQ submission endpoint.

    Returns (True, success_message) on success, or (False,
    failure_message) on failure.
    """
    endpoint = f'/a/{os.environ["CCHQ_PROJECT_SPACE"]}/receiver/api/'
    url = join_url(COMMCARE_URL, endpoint)
    credentials = (os.environ['CCHQ_USERNAME'], os.environ['CCHQ_PASSWORD'])
    headers = {'Content-Type': 'text/html; charset=UTF-8'}
    response = requests.post(url, xform.encode('utf-8'),
                             headers=headers, auth=credentials)
    if 200 <= response.status_code < 300:
        return parse_response(response.text)
    return False, http_responses[response.status_code]
def parse_response(text: str) -> Tuple[bool, str]:
    """
    Parse a CommCare HQ Submission API response.

    Returns (True, success_message) on success, or (False,
    failure_message) on failure.

    >>> text = '''
    ... <OpenRosaResponse xmlns="http://openrosa.org/http/response">
    ...     <message nature="submit_success"> √ </message>
    ... </OpenRosaResponse>
    ... '''
    >>> parse_response(text)
    (True, ' √ ')
    """
    root = ET.XML(text)
    message = root.find('{http://openrosa.org/http/response}message')
    succeeded = message.attrib['nature'] == 'submit_success'
    return succeeded, message.text
def join_url(base_url: str, endpoint: str) -> str:
    """
    Join *base_url* and *endpoint* with exactly one forward slash between them.

    >>> join_url('https://example.com/', '/api/foo')
    'https://example.com/api/foo'
    >>> join_url('https://example.com', 'api/foo')
    'https://example.com/api/foo'
    """
    return f"{base_url.rstrip('/')}/{endpoint.lstrip('/')}"
def now_utc() -> str:
    """
    Return the current UTC time in ISO-8601 format with the offset written
    as "Z", e.g. "2020-06-08T18:41:33.207Z".
    """
    timestamp = datetime.now(tz=timezone.utc).isoformat(timespec='milliseconds')
    return timestamp.replace('+00:00', 'Z')
def missing_env_vars():
    """Return the names of required CCHQ_* environment variables that are unset."""
    required = (
        'CCHQ_PROJECT_SPACE',
        'CCHQ_CASE_TYPE',
        'CCHQ_USERNAME',
        'CCHQ_PASSWORD',
        'CCHQ_USER_ID',
        'CCHQ_OWNER_ID',
    )
    return [name for name in required if name not in os.environ]
if __name__ == '__main__':
    # Require exactly one argument: the CSV file to submit.
    if len(sys.argv) != 2:
        print(__doc__)
        sys.exit()
    # Fail fast if any required CCHQ_* settings are absent.
    if missing := missing_env_vars():
        print('Missing environment variables:', ', '.join(missing))
        sys.exit(1)
    success, message = main(sys.argv[1])
    print(message)
    # Non-zero exit code signals submission failure to calling scripts.
    if not success:
        sys.exit(1)
| 29.357843 | 79 | 0.646185 |
FORM_XMLNS = 'http://example.com/submission-api-example-form/'
DEVICE_ID = "submission_api_example"
import csv
import os
import sys
import uuid
from dataclasses import dataclass
from datetime import datetime, timezone
from http.client import responses as http_responses
from typing import Any, Iterable, List, Optional, Tuple
from xml.etree import ElementTree as ET
import requests
from jinja2 import Template
COMMCARE_URL = 'https://www.commcarehq.org/'
@dataclass
class CaseProperty:
name: str
value: Any
@dataclass
class Case:
id: str
name: str
type: str
modified_on: str
owner_id: str
properties: List[CaseProperty]
server_modified_on: Optional[str]
def main(filename):
data = get_data(filename)
cases = as_cases(data)
xform_str = render_xform(cases)
success, message = submit_xform(xform_str)
return success, message
def get_data(csv_filename) -> Iterable[dict]:
with open(csv_filename) as csv_file:
reader = csv.DictReader(csv_file)
yield from reader
def as_cases(data: Iterable[dict]) -> Iterable[Case]:
reserved = ('id', 'name', 'case_type', 'modified_on', 'server_modified_on')
for dict_ in data:
properties = [CaseProperty(name=key, value=value)
for key, value in dict_.items()
if key not in reserved]
yield Case(
id=dict_.get('id', str(uuid.uuid4())),
name=dict_['name'],
type=os.environ['CCHQ_CASE_TYPE'],
modified_on=dict_.get('modified_on', now_utc()),
owner_id=os.environ['CCHQ_OWNER_ID'],
server_modified_on=dict_.get('server_modified_on'),
properties=properties,
)
def render_xform(cases: Iterable[Case]) -> str:
context = {
'form_xmlns': FORM_XMLNS,
'device_id': DEVICE_ID,
'now_utc': now_utc(),
'cchq_username': os.environ['CCHQ_USERNAME'],
'cchq_user_id': os.environ['CCHQ_USER_ID'],
'submission_id': uuid.uuid4().hex,
'cases': list(cases),
}
with open('xform.xml.j2') as template_file:
template = Template(template_file.read())
xform = template.render(**context)
return xform
def submit_xform(xform: str) -> Tuple[bool, str]:
url = join_url(COMMCARE_URL,
f'/a/{os.environ["CCHQ_PROJECT_SPACE"]}/receiver/api/')
auth = (os.environ['CCHQ_USERNAME'], os.environ['CCHQ_PASSWORD'])
headers = {'Content-Type': 'text/html; charset=UTF-8'}
response = requests.post(url, xform.encode('utf-8'),
headers=headers, auth=auth)
if not 200 <= response.status_code < 300:
return False, http_responses[response.status_code]
return parse_response(response.text)
def parse_response(text: str) -> Tuple[bool, str]:
xml = ET.XML(text)
message = xml.find('{http://openrosa.org/http/response}message')
success = message.attrib['nature'] == 'submit_success'
return success, message.text
def join_url(base_url: str, endpoint: str) -> str:
return '/'.join((base_url.rstrip('/'), endpoint.lstrip('/')))
def now_utc() -> str:
now = datetime.now(tz=timezone.utc)
now_iso = now.isoformat(timespec='milliseconds')
now_iso_z = now_iso.replace('+00:00', 'Z')
return now_iso_z
def missing_env_vars():
    """Return the required ``CCHQ_*`` variable names absent from the environment."""
    required = (
        'CCHQ_PROJECT_SPACE',
        'CCHQ_CASE_TYPE',
        'CCHQ_USERNAME',
        'CCHQ_PASSWORD',
        'CCHQ_USER_ID',
        'CCHQ_OWNER_ID',
    )
    return [name for name in required if name not in os.environ]
if __name__ == '__main__':
    # Usage: <script> <csv_filename>.  Any other argument count prints the
    # module docstring and exits with status 0.
    if len(sys.argv) != 2:
        print(__doc__)
        sys.exit()
    # Fail fast (exit status 1) if any required CCHQ_* variable is unset.
    if missing := missing_env_vars():
        print('Missing environment variables:', ', '.join(missing))
        sys.exit(1)
    # main() is defined earlier in this file; the unpacking here requires it
    # to return a 2-tuple, presumably (success, message).  Mirror the result
    # in the process exit status so shell callers can branch on it.
    success, message = main(sys.argv[1])
    print(message)
    if not success:
        sys.exit(1)
| true | true |
f717755be13370d96e8eff2b66e83a3b18716be8 | 515 | py | Python | setup.py | Krozark/meteofrance-py | 7328a857022f263d1609c939851f612c5ed13d08 | [
"MIT"
] | null | null | null | setup.py | Krozark/meteofrance-py | 7328a857022f263d1609c939851f612c5ed13d08 | [
"MIT"
] | null | null | null | setup.py | Krozark/meteofrance-py | 7328a857022f263d1609c939851f612c5ed13d08 | [
"MIT"
] | null | null | null | from setuptools import setup
# Packaging metadata for the krozark-meteofrance distribution.
setup(
    name='krozark-meteofrance',
    version='0.3.9',
    description='Meteo-France weather forecast',
    author='victorcerutti',
    author_email='maxime.barbier1991+meteofrance@gmail.com',
    url='https://github.com/Krozark/meteofrance-py',
    packages=['meteofrance'],
    install_requires=[
        'requests',
        'beautifulsoup4',
        'pytz',
    ],
    license='MIT',
    long_description=(
        'Extract Meteo-France current weather and 1 hour rain forecast'
    ),
)
| 27.105263 | 85 | 0.664078 | from setuptools import setup
setup(
name='krozark-meteofrance',
version='0.3.9',
description = 'Meteo-France weather forecast',
author = 'victorcerutti',
author_email = 'maxime.barbier1991+meteofrance@gmail.com',
url = 'https://github.com/Krozark/meteofrance-py',
packages=['meteofrance',],
install_requires=[
'requests',
'beautifulsoup4',
'pytz'
],
license='MIT',
long_description='Extract Meteo-France current weather and 1 hour rain forecast',
)
| true | true |
f7177676b64b016a2006776e619b093446b0ff41 | 5,353 | py | Python | test/language/choice_types/python/UInt64ParamChoiceTest.py | PeachOS/zserio | ea01f6906c125a6baab7e8ed865eeb08cd46c37c | [
"BSD-3-Clause"
] | 2 | 2019-02-06T17:50:24.000Z | 2019-11-20T16:51:34.000Z | test/language/choice_types/python/UInt64ParamChoiceTest.py | PeachOS/zserio | ea01f6906c125a6baab7e8ed865eeb08cd46c37c | [
"BSD-3-Clause"
] | 1 | 2019-11-25T16:25:51.000Z | 2019-11-25T18:09:39.000Z | test/language/choice_types/python/UInt64ParamChoiceTest.py | PeachOS/zserio | ea01f6906c125a6baab7e8ed865eeb08cd46c37c | [
"BSD-3-Clause"
] | null | null | null | import unittest
import zserio
from testutils import getZserioApi
class UInt64ParamChoiceTest(unittest.TestCase):
    """Tests for the zserio-generated UInt64ParamChoice parameterized choice."""
    @classmethod
    def setUpClass(cls):
        # Compile the schema once per test class and cache the generated API.
        cls.api = getZserioApi(__file__, "choice_types.zs").uint64_param_choice
    def testSelectorConstructor(self):
        """The selector passed to the constructor is stored unchanged."""
        uint64ParamChoice = self.api.UInt64ParamChoice(self.VARIANT_A_SELECTOR)
        self.assertEqual(self.VARIANT_A_SELECTOR, uint64ParamChoice.getSelector())
    def testFromReader(self):
        """fromReader() deserializes the variant selected by the parameter."""
        selector = self.VARIANT_B_SELECTOR
        value = 234
        writer = zserio.BitStreamWriter()
        UInt64ParamChoiceTest._writeUInt64ParamChoiceToStream(writer, selector, value)
        reader = zserio.BitStreamReader(writer.getByteArray())
        uint64ParamChoice = self.api.UInt64ParamChoice.fromReader(reader, selector)
        self.assertEqual(selector, uint64ParamChoice.getSelector())
        self.assertEqual(value, uint64ParamChoice.getB())
    def testEq(self):
        """Equality depends on the stored variant value, not just the selector."""
        uint64ParamChoice1 = self.api.UInt64ParamChoice(self.VARIANT_A_SELECTOR)
        uint64ParamChoice2 = self.api.UInt64ParamChoice(self.VARIANT_A_SELECTOR)
        self.assertTrue(uint64ParamChoice1 == uint64ParamChoice2)
        value = 99
        uint64ParamChoice1.setA(value)
        self.assertFalse(uint64ParamChoice1 == uint64ParamChoice2)
        uint64ParamChoice2.setA(value)
        self.assertTrue(uint64ParamChoice1 == uint64ParamChoice2)
        diffValue = value + 1
        uint64ParamChoice2.setA(diffValue)
        self.assertFalse(uint64ParamChoice1 == uint64ParamChoice2)
    def testHash(self):
        """hash() is consistent with equality across value changes."""
        uint64ParamChoice1 = self.api.UInt64ParamChoice(self.VARIANT_A_SELECTOR)
        uint64ParamChoice2 = self.api.UInt64ParamChoice(self.VARIANT_A_SELECTOR)
        self.assertEqual(hash(uint64ParamChoice1), hash(uint64ParamChoice2))
        value = 99
        uint64ParamChoice1.setA(value)
        self.assertTrue(hash(uint64ParamChoice1) != hash(uint64ParamChoice2))
        uint64ParamChoice2.setA(value)
        self.assertEqual(hash(uint64ParamChoice1), hash(uint64ParamChoice2))
        diffValue = value + 1
        uint64ParamChoice2.setA(diffValue)
        self.assertTrue(hash(uint64ParamChoice1) != hash(uint64ParamChoice2))
    def testGetSelector(self):
        """getSelector() returns the constructor parameter."""
        uint64ParamChoice = self.api.UInt64ParamChoice(self.VARIANT_C_SELECTOR)
        self.assertEqual(self.VARIANT_C_SELECTOR, uint64ParamChoice.getSelector())
    def testGetSetA(self):
        """Round-trip the A (8-bit) variant accessor."""
        uint64ParamChoice = self.api.UInt64ParamChoice(self.VARIANT_A_SELECTOR)
        value = 99
        uint64ParamChoice.setA(value)
        self.assertEqual(value, uint64ParamChoice.getA())
    def testGetSetB(self):
        """Round-trip the B (16-bit) variant accessor."""
        uint64ParamChoice = self.api.UInt64ParamChoice(self.VARIANT_B_SELECTOR)
        value = 234
        uint64ParamChoice.setB(value)
        self.assertEqual(value, uint64ParamChoice.getB())
    def testGetSetC(self):
        """Round-trip the C variant accessor."""
        uint64ParamChoice = self.api.UInt64ParamChoice(self.VARIANT_C_SELECTOR)
        value = 23456
        uint64ParamChoice.setC(value)
        self.assertEqual(value, uint64ParamChoice.getC())
    def testBitSizeOf(self):
        """Bit size follows the selected variant: A -> 8 bits, B -> 16 bits."""
        uint64ParamChoice = self.api.UInt64ParamChoice(self.VARIANT_A_SELECTOR)
        self.assertEqual(8, uint64ParamChoice.bitSizeOf())
        uint64ParamChoice = self.api.UInt64ParamChoice(self.VARIANT_B_SELECTOR)
        self.assertEqual(16, uint64ParamChoice.bitSizeOf())
    def testInitializeOffsets(self):
        """initializeOffsets() advances by the variant's bit size."""
        uint64ParamChoice = self.api.UInt64ParamChoice(self.VARIANT_A_SELECTOR)
        bitPosition = 1
        self.assertEqual(9, uint64ParamChoice.initializeOffsets(bitPosition))
        uint64ParamChoice = self.api.UInt64ParamChoice(self.VARIANT_B_SELECTOR)
        self.assertEqual(17, uint64ParamChoice.initializeOffsets(bitPosition))
    def testReadWrite(self):
        """write() followed by read() reproduces an equal object for A and B."""
        uint64ParamChoice = self.api.UInt64ParamChoice(self.VARIANT_A_SELECTOR)
        byteValue = 99
        uint64ParamChoice.setA(byteValue)
        writer = zserio.BitStreamWriter()
        uint64ParamChoice.write(writer)
        readUInt64ParamChoice = self.api.UInt64ParamChoice(self.VARIANT_A_SELECTOR)
        reader = zserio.BitStreamReader(writer.getByteArray())
        readUInt64ParamChoice.read(reader)
        self.assertEqual(byteValue, readUInt64ParamChoice.getA())
        self.assertEqual(uint64ParamChoice, readUInt64ParamChoice)
        uint64ParamChoice = self.api.UInt64ParamChoice(self.VARIANT_B_SELECTOR)
        shortValue = 234
        uint64ParamChoice.setB(shortValue)
        writer = zserio.BitStreamWriter()
        uint64ParamChoice.write(writer)
        readUInt64ParamChoice = self.api.UInt64ParamChoice(self.VARIANT_B_SELECTOR)
        reader = zserio.BitStreamReader(writer.getByteArray())
        readUInt64ParamChoice.read(reader)
        self.assertEqual(shortValue, readUInt64ParamChoice.getB())
        self.assertEqual(uint64ParamChoice, readUInt64ParamChoice)
    @staticmethod
    def _writeUInt64ParamChoiceToStream(writer, selector, value):
        # Hand-written serialization matching the schema's case widths:
        # selector 1 writes 8 signed bits, 2..4 write 16, 5..6 write
        # nothing (empty case), all other selectors write 32.
        if selector == 1:
            writer.writeSignedBits(value, 8)
        elif selector in (2, 3, 4):
            writer.writeSignedBits(value, 16)
        elif selector in (5, 6):
            pass
        else:
            writer.writeSignedBits(value, 32)
    # Selector values picking the A, B and C cases of the choice.
    VARIANT_A_SELECTOR = 1
    VARIANT_B_SELECTOR = 2
    VARIANT_C_SELECTOR = 7
| 40.862595 | 86 | 0.721838 | import unittest
import zserio
from testutils import getZserioApi
class UInt64ParamChoiceTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.api = getZserioApi(__file__, "choice_types.zs").uint64_param_choice
def testSelectorConstructor(self):
uint64ParamChoice = self.api.UInt64ParamChoice(self.VARIANT_A_SELECTOR)
self.assertEqual(self.VARIANT_A_SELECTOR, uint64ParamChoice.getSelector())
def testFromReader(self):
selector = self.VARIANT_B_SELECTOR
value = 234
writer = zserio.BitStreamWriter()
UInt64ParamChoiceTest._writeUInt64ParamChoiceToStream(writer, selector, value)
reader = zserio.BitStreamReader(writer.getByteArray())
uint64ParamChoice = self.api.UInt64ParamChoice.fromReader(reader, selector)
self.assertEqual(selector, uint64ParamChoice.getSelector())
self.assertEqual(value, uint64ParamChoice.getB())
def testEq(self):
uint64ParamChoice1 = self.api.UInt64ParamChoice(self.VARIANT_A_SELECTOR)
uint64ParamChoice2 = self.api.UInt64ParamChoice(self.VARIANT_A_SELECTOR)
self.assertTrue(uint64ParamChoice1 == uint64ParamChoice2)
value = 99
uint64ParamChoice1.setA(value)
self.assertFalse(uint64ParamChoice1 == uint64ParamChoice2)
uint64ParamChoice2.setA(value)
self.assertTrue(uint64ParamChoice1 == uint64ParamChoice2)
diffValue = value + 1
uint64ParamChoice2.setA(diffValue)
self.assertFalse(uint64ParamChoice1 == uint64ParamChoice2)
def testHash(self):
uint64ParamChoice1 = self.api.UInt64ParamChoice(self.VARIANT_A_SELECTOR)
uint64ParamChoice2 = self.api.UInt64ParamChoice(self.VARIANT_A_SELECTOR)
self.assertEqual(hash(uint64ParamChoice1), hash(uint64ParamChoice2))
value = 99
uint64ParamChoice1.setA(value)
self.assertTrue(hash(uint64ParamChoice1) != hash(uint64ParamChoice2))
uint64ParamChoice2.setA(value)
self.assertEqual(hash(uint64ParamChoice1), hash(uint64ParamChoice2))
diffValue = value + 1
uint64ParamChoice2.setA(diffValue)
self.assertTrue(hash(uint64ParamChoice1) != hash(uint64ParamChoice2))
def testGetSelector(self):
uint64ParamChoice = self.api.UInt64ParamChoice(self.VARIANT_C_SELECTOR)
self.assertEqual(self.VARIANT_C_SELECTOR, uint64ParamChoice.getSelector())
def testGetSetA(self):
uint64ParamChoice = self.api.UInt64ParamChoice(self.VARIANT_A_SELECTOR)
value = 99
uint64ParamChoice.setA(value)
self.assertEqual(value, uint64ParamChoice.getA())
def testGetSetB(self):
uint64ParamChoice = self.api.UInt64ParamChoice(self.VARIANT_B_SELECTOR)
value = 234
uint64ParamChoice.setB(value)
self.assertEqual(value, uint64ParamChoice.getB())
def testGetSetC(self):
uint64ParamChoice = self.api.UInt64ParamChoice(self.VARIANT_C_SELECTOR)
value = 23456
uint64ParamChoice.setC(value)
self.assertEqual(value, uint64ParamChoice.getC())
def testBitSizeOf(self):
uint64ParamChoice = self.api.UInt64ParamChoice(self.VARIANT_A_SELECTOR)
self.assertEqual(8, uint64ParamChoice.bitSizeOf())
uint64ParamChoice = self.api.UInt64ParamChoice(self.VARIANT_B_SELECTOR)
self.assertEqual(16, uint64ParamChoice.bitSizeOf())
def testInitializeOffsets(self):
uint64ParamChoice = self.api.UInt64ParamChoice(self.VARIANT_A_SELECTOR)
bitPosition = 1
self.assertEqual(9, uint64ParamChoice.initializeOffsets(bitPosition))
uint64ParamChoice = self.api.UInt64ParamChoice(self.VARIANT_B_SELECTOR)
self.assertEqual(17, uint64ParamChoice.initializeOffsets(bitPosition))
def testReadWrite(self):
uint64ParamChoice = self.api.UInt64ParamChoice(self.VARIANT_A_SELECTOR)
byteValue = 99
uint64ParamChoice.setA(byteValue)
writer = zserio.BitStreamWriter()
uint64ParamChoice.write(writer)
readUInt64ParamChoice = self.api.UInt64ParamChoice(self.VARIANT_A_SELECTOR)
reader = zserio.BitStreamReader(writer.getByteArray())
readUInt64ParamChoice.read(reader)
self.assertEqual(byteValue, readUInt64ParamChoice.getA())
self.assertEqual(uint64ParamChoice, readUInt64ParamChoice)
uint64ParamChoice = self.api.UInt64ParamChoice(self.VARIANT_B_SELECTOR)
shortValue = 234
uint64ParamChoice.setB(shortValue)
writer = zserio.BitStreamWriter()
uint64ParamChoice.write(writer)
readUInt64ParamChoice = self.api.UInt64ParamChoice(self.VARIANT_B_SELECTOR)
reader = zserio.BitStreamReader(writer.getByteArray())
readUInt64ParamChoice.read(reader)
self.assertEqual(shortValue, readUInt64ParamChoice.getB())
self.assertEqual(uint64ParamChoice, readUInt64ParamChoice)
@staticmethod
def _writeUInt64ParamChoiceToStream(writer, selector, value):
if selector == 1:
writer.writeSignedBits(value, 8)
elif selector in (2, 3, 4):
writer.writeSignedBits(value, 16)
elif selector in (5, 6):
pass
else:
writer.writeSignedBits(value, 32)
VARIANT_A_SELECTOR = 1
VARIANT_B_SELECTOR = 2
VARIANT_C_SELECTOR = 7
| true | true |
f717772bd33c93521c158ce38b78042fe52c2ff5 | 52 | py | Python | subjects/__init__.py | ankit0tech/Research-Productivity-Tool | c08e39daaaa8dfa08f5eb2607986d9f6bf9f02fa | [
"MIT"
] | 1 | 2021-12-12T04:54:05.000Z | 2021-12-12T04:54:05.000Z | subjects/__init__.py | ankit0tech/Research-Productivity-Tool | c08e39daaaa8dfa08f5eb2607986d9f6bf9f02fa | [
"MIT"
] | null | null | null | subjects/__init__.py | ankit0tech/Research-Productivity-Tool | c08e39daaaa8dfa08f5eb2607986d9f6bf9f02fa | [
"MIT"
] | null | null | null | default_app_config = 'subjects.apps.SubjectsConfig'
# Legacy Django (pre-3.2) hook naming this app's AppConfig class.
default_app_config = 'subjects.apps.SubjectsConfig'
| true | true |
f71777438e6b24d7bdde702a1788bd674bd9b0a3 | 589 | py | Python | Appium_learning/02_ChangeApp.py | yeyuning1/AutoTT | 1ce88e9e73d71fa11d4d8ad12bd6741aa71f97d2 | [
"MIT"
] | null | null | null | Appium_learning/02_ChangeApp.py | yeyuning1/AutoTT | 1ce88e9e73d71fa11d4d8ad12bd6741aa71f97d2 | [
"MIT"
] | 1 | 2021-06-02T00:24:41.000Z | 2021-06-02T00:24:41.000Z | Appium_learning/02_ChangeApp.py | yeyuning1/AutoTT | 1ce88e9e73d71fa11d4d8ad12bd6741aa71f97d2 | [
"MIT"
] | null | null | null | import time
from appium import webdriver
from Appium_learning import app_settings
# Connect to a locally running Appium server, launching the app described
# by the shared desired capabilities.
driver = webdriver.Remote('http://localhost:4723/wd/hub', app_settings.desired_caps)
# Show which package/activity/context is in the foreground at startup.
print(driver.current_package)
print(driver.current_activity)
print(driver.context)
time.sleep(5)
# To find the focused package/activity manually on a device:
# adb shell dumpsys window windows | findstr(grep) mFocusedApp
driver.start_activity('com.android.messaging', '.ui.conversationlist.ConversationListActivity')
# After switching, these should reflect the Messaging app.
print(driver.current_package)
print(driver.current_activity)
print(driver.context)
time.sleep(5)
# driver.quit() destroys the driver object --> stop_client
driver.close_app()
| 31 | 95 | 0.814941 | import time
from appium import webdriver
from Appium_learning import app_settings
driver = webdriver.Remote('http://localhost:4723/wd/hub', app_settings.desired_caps)
print(driver.current_package)
print(driver.current_activity)
print(driver.context)
time.sleep(5)
driver.start_activity('com.android.messaging', '.ui.conversationlist.ConversationListActivity')
print(driver.current_package)
print(driver.current_activity)
print(driver.context)
time.sleep(5)
driver.close_app()
| true | true |
f717798fa86a8765f50f6a661d1e837315188e97 | 2,151 | py | Python | setup.py | deliri/ChatterBot | 8d95c43371bf8b7b1a1c44f77827b239bf38dc4e | [
"BSD-3-Clause"
] | 1 | 2021-03-06T00:28:20.000Z | 2021-03-06T00:28:20.000Z | setup.py | deliri/ChatterBot | 8d95c43371bf8b7b1a1c44f77827b239bf38dc4e | [
"BSD-3-Clause"
] | null | null | null | setup.py | deliri/ChatterBot | 8d95c43371bf8b7b1a1c44f77827b239bf38dc4e | [
"BSD-3-Clause"
] | 2 | 2017-05-30T02:18:30.000Z | 2021-02-21T18:15:25.000Z | #!/usr/bin/env python
"""
ChatterBot setup file.
"""
from setuptools import setup
# Dynamically retrieve the version information from the chatterbot module
CHATTERBOT = __import__('chatterbot')
VERSION = CHATTERBOT.__version__
AUTHOR = CHATTERBOT.__author__
AUTHOR_EMAIL = CHATTERBOT.__email__
URL = CHATTERBOT.__url__
DESCRIPTION = CHATTERBOT.__doc__
with open('requirements.txt') as requirements:
REQUIREMENTS = requirements.readlines()
setup(
name='ChatterBot',
version=VERSION,
url=URL,
download_url='{}/tarball/{}'.format(URL, VERSION),
setup_requires=['setuptools-markdown'],
long_description_markdown_filename='readme.md',
description=DESCRIPTION,
author=AUTHOR,
author_email=AUTHOR_EMAIL,
packages=[
'chatterbot',
'chatterbot.input',
'chatterbot.output',
'chatterbot.storage',
'chatterbot.logic',
'chatterbot.corpus',
'chatterbot.conversation',
'chatterbot.ext',
'chatterbot.ext.django_chatterbot',
'chatterbot.ext.django_chatterbot.migrations',
'chatterbot.ext.django_chatterbot.management',
'chatterbot.ext.django_chatterbot.management.commands'
],
package_dir={'chatterbot': 'chatterbot'},
include_package_data=True,
install_requires=REQUIREMENTS,
license='BSD',
zip_safe=False,
platforms=['any'],
keywords=['ChatterBot', 'chatbot', 'chat', 'bot'],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Environment :: Console',
'Environment :: Web Environment',
'Operating System :: OS Independent',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Communications :: Chat',
'Topic :: Internet',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
],
test_suite='tests',
tests_require=['mock']
)
| 31.173913 | 73 | 0.653185 |
from setuptools import setup
CHATTERBOT = __import__('chatterbot')
VERSION = CHATTERBOT.__version__
AUTHOR = CHATTERBOT.__author__
AUTHOR_EMAIL = CHATTERBOT.__email__
URL = CHATTERBOT.__url__
DESCRIPTION = CHATTERBOT.__doc__
with open('requirements.txt') as requirements:
REQUIREMENTS = requirements.readlines()
setup(
name='ChatterBot',
version=VERSION,
url=URL,
download_url='{}/tarball/{}'.format(URL, VERSION),
setup_requires=['setuptools-markdown'],
long_description_markdown_filename='readme.md',
description=DESCRIPTION,
author=AUTHOR,
author_email=AUTHOR_EMAIL,
packages=[
'chatterbot',
'chatterbot.input',
'chatterbot.output',
'chatterbot.storage',
'chatterbot.logic',
'chatterbot.corpus',
'chatterbot.conversation',
'chatterbot.ext',
'chatterbot.ext.django_chatterbot',
'chatterbot.ext.django_chatterbot.migrations',
'chatterbot.ext.django_chatterbot.management',
'chatterbot.ext.django_chatterbot.management.commands'
],
package_dir={'chatterbot': 'chatterbot'},
include_package_data=True,
install_requires=REQUIREMENTS,
license='BSD',
zip_safe=False,
platforms=['any'],
keywords=['ChatterBot', 'chatbot', 'chat', 'bot'],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Environment :: Console',
'Environment :: Web Environment',
'Operating System :: OS Independent',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Communications :: Chat',
'Topic :: Internet',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
],
test_suite='tests',
tests_require=['mock']
)
| true | true |
f71779abb101df0998a79dffc26322edc971e6e8 | 10,423 | py | Python | seleniumbase/core/jqc_helper.py | mdmintz/seleniumspot | f5c225aa4fcd0b4124fc990e3892c36736290ce8 | [
"MIT"
] | 1 | 2015-06-17T10:16:26.000Z | 2015-06-17T10:16:26.000Z | seleniumbase/core/jqc_helper.py | mdmintz/seleniumspot | f5c225aa4fcd0b4124fc990e3892c36736290ce8 | [
"MIT"
] | null | null | null | seleniumbase/core/jqc_helper.py | mdmintz/seleniumspot | f5c225aa4fcd0b4124fc990e3892c36736290ce8 | [
"MIT"
] | null | null | null | """
This module contains methods for opening jquery-confirm boxes.
These helper methods SHOULD NOT be called directly from tests.
"""
from seleniumbase.fixtures import constants
from seleniumbase.fixtures import js_utils
form_code = """'<form align="center" action="" class="jqc_form">' +
'<div class="form-group">' +
'<input style="font-size:20px; background-color: #f8fdfd; ' +
' width: 84%%; border: 1px solid blue; ' +
' box-shadow:inset 0 0 2px 2px #f4fafa;"' +
' type="text" class="jqc_input" />' +
'</div>' +
'</form>'"""
def jquery_confirm_button_dialog(driver, message, buttons, options=None):
    """Open a jquery-confirm dialog showing *message* with action buttons.

    :param driver: WebDriver used to inject and execute the JavaScript.
    :param message: HTML text displayed in the dialog title area.
    :param buttons: list of ``(text, color)`` tuples, one per button.  The
        clicked button's text is stored in ``jconfirm.lastButtonText``.
    :param options: optional list of ``(name, value)`` tuples; supported
        names are "theme", "color" and "width".
    """
    js_utils.activate_jquery_confirm(driver)
    # These defaults will be overwritten later if set
    theme = constants.JqueryConfirm.DEFAULT_THEME
    border_color = constants.JqueryConfirm.DEFAULT_COLOR
    width = constants.JqueryConfirm.DEFAULT_WIDTH
    if options:
        for option in options:
            if option[0].lower() == "theme":
                theme = option[1]
            elif option[0].lower() == "color":
                border_color = option[1]
            elif option[0].lower() == "width":
                width = option[1]
            else:
                raise Exception('Unknown option: "%s"' % option[0])
    if not message:
        message = ""
    key_row = ""
    if len(buttons) == 1:  # There's only one button as an option
        key_row = "keys: ['enter', 'y', '1'],"  # Shortcut: "Enter","Y","1"
    b_html = """button_%s: {
            btnClass: 'btn-%s',
            text: '<b>%s</b>',
            %s
            action: function(){
                jqc_status = '%s';
                $jqc_status = jqc_status;
                jconfirm.lastButtonText = jqc_status;
            }
        },"""
    all_buttons = ""
    btn_count = 0
    for button in buttons:
        btn_count += 1
        text = button[0]
        text = js_utils.escape_quotes_if_needed(text)
        if len(buttons) > 1 and text.lower() == "yes":
            key_row = "keys: ['y'],"
            if btn_count < 10:
                key_row = "keys: ['y', '%s']," % btn_count
        elif len(buttons) > 1 and text.lower() == "no":
            key_row = "keys: ['n'],"
            if btn_count < 10:
                key_row = "keys: ['n', '%s']," % btn_count
        elif len(buttons) > 1:
            if btn_count < 10:
                key_row = "keys: ['%s']," % btn_count
            else:
                # BUGFIX: reset the shortcut row so that buttons past the
                # ninth don't silently inherit the previous button's key
                # binding (key_row used to carry over between iterations).
                key_row = ""
        color = button[1]
        if not color:
            color = "blue"
        new_button = b_html % (btn_count, color, text, key_row, text)
        all_buttons += new_button
    content = '<div></div><font color="#0066ee">%s</font>' % (message)
    content = js_utils.escape_quotes_if_needed(content)
    # Darker overlays for themes whose chrome needs extra contrast.
    overlay_opacity = "0.32"
    if theme.lower() == "supervan":
        overlay_opacity = "0.56"
    if theme.lower() == "bootstrap":
        overlay_opacity = "0.64"
    if theme.lower() == "modern":
        overlay_opacity = "0.5"
    if theme.lower() == "material":
        overlay_opacity = "0.4"
    jqcd = """jconfirm({
            boxWidth: '%s',
            useBootstrap: false,
            containerFluid: true,
            bgOpacity: %s,
            type: '%s',
            theme: '%s',
            animationBounce: 1,
            typeAnimated: true,
            animation: 'scale',
            draggable: true,
            dragWindowGap: 1,
            container: 'body',
            title: '%s',
            content: '<div></div>',
            buttons: {
                %s
            }
        });""" % (
        width,
        overlay_opacity,
        border_color,
        theme,
        content,
        all_buttons,
    )
    driver.execute_script(jqcd)
def jquery_confirm_text_dialog(driver, message, button=None, options=None):
    """Open a jquery-confirm dialog containing a single text-input field.

    :param driver: WebDriver used to inject and execute the JavaScript.
    :param message: HTML text displayed above the input field.
    :param button: optional ``(text, color)`` tuple for the submit button;
        defaults to ``("Submit", "blue")``.
    :param options: optional list of ``(name, value)`` tuples; supported
        names are "theme", "color" and "width".

    The submitted text is stored in ``jconfirm.lastInputText``.
    """
    js_utils.activate_jquery_confirm(driver)
    # These defaults will be overwritten later if set
    theme = constants.JqueryConfirm.DEFAULT_THEME
    border_color = constants.JqueryConfirm.DEFAULT_COLOR
    width = constants.JqueryConfirm.DEFAULT_WIDTH
    if not message:
        message = ""
    if button:
        # isinstance() replaces the two separate ``type(x) is`` checks (and
        # also accepts list/tuple subclasses); the error message is unchanged.
        if not isinstance(button, (list, tuple)) or len(button) != 2:
            raise Exception('"button" should be a (text, color) tuple!')
    else:
        button = ("Submit", "blue")
    if options:
        for option in options:
            if option[0].lower() == "theme":
                theme = option[1]
            elif option[0].lower() == "color":
                border_color = option[1]
            elif option[0].lower() == "width":
                width = option[1]
            else:
                raise Exception('Unknown option: "%s"' % option[0])
    btn_text = button[0]
    btn_color = button[1]
    if not btn_color:
        btn_color = "blue"
    content = '<div></div><font color="#0066ee">%s</font>' % (message)
    content = js_utils.escape_quotes_if_needed(content)
    # Darker overlays for themes whose chrome needs extra contrast.
    overlay_opacity = "0.32"
    if theme.lower() == "supervan":
        overlay_opacity = "0.56"
    if theme.lower() == "bootstrap":
        overlay_opacity = "0.64"
    if theme.lower() == "modern":
        overlay_opacity = "0.5"
    if theme.lower() == "material":
        overlay_opacity = "0.4"
    jqcd = """jconfirm({
            boxWidth: '%s',
            useBootstrap: false,
            containerFluid: true,
            bgOpacity: %s,
            type: '%s',
            theme: '%s',
            animationBounce: 1,
            typeAnimated: true,
            animation: 'scale',
            draggable: true,
            dragWindowGap: 1,
            container: 'body',
            title: '%s',
            content: '<div></div>' +
            %s,
            buttons: {
                formSubmit: {
                    btnClass: 'btn-%s',
                    text: '%s',
                    action: function () {
                        jqc_input = this.$content.find('.jqc_input').val();
                        $jqc_input = this.$content.find('.jqc_input').val();
                        jconfirm.lastInputText = jqc_input;
                        $jqc_status = '%s'; // There is only one button
                    },
                },
            },
            onContentReady: function () {
                var jc = this;
                this.$content.find('form.jqc_form').on('submit', function (e) {
                    // User submits the form by pressing "Enter" in the field
                    e.preventDefault();
                    jc.$$formSubmit.trigger('click'); // Click the button
                });
            }
        });""" % (
        width,
        overlay_opacity,
        border_color,
        theme,
        content,
        form_code,
        btn_color,
        btn_text,
        btn_text,
    )
    driver.execute_script(jqcd)
def jquery_confirm_full_dialog(driver, message, buttons, options=None):
    """Open a jquery-confirm dialog with a text-input field AND buttons.

    :param driver: WebDriver used to inject and execute the JavaScript.
    :param message: HTML text displayed above the input field.
    :param buttons: list of ``(text, color)`` tuples, one per button.
    :param options: optional list of ``(name, value)`` tuples; supported
        names are "theme", "color" and "width".

    The typed text is stored in ``jconfirm.lastInputText``; for a
    single-button dialog the button's text is also stored in
    ``jconfirm.lastButtonText``.
    """
    js_utils.activate_jquery_confirm(driver)
    # Start from the defaults; entries in ``options`` may override below.
    theme = constants.JqueryConfirm.DEFAULT_THEME
    border_color = constants.JqueryConfirm.DEFAULT_COLOR
    width = constants.JqueryConfirm.DEFAULT_WIDTH
    message = message or ""
    multi_btn_template = """button_%s: {
            btnClass: 'btn-%s',
            text: '%s',
            action: function(){
                jqc_input = this.$content.find('.jqc_input').val();
                $jqc_input = this.$content.find('.jqc_input').val();
                jconfirm.lastInputText = jqc_input;
                $jqc_status = '%s';
            }
        },"""
    single_btn_template = """formSubmit: {
            btnClass: 'btn-%s',
            text: '%s',
            action: function(){
                jqc_input = this.$content.find('.jqc_input').val();
                $jqc_input = this.$content.find('.jqc_input').val();
                jconfirm.lastInputText = jqc_input;
                jqc_status = '%s';
                $jqc_status = jqc_status;
                jconfirm.lastButtonText = jqc_status;
            }
        },"""
    one_button_trigger = ""
    if len(buttons) == 1:
        # A lone button doubles as the form-submit handler for "Enter/Return"
        one_button_trigger = "jc.$$formSubmit.trigger('click');"
    all_buttons = ""
    for btn_count, button in enumerate(buttons, start=1):
        text = js_utils.escape_quotes_if_needed(button[0])
        color = button[1] or "blue"
        if len(buttons) == 1:
            all_buttons += single_btn_template % (color, text, text)
        else:
            all_buttons += multi_btn_template % (btn_count, color, text, text)
    if options:
        for option in options:
            key = option[0].lower()
            if key == "theme":
                theme = option[1]
            elif key == "color":
                border_color = option[1]
            elif key == "width":
                width = option[1]
            else:
                raise Exception('Unknown option: "%s"' % option[0])
    content = js_utils.escape_quotes_if_needed(
        '<div></div><font color="#0066ee">%s</font>' % (message)
    )
    # Darker overlays for themes whose chrome needs extra contrast.
    opacity_by_theme = {
        "supervan": "0.56",
        "bootstrap": "0.64",
        "modern": "0.5",
        "material": "0.4",
    }
    overlay_opacity = opacity_by_theme.get(theme.lower(), "0.32")
    jqcd = """jconfirm({
            boxWidth: '%s',
            useBootstrap: false,
            containerFluid: true,
            bgOpacity: %s,
            type: '%s',
            theme: '%s',
            animationBounce: 1,
            typeAnimated: true,
            animation: 'scale',
            draggable: true,
            dragWindowGap: 1,
            container: 'body',
            title: '%s',
            content: '<div></div>' +
            %s,
            buttons: {
                %s
            },
            onContentReady: function () {
                var jc = this;
                this.$content.find('form.jqc_form').on('submit', function (e) {
                    // User submits the form by pressing "Enter" in the field
                    e.preventDefault();
                    %s
                });
            }
        });""" % (
        width,
        overlay_opacity,
        border_color,
        theme,
        content,
        form_code,
        all_buttons,
        one_button_trigger,
    )
    driver.execute_script(jqcd)
| 33.300319 | 75 | 0.515399 | from seleniumbase.fixtures import constants
from seleniumbase.fixtures import js_utils
form_code = """'<form align="center" action="" class="jqc_form">' +
'<div class="form-group">' +
'<input style="font-size:20px; background-color: #f8fdfd; ' +
' width: 84%%; border: 1px solid blue; ' +
' box-shadow:inset 0 0 2px 2px #f4fafa;"' +
' type="text" class="jqc_input" />' +
'</div>' +
'</form>'"""
def jquery_confirm_button_dialog(driver, message, buttons, options=None):
js_utils.activate_jquery_confirm(driver)
theme = constants.JqueryConfirm.DEFAULT_THEME
border_color = constants.JqueryConfirm.DEFAULT_COLOR
width = constants.JqueryConfirm.DEFAULT_WIDTH
if options:
for option in options:
if option[0].lower() == "theme":
theme = option[1]
elif option[0].lower() == "color":
border_color = option[1]
elif option[0].lower() == "width":
width = option[1]
else:
raise Exception('Unknown option: "%s"' % option[0])
if not message:
message = ""
key_row = ""
if len(buttons) == 1:
key_row = "keys: ['enter', 'y', '1']," # Shortcut: "Enter","Y","1"
b_html = """button_%s: {
btnClass: 'btn-%s',
text: '<b>%s</b>',
%s
action: function(){
jqc_status = '%s';
$jqc_status = jqc_status;
jconfirm.lastButtonText = jqc_status;
}
},"""
all_buttons = ""
btn_count = 0
for button in buttons:
btn_count += 1
text = button[0]
text = js_utils.escape_quotes_if_needed(text)
if len(buttons) > 1 and text.lower() == "yes":
key_row = "keys: ['y'],"
if btn_count < 10:
key_row = "keys: ['y', '%s']," % btn_count
elif len(buttons) > 1 and text.lower() == "no":
key_row = "keys: ['n'],"
if btn_count < 10:
key_row = "keys: ['n', '%s']," % btn_count
elif len(buttons) > 1:
if btn_count < 10:
key_row = "keys: ['%s']," % btn_count
color = button[1]
if not color:
color = "blue"
new_button = b_html % (btn_count, color, text, key_row, text)
all_buttons += new_button
content = '<div></div><font color="#0066ee">%s</font>' % (message)
content = js_utils.escape_quotes_if_needed(content)
overlay_opacity = "0.32"
if theme.lower() == "supervan":
overlay_opacity = "0.56"
if theme.lower() == "bootstrap":
overlay_opacity = "0.64"
if theme.lower() == "modern":
overlay_opacity = "0.5"
if theme.lower() == "material":
overlay_opacity = "0.4"
jqcd = """jconfirm({
boxWidth: '%s',
useBootstrap: false,
containerFluid: true,
bgOpacity: %s,
type: '%s',
theme: '%s',
animationBounce: 1,
typeAnimated: true,
animation: 'scale',
draggable: true,
dragWindowGap: 1,
container: 'body',
title: '%s',
content: '<div></div>',
buttons: {
%s
}
});""" % (
width,
overlay_opacity,
border_color,
theme,
content,
all_buttons,
)
driver.execute_script(jqcd)
def jquery_confirm_text_dialog(driver, message, button=None, options=None):
js_utils.activate_jquery_confirm(driver)
# These defaults will be overwritten later if set
theme = constants.JqueryConfirm.DEFAULT_THEME
border_color = constants.JqueryConfirm.DEFAULT_COLOR
width = constants.JqueryConfirm.DEFAULT_WIDTH
if not message:
message = ""
if button:
if not type(button) is list and not type(button) is tuple:
raise Exception('"button" should be a (text, color) tuple!')
if len(button) != 2:
raise Exception('"button" should be a (text, color) tuple!')
else:
button = ("Submit", "blue")
if options:
for option in options:
if option[0].lower() == "theme":
theme = option[1]
elif option[0].lower() == "color":
border_color = option[1]
elif option[0].lower() == "width":
width = option[1]
else:
raise Exception('Unknown option: "%s"' % option[0])
btn_text = button[0]
btn_color = button[1]
if not btn_color:
btn_color = "blue"
content = '<div></div><font color="#0066ee">%s</font>' % (message)
content = js_utils.escape_quotes_if_needed(content)
overlay_opacity = "0.32"
if theme.lower() == "supervan":
overlay_opacity = "0.56"
if theme.lower() == "bootstrap":
overlay_opacity = "0.64"
if theme.lower() == "modern":
overlay_opacity = "0.5"
if theme.lower() == "material":
overlay_opacity = "0.4"
jqcd = """jconfirm({
boxWidth: '%s',
useBootstrap: false,
containerFluid: true,
bgOpacity: %s,
type: '%s',
theme: '%s',
animationBounce: 1,
typeAnimated: true,
animation: 'scale',
draggable: true,
dragWindowGap: 1,
container: 'body',
title: '%s',
content: '<div></div>' +
%s,
buttons: {
formSubmit: {
btnClass: 'btn-%s',
text: '%s',
action: function () {
jqc_input = this.$content.find('.jqc_input').val();
$jqc_input = this.$content.find('.jqc_input').val();
jconfirm.lastInputText = jqc_input;
$jqc_status = '%s'; // There is only one button
},
},
},
onContentReady: function () {
var jc = this;
this.$content.find('form.jqc_form').on('submit', function (e) {
// User submits the form by pressing "Enter" in the field
e.preventDefault();
jc.$$formSubmit.trigger('click'); // Click the button
});
}
});""" % (
width,
overlay_opacity,
border_color,
theme,
content,
form_code,
btn_color,
btn_text,
btn_text,
)
driver.execute_script(jqcd)
def jquery_confirm_full_dialog(driver, message, buttons, options=None):
js_utils.activate_jquery_confirm(driver)
# These defaults will be overwritten later if set
theme = constants.JqueryConfirm.DEFAULT_THEME
border_color = constants.JqueryConfirm.DEFAULT_COLOR
width = constants.JqueryConfirm.DEFAULT_WIDTH
if not message:
message = ""
btn_count = 0
b_html = """button_%s: {
btnClass: 'btn-%s',
text: '%s',
action: function(){
jqc_input = this.$content.find('.jqc_input').val();
$jqc_input = this.$content.find('.jqc_input').val();
jconfirm.lastInputText = jqc_input;
$jqc_status = '%s';
}
},"""
b1_html = """formSubmit: {
btnClass: 'btn-%s',
text: '%s',
action: function(){
jqc_input = this.$content.find('.jqc_input').val();
$jqc_input = this.$content.find('.jqc_input').val();
jconfirm.lastInputText = jqc_input;
jqc_status = '%s';
$jqc_status = jqc_status;
jconfirm.lastButtonText = jqc_status;
}
},"""
one_button_trigger = ""
if len(buttons) == 1:
# If there's only one button, allow form submit with "Enter/Return"
one_button_trigger = "jc.$$formSubmit.trigger('click');"
all_buttons = ""
for button in buttons:
text = button[0]
text = js_utils.escape_quotes_if_needed(text)
color = button[1]
if not color:
color = "blue"
btn_count += 1
if len(buttons) == 1:
new_button = b1_html % (color, text, text)
else:
new_button = b_html % (btn_count, color, text, text)
all_buttons += new_button
if options:
for option in options:
if option[0].lower() == "theme":
theme = option[1]
elif option[0].lower() == "color":
border_color = option[1]
elif option[0].lower() == "width":
width = option[1]
else:
raise Exception('Unknown option: "%s"' % option[0])
content = '<div></div><font color="#0066ee">%s</font>' % (message)
content = js_utils.escape_quotes_if_needed(content)
overlay_opacity = "0.32"
if theme.lower() == "supervan":
overlay_opacity = "0.56"
if theme.lower() == "bootstrap":
overlay_opacity = "0.64"
if theme.lower() == "modern":
overlay_opacity = "0.5"
if theme.lower() == "material":
overlay_opacity = "0.4"
jqcd = """jconfirm({
boxWidth: '%s',
useBootstrap: false,
containerFluid: true,
bgOpacity: %s,
type: '%s',
theme: '%s',
animationBounce: 1,
typeAnimated: true,
animation: 'scale',
draggable: true,
dragWindowGap: 1,
container: 'body',
title: '%s',
content: '<div></div>' +
%s,
buttons: {
%s
},
onContentReady: function () {
var jc = this;
this.$content.find('form.jqc_form').on('submit', function (e) {
// User submits the form by pressing "Enter" in the field
e.preventDefault();
%s
});
}
});""" % (
width,
overlay_opacity,
border_color,
theme,
content,
form_code,
all_buttons,
one_button_trigger,
)
driver.execute_script(jqcd)
| true | true |
f71779ec19d93ec24d41da7b598913f8c5798de5 | 1,316 | py | Python | ShowProcess.py | 4a5g0030/line_follow | 570e65fb62803f7f5062402a45654809b01b7aaa | [
"MIT"
] | 1 | 2019-06-19T18:32:28.000Z | 2019-06-19T18:32:28.000Z | ShowProcess.py | 4a5g0030/line_follow | 570e65fb62803f7f5062402a45654809b01b7aaa | [
"MIT"
] | null | null | null | ShowProcess.py | 4a5g0030/line_follow | 570e65fb62803f7f5062402a45654809b01b7aaa | [
"MIT"
] | null | null | null | import time
import sys
class ShowProcess():
# """
# 显示处理进度的类
# 调用该类相关函数即可实现处理进度的显示
# """
i = 0 # 当前的处理进度
max_steps = 0 # 总共需要处理的次数
max_arrow = 50 #进度条的长度
infoDone = 'done'
# 初始化函数,需要知道总共的处理次数
def __init__(self, max_steps, infoDone = 'Done'):
self.max_steps = max_steps
self.i = 0
self.infoDone = infoDone
# 显示函数,根据当前的处理进度i显示进度
# 效果为[>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>]100.00%
def show_process(self, i=None):
if i is not None:
self.i = i
else:
self.i += 1
num_arrow = int(self.i * self.max_arrow / self.max_steps) #计算显示多少个'>'
num_line = self.max_arrow - num_arrow #计算显示多少个'-'
percent = self.i * 100.0 / self.max_steps #计算完成进度,格式为xx.xx%
process_bar = '[' + '>' * num_arrow + '-' * num_line + ']'\
+ '%.2f' % percent + '%' + '\r' #带输出的字符串,'\r'表示不换行回到最左边
sys.stdout.write(process_bar) #这两句打印字符到终端
sys.stdout.flush()
if self.i >= self.max_steps:
self.close()
def close(self):
print('')
print(self.infoDone)
self.i = 0
# ————————————————
# 版权声明:本文为CSDN博主「持久决心」的原创文章,遵循CC 4.0 by-sa版权协议,转载请附上原文出处链接及本声明。
# 原文链接:https://blog.csdn.net/u013832707/article/details/73608504
| 29.909091 | 77 | 0.534195 | import time
import sys
class ShowProcess():
# 显示处理进度的类
# 调用该类相关函数即可实现处理进度的显示
# """
i = 0
max_steps = 0
max_arrow = 50
infoDone = 'done'
def __init__(self, max_steps, infoDone = 'Done'):
self.max_steps = max_steps
self.i = 0
self.infoDone = infoDone
def show_process(self, i=None):
if i is not None:
self.i = i
else:
self.i += 1
num_arrow = int(self.i * self.max_arrow / self.max_steps)
num_line = self.max_arrow - num_arrow
percent = self.i * 100.0 / self.max_steps
process_bar = '[' + '>' * num_arrow + '-' * num_line + ']'\
+ '%.2f' % percent + '%' + '\r'
sys.stdout.write(process_bar)
sys.stdout.flush()
if self.i >= self.max_steps:
self.close()
def close(self):
print('')
print(self.infoDone)
self.i = 0
| true | true |
f7177a2b9fd9f213e95853d3176e200b98b80f37 | 1,793 | py | Python | unitest/test_supermesh.py | JeremieMelo/ADEPT | f79f518197798735cb684b373e11cdcc8a80d872 | [
"MIT"
] | 5 | 2022-02-26T09:14:47.000Z | 2022-03-20T22:57:06.000Z | unitest/test_supermesh.py | JeremieMelo/ADEPT | f79f518197798735cb684b373e11cdcc8a80d872 | [
"MIT"
] | null | null | null | unitest/test_supermesh.py | JeremieMelo/ADEPT | f79f518197798735cb684b373e11cdcc8a80d872 | [
"MIT"
] | null | null | null | '''
Description:
Author: Jiaqi Gu (jqgu@utexas.edu)
Date: 2021-09-27 23:48:01
LastEditors: Jiaqi Gu (jqgu@utexas.edu)
LastEditTime: 2022-02-26 02:22:52
'''
import torch
from core.models.layers.super_mesh import super_layer_name_dict
def test():
device=torch.device("cuda:0")
p, q, k = 2, 2, 4
x = torch.eye(k, dtype=torch.cfloat, device=device).unsqueeze(0).repeat(q,1,1).permute(1,0,2).contiguous()
sigma = torch.ones(p,q,k, device=device)
# x [bs, q, k]
arch = dict(
n_waveguides=k,
n_front_share_waveguides=k,
n_front_share_ops=k,
n_blocks=4,
n_layers_per_block=2,
n_front_share_blocks=2,
share_ps="row_col",
interleave_dc=True,
)
sample_arch = [
k//3,1,
k//2,1,
k//2,1,
k//2,1,
4
]
layer = super_layer_name_dict["ps_dc_cr"](arch, device=device)
super_ps_layers = layer.build_ps_layser(grid_dim_x=q, grid_dim_y=p)
for m in super_ps_layers:
# m.reset_parameters(alg="identity")
m.reset_parameters(alg="uniform")
layer.set_sample_arch(sample_arch)
print(layer)
layer.set_identity_cr()
layer.build_sampling_coefficients()
layer.set_gumbel_temperature(0.1)
layer.set_aux_skip_path(0)
layer.build_arch_mask()
U,V = layer.get_UV(super_ps_layers, q, p)
print(U, U.size())
print(U[0,0].conj().t().matmul(U[0,0]))
print(V)
print(V[0,0].conj().t().matmul(V[0,0]))
weight = layer.get_weight_matrix(super_ps_layers, sigma)
print(weight)
weight.sum().backward()
print(super_ps_layers[0].weight.grad.norm(p=2))
print(layer.super_layers_all[0].weight.grad.norm(p=2))
print(layer.super_layers_all[1].weight.grad.norm(p=2))
if __name__ == "__main__":
test()
| 28.460317 | 110 | 0.644172 | import torch
from core.models.layers.super_mesh import super_layer_name_dict
def test():
device=torch.device("cuda:0")
p, q, k = 2, 2, 4
x = torch.eye(k, dtype=torch.cfloat, device=device).unsqueeze(0).repeat(q,1,1).permute(1,0,2).contiguous()
sigma = torch.ones(p,q,k, device=device)
arch = dict(
n_waveguides=k,
n_front_share_waveguides=k,
n_front_share_ops=k,
n_blocks=4,
n_layers_per_block=2,
n_front_share_blocks=2,
share_ps="row_col",
interleave_dc=True,
)
sample_arch = [
k//3,1,
k//2,1,
k//2,1,
k//2,1,
4
]
layer = super_layer_name_dict["ps_dc_cr"](arch, device=device)
super_ps_layers = layer.build_ps_layser(grid_dim_x=q, grid_dim_y=p)
for m in super_ps_layers:
m.reset_parameters(alg="uniform")
layer.set_sample_arch(sample_arch)
print(layer)
layer.set_identity_cr()
layer.build_sampling_coefficients()
layer.set_gumbel_temperature(0.1)
layer.set_aux_skip_path(0)
layer.build_arch_mask()
U,V = layer.get_UV(super_ps_layers, q, p)
print(U, U.size())
print(U[0,0].conj().t().matmul(U[0,0]))
print(V)
print(V[0,0].conj().t().matmul(V[0,0]))
weight = layer.get_weight_matrix(super_ps_layers, sigma)
print(weight)
weight.sum().backward()
print(super_ps_layers[0].weight.grad.norm(p=2))
print(layer.super_layers_all[0].weight.grad.norm(p=2))
print(layer.super_layers_all[1].weight.grad.norm(p=2))
if __name__ == "__main__":
test()
| true | true |
f7177a7169c09bf58f9be260fccfe1d0276b2e83 | 712 | py | Python | parsons/google/utitities.py | Tomiiwa/parsons | 3886327c197e357ba5342603d8409774a541333b | [
"Apache-2.0"
] | 3 | 2019-09-05T16:57:15.000Z | 2019-10-01T19:56:58.000Z | parsons/google/utitities.py | Tomiiwa/parsons | 3886327c197e357ba5342603d8409774a541333b | [
"Apache-2.0"
] | 22 | 2019-09-03T13:23:37.000Z | 2019-10-03T20:32:48.000Z | parsons/google/utitities.py | Tomiiwa/parsons | 3886327c197e357ba5342603d8409774a541333b | [
"Apache-2.0"
] | 2 | 2019-09-01T18:30:10.000Z | 2019-10-03T20:07:46.000Z | from parsons.utilities import files
from parsons.utilities import check_env
import json
import os
def setup_google_application_credentials(app_creds, env_var_name='GOOGLE_APPLICATION_CREDENTIALS'):
# Detect if app_creds is a dict, path string or json string, and if it is a
# json string, then convert it to a temporary file. Then set the
# environmental variable.
credentials = check_env.check(env_var_name, app_creds)
try:
if (type(credentials) is dict):
credentials = json.dumps(credentials)
creds_path = files.string_to_temp_file(credentials, suffix='.json')
except ValueError:
creds_path = credentials
os.environ[env_var_name] = creds_path
| 35.6 | 99 | 0.738764 | from parsons.utilities import files
from parsons.utilities import check_env
import json
import os
def setup_google_application_credentials(app_creds, env_var_name='GOOGLE_APPLICATION_CREDENTIALS'):
credentials = check_env.check(env_var_name, app_creds)
try:
if (type(credentials) is dict):
credentials = json.dumps(credentials)
creds_path = files.string_to_temp_file(credentials, suffix='.json')
except ValueError:
creds_path = credentials
os.environ[env_var_name] = creds_path
| true | true |
f7177a79f201b728ccb90ed68b5736930baa2a1a | 4,126 | py | Python | xsimlab/ipython.py | jvail/xarray-simlab | 3e8cb81775868e3e7c6495489ba351567e0d7e42 | [
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 48 | 2017-06-19T16:31:37.000Z | 2021-04-26T04:42:48.000Z | xsimlab/ipython.py | jvail/xarray-simlab | 3e8cb81775868e3e7c6495489ba351567e0d7e42 | [
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 108 | 2017-06-26T12:22:10.000Z | 2021-03-09T08:57:02.000Z | xsimlab/ipython.py | jvail/xarray-simlab | 3e8cb81775868e3e7c6495489ba351567e0d7e42 | [
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 10 | 2017-08-11T04:56:20.000Z | 2021-03-01T16:46:55.000Z | import textwrap
from IPython.core.magic import Magics, magics_class, line_magic
from IPython.core import magic_arguments
import attr
from .formatting import format_var_dims
from .model import Model
from .utils import variables_dict
setup_template = """
import xsimlab as xs
ds_in = xs.create_setup(
model={model},
clocks={{}},
input_vars={{
{in_vars}
}},
output_vars={{}}
)
"""
def format_var_comment(var, verbose=0):
comment = ""
if verbose:
var_desc = var.metadata["description"]
if var_desc:
comment += textwrap.fill(
var_desc, width=86, initial_indent="# ", subsequent_indent="# "
)
else:
comment += "# ---"
comment += "\n"
if verbose > 1:
var_dims = format_var_dims(var)
if var_dims:
comment += f"# dimensions: {var_dims}\n"
if var.metadata["static"]:
comment += f"# static: main clock dimension not supported\n"
if verbose > 2:
var_attrs = var.metadata.get("attrs", False)
if var_attrs:
for k, v in var_attrs.items():
comment += f"# {k}: {v}\n"
return comment
def format_input_vars(
model, skip_default=False, default=False, verbose=0, nested=False
):
lines = []
for pn, vnames in model.input_vars_dict.items():
plines = []
for vn in vnames:
var = variables_dict(type(model[pn]))[vn]
if skip_default and var.default is not attr.NOTHING:
continue
if default and var.default is not attr.NOTHING:
default_val = f"{var.default!r}"
else:
default_val = ""
comment = format_var_comment(var, verbose=verbose)
if nested:
plines.append(comment + f"'{vn}': {default_val},")
else:
lines.append(comment + f"'{pn}__{vn}': {default_val},")
if nested and plines:
pfmt = textwrap.indent("\n".join(plines), " " * 4)
lines.append(f"'{pn}': {{\n{pfmt}\n}},")
return textwrap.indent("\n".join(lines), " " * 8)[8:]
@magics_class
class SimulationMagics(Magics):
@line_magic
@magic_arguments.magic_arguments()
@magic_arguments.argument("model", help="xsimlab.Model object")
@magic_arguments.argument(
"-s",
"--skip-default",
action="store_true",
default=False,
help="Don't add input variables that have default values",
)
@magic_arguments.argument(
"-d",
"--default",
action="store_true",
default=False,
help="Add input variables default values, if any (ignored if --skip-default)",
)
@magic_arguments.argument(
"-v",
"--verbose",
action="count",
default=0,
help="Increase verbosity (i.e., add more input variables info as comments)",
)
@magic_arguments.argument(
"-n",
"--nested",
action="store_true",
default=False,
help="Group input variables by process",
)
def create_setup(self, line=""):
"""Pre-fill the current cell with a new simulation setup."""
args = magic_arguments.parse_argstring(self.create_setup, line)
model_obj = self.shell.user_ns.get(args.model)
if model_obj is None:
raise KeyError(f"Model '{args.model}' not defined or not imported")
elif not isinstance(model_obj, Model):
raise TypeError(f"'{args.model}' is not a xsimlab.Model object")
rendered = setup_template.format(
model=args.model,
in_vars=format_input_vars(
model_obj,
skip_default=args.skip_default,
default=args.default,
verbose=args.verbose,
nested=args.nested,
),
)
content = f"# %create_setup {line}" + rendered
self.shell.set_next_input(content, replace=True)
def load_ipython_extension(ipython):
ipython.register_magics(SimulationMagics)
| 27.506667 | 86 | 0.574649 | import textwrap
from IPython.core.magic import Magics, magics_class, line_magic
from IPython.core import magic_arguments
import attr
from .formatting import format_var_dims
from .model import Model
from .utils import variables_dict
setup_template = """
import xsimlab as xs
ds_in = xs.create_setup(
model={model},
clocks={{}},
input_vars={{
{in_vars}
}},
output_vars={{}}
)
"""
def format_var_comment(var, verbose=0):
comment = ""
if verbose:
var_desc = var.metadata["description"]
if var_desc:
comment += textwrap.fill(
var_desc, width=86, initial_indent="# ", subsequent_indent="# "
)
else:
comment += "# ---"
comment += "\n"
if verbose > 1:
var_dims = format_var_dims(var)
if var_dims:
comment += f"# dimensions: {var_dims}\n"
if var.metadata["static"]:
comment += f"# static: main clock dimension not supported\n"
if verbose > 2:
var_attrs = var.metadata.get("attrs", False)
if var_attrs:
for k, v in var_attrs.items():
comment += f"# {k}: {v}\n"
return comment
def format_input_vars(
model, skip_default=False, default=False, verbose=0, nested=False
):
lines = []
for pn, vnames in model.input_vars_dict.items():
plines = []
for vn in vnames:
var = variables_dict(type(model[pn]))[vn]
if skip_default and var.default is not attr.NOTHING:
continue
if default and var.default is not attr.NOTHING:
default_val = f"{var.default!r}"
else:
default_val = ""
comment = format_var_comment(var, verbose=verbose)
if nested:
plines.append(comment + f"'{vn}': {default_val},")
else:
lines.append(comment + f"'{pn}__{vn}': {default_val},")
if nested and plines:
pfmt = textwrap.indent("\n".join(plines), " " * 4)
lines.append(f"'{pn}': {{\n{pfmt}\n}},")
return textwrap.indent("\n".join(lines), " " * 8)[8:]
@magics_class
class SimulationMagics(Magics):
@line_magic
@magic_arguments.magic_arguments()
@magic_arguments.argument("model", help="xsimlab.Model object")
@magic_arguments.argument(
"-s",
"--skip-default",
action="store_true",
default=False,
help="Don't add input variables that have default values",
)
@magic_arguments.argument(
"-d",
"--default",
action="store_true",
default=False,
help="Add input variables default values, if any (ignored if --skip-default)",
)
@magic_arguments.argument(
"-v",
"--verbose",
action="count",
default=0,
help="Increase verbosity (i.e., add more input variables info as comments)",
)
@magic_arguments.argument(
"-n",
"--nested",
action="store_true",
default=False,
help="Group input variables by process",
)
def create_setup(self, line=""):
args = magic_arguments.parse_argstring(self.create_setup, line)
model_obj = self.shell.user_ns.get(args.model)
if model_obj is None:
raise KeyError(f"Model '{args.model}' not defined or not imported")
elif not isinstance(model_obj, Model):
raise TypeError(f"'{args.model}' is not a xsimlab.Model object")
rendered = setup_template.format(
model=args.model,
in_vars=format_input_vars(
model_obj,
skip_default=args.skip_default,
default=args.default,
verbose=args.verbose,
nested=args.nested,
),
)
content = f"# %create_setup {line}" + rendered
self.shell.set_next_input(content, replace=True)
def load_ipython_extension(ipython):
ipython.register_magics(SimulationMagics)
| true | true |
f7177b1e4046a2c4d8c4f139594073d0ad624f46 | 752 | py | Python | api/tournaments/migrations/0002_auto_20190804_1830.py | individuo7/wololo-tournaments-api | 5be6284064373e99346d39c78844e454c41c501d | [
"MIT"
] | 2 | 2019-12-09T10:19:36.000Z | 2020-01-11T11:48:41.000Z | api/tournaments/migrations/0002_auto_20190804_1830.py | individuo7/wololo-tournaments-api | 5be6284064373e99346d39c78844e454c41c501d | [
"MIT"
] | null | null | null | api/tournaments/migrations/0002_auto_20190804_1830.py | individuo7/wololo-tournaments-api | 5be6284064373e99346d39c78844e454c41c501d | [
"MIT"
] | null | null | null | # Generated by Django 2.2.3 on 2019-08-04 18:30
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('tournaments', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='game',
name='tournament',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='games', to='tournaments.Tournament'),
),
migrations.AlterField(
model_name='playergame',
name='team',
field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, to='tournaments.Team'),
preserve_default=False,
),
]
| 28.923077 | 132 | 0.630319 |
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('tournaments', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='game',
name='tournament',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='games', to='tournaments.Tournament'),
),
migrations.AlterField(
model_name='playergame',
name='team',
field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, to='tournaments.Team'),
preserve_default=False,
),
]
| true | true |
f7177be77bf1953daf64f669f1516a9413569e6f | 1,785 | py | Python | model/contact.py | dorotan/pythontraining | 13cd9d5d8b0c772951e9caf98166118e7ffa387c | [
"Apache-2.0"
] | null | null | null | model/contact.py | dorotan/pythontraining | 13cd9d5d8b0c772951e9caf98166118e7ffa387c | [
"Apache-2.0"
] | null | null | null | model/contact.py | dorotan/pythontraining | 13cd9d5d8b0c772951e9caf98166118e7ffa387c | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
__author__ = 'dorota'
from sys import maxsize
class Contact:
def __init__(self, first_name=None, middle_name=None, last_name=None, nickname=None, title=None, company=None,
address=None, home_number=None, mobile_number=None, work_number=None, fax=None, first_email=None,
second_email=None, third_email=None, wwwpage=None, birth_year=None, anniversary_year=None,
second_address=None, second_private_number=None, notes=None, id= None, all_phones_from_homepage=None):
self.first_name = first_name
self.middle_name = middle_name
self.last_name = last_name
self.nickname = nickname
self.title = title
self.company = company
self.address = address
self.home_number = home_number
self.mobile_number = mobile_number
self.work_number = work_number
self.fax = fax
self.first_email = first_email
self.second_email = second_email
self.third_email = third_email
self.wwwpage = wwwpage
self.birth_year = birth_year
self.anniversary_year = anniversary_year
self.second_address = second_address
self.second_private_number = second_private_number
self.notes = notes
self.id = id
self.all_phones_from_homepage=all_phones_from_homepage
def __repr__(self):
return "%s:%s %s" % (self.id, self.first_name, self.last_name)
def __eq__(self, other):
return (self.id is None or other.id is None or self.id == other.id)\
and self.first_name == other.first_name and self.last_name == other.last_name
def id_or_max(con):
if con.id:
return int(con.id)
else:
return maxsize | 38.804348 | 119 | 0.656022 |
__author__ = 'dorota'
from sys import maxsize
class Contact:
def __init__(self, first_name=None, middle_name=None, last_name=None, nickname=None, title=None, company=None,
address=None, home_number=None, mobile_number=None, work_number=None, fax=None, first_email=None,
second_email=None, third_email=None, wwwpage=None, birth_year=None, anniversary_year=None,
second_address=None, second_private_number=None, notes=None, id= None, all_phones_from_homepage=None):
self.first_name = first_name
self.middle_name = middle_name
self.last_name = last_name
self.nickname = nickname
self.title = title
self.company = company
self.address = address
self.home_number = home_number
self.mobile_number = mobile_number
self.work_number = work_number
self.fax = fax
self.first_email = first_email
self.second_email = second_email
self.third_email = third_email
self.wwwpage = wwwpage
self.birth_year = birth_year
self.anniversary_year = anniversary_year
self.second_address = second_address
self.second_private_number = second_private_number
self.notes = notes
self.id = id
self.all_phones_from_homepage=all_phones_from_homepage
def __repr__(self):
return "%s:%s %s" % (self.id, self.first_name, self.last_name)
def __eq__(self, other):
return (self.id is None or other.id is None or self.id == other.id)\
and self.first_name == other.first_name and self.last_name == other.last_name
def id_or_max(con):
if con.id:
return int(con.id)
else:
return maxsize | true | true |
f7177c158a0506efce3af2ecad52f923c731c8ea | 1,903 | py | Python | inference_exploration/cpu/main.py | nbortolotti/tflite-tpu-experiences | 8f613e059335d1d90886282f005261917fd9cfd3 | [
"Apache-2.0"
] | 1 | 2019-12-06T12:58:33.000Z | 2019-12-06T12:58:33.000Z | inference_exploration/cpu/main.py | nbortolotti/tflite-tpu-experiences | 8f613e059335d1d90886282f005261917fd9cfd3 | [
"Apache-2.0"
] | 9 | 2020-10-12T13:57:32.000Z | 2021-09-16T19:38:26.000Z | inference_exploration/cpu/main.py | nbortolotti/tflite-tpu-experiences | 8f613e059335d1d90886282f005261917fd9cfd3 | [
"Apache-2.0"
] | null | null | null | import os
import numpy as np
import PIL.Image as Image
import matplotlib.pylab as plt
import time
import tensorflow as tf
import tensorflow_hub as hub
from tensorflow.keras import layers
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
def image_analysis(classifier, image_shape, img_array):
result = classifier.predict(img_array[np.newaxis, ...])
# result.shape
predicted_class = np.argmax(result[0], axis=-1)
return predicted_class
def main():
classifier_url = "https://tfhub.dev/google/imagenet/mobilenet_v2_100_224/classification/4"
image_shape = (224, 224)
classifier = tf.keras.Sequential([
hub.KerasLayer(classifier_url, input_shape=image_shape + (3,))
])
img_file = tf.keras.utils.get_file('image.jpg', 'https://storage.googleapis.com/demostration_images/2.jpg')
img = Image.open(img_file).resize(image_shape)
img_array = np.array(img) / 255.0
# img_array.shape
predicted_class = image_analysis(classifier, image_shape, img_array)
labels_path = tf.keras.utils.get_file('ImageNetLabels.txt',
'https://storage.googleapis.com/download.tensorflow.org/data/ImageNetLabels.txt')
imagenet_labels = np.array(open(labels_path).read().splitlines())
#
# plt.imshow(img_array)
# plt.axis('off')
# predicted_class_name = imagenet_labels[predicted_class]
# _ = plt.title("Prediction: " + predicted_class_name.title())
# plt.show()
for _ in range(5):
inferenceTime(img_array, classifier)
# explore time to do the inference
def inferenceTime(image, mClassifier):
start = time.time()
result = mClassifier.predict(image[np.newaxis, ...])
end = time.time()
print((end - start)*1000) #milliseconds
# predicted_class = np.argmax(result[0], axis=-1)
# predicted_class_name = mLabels[predicted_class]
if __name__ == '__main__':
main()
| 30.206349 | 123 | 0.695218 | import os
import numpy as np
import PIL.Image as Image
import matplotlib.pylab as plt
import time
import tensorflow as tf
import tensorflow_hub as hub
from tensorflow.keras import layers
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
def image_analysis(classifier, image_shape, img_array):
result = classifier.predict(img_array[np.newaxis, ...])
predicted_class = np.argmax(result[0], axis=-1)
return predicted_class
def main():
classifier_url = "https://tfhub.dev/google/imagenet/mobilenet_v2_100_224/classification/4"
image_shape = (224, 224)
classifier = tf.keras.Sequential([
hub.KerasLayer(classifier_url, input_shape=image_shape + (3,))
])
img_file = tf.keras.utils.get_file('image.jpg', 'https://storage.googleapis.com/demostration_images/2.jpg')
img = Image.open(img_file).resize(image_shape)
img_array = np.array(img) / 255.0
predicted_class = image_analysis(classifier, image_shape, img_array)
labels_path = tf.keras.utils.get_file('ImageNetLabels.txt',
'https://storage.googleapis.com/download.tensorflow.org/data/ImageNetLabels.txt')
imagenet_labels = np.array(open(labels_path).read().splitlines())
for _ in range(5):
inferenceTime(img_array, classifier)
def inferenceTime(image, mClassifier):
start = time.time()
result = mClassifier.predict(image[np.newaxis, ...])
end = time.time()
print((end - start)*1000)
if __name__ == '__main__':
main()
| true | true |
f7177d19acc9ab604ff8ca8dd4f7629ce32e4671 | 203 | py | Python | EOC/prototype/data/datasets/__init__.py | double-fire-0/SystemNoise | ab042dd54371482a18117eb13f816a7472e51590 | [
"Apache-2.0"
] | null | null | null | EOC/prototype/data/datasets/__init__.py | double-fire-0/SystemNoise | ab042dd54371482a18117eb13f816a7472e51590 | [
"Apache-2.0"
] | null | null | null | EOC/prototype/data/datasets/__init__.py | double-fire-0/SystemNoise | ab042dd54371482a18117eb13f816a7472e51590 | [
"Apache-2.0"
] | null | null | null | from .imagenet_dataset import ImageNetDataset, RankedImageNetDataset, DecoderResizeImageNetDataset # noqa
from .custom_dataset import CustomDataset # noqa
from .imagnetc import ImageNet_C_Dataset
| 33.833333 | 106 | 0.842365 | from .imagenet_dataset import ImageNetDataset, RankedImageNetDataset, DecoderResizeImageNetDataset
from .custom_dataset import CustomDataset
from .imagnetc import ImageNet_C_Dataset
| true | true |
f7177d37c526cd723adac7c722303a77bd48abdf | 418 | py | Python | blog/migrations/0006_auto_20220427_1014.py | ali-abbaszade/mysite | 9ef1b1211bd827c178f279e69ddbf4c229c539fa | [
"MIT"
] | null | null | null | blog/migrations/0006_auto_20220427_1014.py | ali-abbaszade/mysite | 9ef1b1211bd827c178f279e69ddbf4c229c539fa | [
"MIT"
] | null | null | null | blog/migrations/0006_auto_20220427_1014.py | ali-abbaszade/mysite | 9ef1b1211bd827c178f279e69ddbf4c229c539fa | [
"MIT"
] | null | null | null | # Generated by Django 3.2.12 on 2022-04-27 05:44
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('blog', '0005_auto_20220427_1002'),
]
operations = [
migrations.RemoveField(
model_name='post',
name='category',
),
migrations.DeleteModel(
name='Category',
),
]
| 19.904762 | 49 | 0.538278 |
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('blog', '0005_auto_20220427_1002'),
]
operations = [
migrations.RemoveField(
model_name='post',
name='category',
),
migrations.DeleteModel(
name='Category',
),
]
| true | true |
f7177e32b5e25c3c506bb440f649164a5758e294 | 46,139 | py | Python | manila/share/drivers/generic.py | vponomaryov/manila | ffe135a5b35a0964179f0dc148d569037f26a929 | [
"Apache-2.0"
] | null | null | null | manila/share/drivers/generic.py | vponomaryov/manila | ffe135a5b35a0964179f0dc148d569037f26a929 | [
"Apache-2.0"
] | null | null | null | manila/share/drivers/generic.py | vponomaryov/manila | ffe135a5b35a0964179f0dc148d569037f26a929 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2014 NetApp, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Generic Driver for shares."""
import os
import time
from oslo_concurrency import processutils
from oslo_config import cfg
from oslo_log import log
from oslo_utils import importutils
from oslo_utils import units
import retrying
import six
from manila.common import constants as const
from manila import compute
from manila import context
from manila import exception
from manila.i18n import _, _LE, _LI, _LW
from manila.share import driver
from manila.share.drivers import service_instance
from manila import utils
from manila import volume
LOG = log.getLogger(__name__)
# Configuration options specific to the generic (service-VM + Cinder-volume)
# share driver.  They are registered on the global CONF object below and
# consumed through ``self.configuration`` in GenericShareDriver.
share_opts = [
    cfg.StrOpt('smb_template_config_path',
               default='$state_path/smb.conf',
               help="Path to smb config."),
    cfg.StrOpt('volume_name_template',
               default='manila-share-%s',
               help="Volume name template."),
    cfg.StrOpt('volume_snapshot_name_template',
               default='manila-snapshot-%s',
               help="Volume snapshot name template."),
    cfg.StrOpt('share_mount_path',
               default='/shares',
               help="Parent path in service instance where shares "
               "will be mounted."),
    cfg.IntOpt('max_time_to_create_volume',
               default=180,
               help="Maximum time to wait for creating cinder volume."),
    cfg.IntOpt('max_time_to_extend_volume',
               default=180,
               help="Maximum time to wait for extending cinder volume."),
    cfg.IntOpt('max_time_to_attach',
               default=120,
               help="Maximum time to wait for attaching cinder volume."),
    cfg.StrOpt('service_instance_smb_config_path',
               default='$share_mount_path/smb.conf',
               help="Path to SMB config in service instance."),
    cfg.ListOpt('share_helpers',
                default=[
                    'CIFS=manila.share.drivers.helpers.CIFSHelperIPAccess',
                    'NFS=manila.share.drivers.helpers.NFSHelper',
                ],
                help='Specify list of share export helpers.'),
    cfg.StrOpt('share_volume_fstype',
               default='ext4',
               choices=['ext4', 'ext3'],
               help='Filesystem type of the share volume.'),
    cfg.StrOpt('cinder_volume_type',
               help='Name or id of cinder volume type which will be used '
                    'for all volumes created by driver.'),
]

CONF = cfg.CONF
CONF.register_opts(share_opts)

# NOTE(u_glide): These constants refer to the column number in the "df" output
BLOCK_DEVICE_SIZE_INDEX = 1
USED_SPACE_INDEX = 2
def ensure_server(f):
    """Decorator that resolves and validates 'share_server' before *f* runs.

    When the driver does not handle share servers, the common service
    instance is looked up and injected into ``kwargs['share_server']``;
    passing one explicitly is an error.  When the driver does handle share
    servers, the caller must supply one.  In both cases the server must
    carry backend details and its service instance must be reachable.
    """
    def wrap(self, context, *args, **kwargs):
        server = kwargs.get('share_server')
        if self.driver_handles_share_servers:
            # DHSS=True: a share server must have been provided by the caller.
            if not server:
                raise exception.ManilaException(
                    _("Share server handling is enabled. But 'share_server' "
                      "is not provided. Make sure you used 'share_network'."))
        else:
            # DHSS=False: the driver owns a single common service instance.
            if server:
                raise exception.ManilaException(
                    _("Share server handling is not available. "
                      "But 'share_server' was provided. '%s'. "
                      "Share network should not be used.") % server.get('id'))
            server = self.service_instance_manager.get_common_server()
            kwargs['share_server'] = server
        if not server.get('backend_details'):
            raise exception.ManilaException(
                _("Share server '%s' does not have backend details.") %
                server['id'])
        if not self.service_instance_manager.ensure_service_instance(
                context, server['backend_details']):
            raise exception.ServiceInstanceUnavailable()
        return f(self, context, *args, **kwargs)
    return wrap
class GenericShareDriver(driver.ExecuteMixin, driver.ShareDriver):
"""Executes commands relating to Shares."""
    def __init__(self, *args, **kwargs):
        """Do initialization.

        Registers driver config options, prepares the per-protocol helper
        registry, the SSH connection cache and the service-instance
        manager used to reach the service VM.
        """
        # [False, True]: this driver supports both values of
        # 'driver_handles_share_servers'.
        super(GenericShareDriver, self).__init__(
            [False, True], *args, **kwargs)
        self.admin_context = context.get_admin_context()
        self.configuration.append_config_values(share_opts)
        # Maps upper-cased protocol name (e.g. 'NFS') -> helper instance;
        # populated by _setup_helpers() during do_setup().
        self._helpers = {}
        self.backend_name = self.configuration.safe_get(
            'share_backend_name') or "Cinder_Volumes"
        # Cache of SSH connections to service instances:
        # {instance_id: (ssh_pool, ssh)}.
        self.ssh_connections = {}
        self._setup_service_instance_manager()
        self.private_storage = kwargs.get('private_storage')
def _setup_service_instance_manager(self):
self.service_instance_manager = (
service_instance.ServiceInstanceManager(
driver_config=self.configuration))
    def _ssh_exec(self, server, command, check_exit_code=True):
        """Run *command* on the service VM over a cached SSH connection.

        :param server: backend-details dict of the service instance
        :param command: command as a list of string tokens
        :param check_exit_code: raise on non-zero exit status when True
        :returns: (stdout, stderr) tuple from processutils.ssh_execute
        """
        connection = self.ssh_connections.get(server['instance_id'])
        ssh_conn_timeout = self.configuration.ssh_conn_timeout
        if not connection:
            # First command for this instance: open a one-slot SSH pool
            # and cache it for subsequent calls.
            ssh_pool = utils.SSHPool(server['ip'],
                                     22,
                                     ssh_conn_timeout,
                                     server['username'],
                                     server.get('password'),
                                     server.get('pk_path'),
                                     max_size=1)
            ssh = ssh_pool.create()
            self.ssh_connections[server['instance_id']] = (ssh_pool, ssh)
        else:
            ssh_pool, ssh = connection
        # Re-establish the connection if the cached transport went stale.
        if not ssh.get_transport().is_active():
            ssh_pool.remove(ssh)
            ssh = ssh_pool.create()
            self.ssh_connections[server['instance_id']] = (ssh_pool, ssh)
        # (aovchinnikov): ssh_execute does not behave well when passed
        # parameters with spaces.
        wrap = lambda token: "\"" + token + "\""
        command = [wrap(tkn) if tkn.count(' ') else tkn for tkn in command]
        return processutils.ssh_execute(ssh, ' '.join(command),
                                        check_exit_code=check_exit_code)
    def check_for_setup_error(self):
        """Returns an error if prerequisites aren't met.

        The generic driver has no additional prerequisites to validate,
        so this is intentionally a no-op.
        """
    def do_setup(self, context):
        """Any initialization the generic driver does while starting.

        In DHSS=False mode this blocks, retrying every few seconds, until
        the pre-existing "common" service VM is reachable.
        """
        super(GenericShareDriver, self).do_setup(context)
        self.compute_api = compute.API()
        self.volume_api = volume.API()
        self._setup_helpers()
        common_sv_available = False
        share_server = None
        sv_fetch_retry_interval = 5
        # When the driver does not handle share servers itself, all shares
        # live on one common service VM; wait here until it responds.
        while not (common_sv_available or self.driver_handles_share_servers):
            try:
                # Verify availability of common server
                share_server = (
                    self.service_instance_manager.get_common_server())
                common_sv_available = self._is_share_server_active(
                    context, share_server)
            except Exception as ex:
                LOG.error(ex)
            if not common_sv_available:
                time.sleep(sv_fetch_retry_interval)
                LOG.warning(_LW("Waiting for the common service VM to become "
                                "available. "
                                "Driver is currently uninitialized. "
                                "Share server: %(share_server)s "
                                "Retry interval: %(retry_interval)s"),
                            dict(share_server=share_server,
                                 retry_interval=sv_fetch_retry_interval))
    def _setup_helpers(self):
        """Initializes protocol-specific NAS drivers.

        Parses the 'share_helpers' option (entries shaped like
        'PROTO=import.path.Class') and instantiates one helper per
        protocol, keyed by the upper-cased protocol name.
        """
        helpers = self.configuration.share_helpers
        if helpers:
            for helper_str in helpers:
                share_proto, __, import_str = helper_str.partition('=')
                helper = importutils.import_class(import_str)
                self._helpers[share_proto.upper()] = helper(
                    self._execute,
                    self._ssh_exec,
                    self.configuration)
        else:
            raise exception.ManilaException(
                "No protocol helpers selected for Generic Driver. "
                "Please specify using config option 'share_helpers'.")
@ensure_server
def create_share(self, context, share, share_server=None):
"""Creates share."""
return self._create_share(
context, share,
snapshot=None,
share_server=share_server,
)
    def _create_share(self, context, share, snapshot, share_server=None):
        """Common creation path for new and snapshot-based shares.

        Allocates a cinder volume (optionally from *snapshot*), attaches
        it to the service VM, formats it (new shares only - volumes built
        from snapshots already carry a filesystem), mounts it and creates
        the protocol exports.

        :returns: list of export locations for the new share
        """
        helper = self._get_helper(share)
        server_details = share_server['backend_details']
        volume = self._allocate_container(
            self.admin_context, share, snapshot=snapshot)
        volume = self._attach_volume(
            self.admin_context, share, server_details['instance_id'], volume)
        if not snapshot:
            # Only brand-new volumes need a fresh filesystem.
            self._format_device(server_details, volume)
        self._mount_device(share, server_details, volume)
        export_locations = helper.create_exports(
            server_details, share['name'])
        return export_locations
    @utils.retry(exception.ProcessExecutionError, backoff_rate=1)
    def _is_device_file_available(self, server_details, volume):
        """Checks whether the device file is available"""
        # 'test -b' fails until the attached volume's block-device node
        # appears inside the service VM; utils.retry keeps polling.
        command = ['sudo', 'test', '-b', volume['mountpoint']]
        self._ssh_exec(server_details, command)
    def _format_device(self, server_details, volume):
        """Formats device attached to the service vm."""
        # Wait for the device node to show up before running mkfs.
        self._is_device_file_available(server_details, volume)
        command = ['sudo', 'mkfs.%s' % self.configuration.share_volume_fstype,
                   volume['mountpoint']]
        self._ssh_exec(server_details, command)
    def _is_device_mounted(self, mount_path, server_details, volume=None):
        """Checks whether volume already mounted or not.

        When *volume* is given, the device path must match as well
        (mount check); without it only the mount path is compared
        (unmount check).
        """
        log_data = {
            'mount_path': mount_path,
            'server_id': server_details['instance_id'],
        }
        if volume and volume.get('mountpoint', ''):
            log_data['volume_id'] = volume['id']
            log_data['dev_mount_path'] = volume['mountpoint']
            msg = ("Checking whether volume '%(volume_id)s' with mountpoint "
                   "'%(dev_mount_path)s' is mounted on mount path '%(mount_p"
                   "ath)s' on server '%(server_id)s' or not." % log_data)
        else:
            msg = ("Checking whether mount path '%(mount_path)s' exists on "
                   "server '%(server_id)s' or not." % log_data)
        LOG.debug(msg)
        mounts_list_cmd = ['sudo', 'mount']
        output, __ = self._ssh_exec(server_details, mounts_list_cmd)
        mounts = output.split('\n')
        # 'mount' output lines look like "<dev> on <path> type <fs> (...)":
        # token 0 is the device, token 2 is the mount path.
        for mount in mounts:
            mount_elements = mount.split(' ')
            if (len(mount_elements) > 2 and mount_path == mount_elements[2]):
                if volume:
                    # Mount goes with device path and mount path
                    if (volume.get('mountpoint', '') == mount_elements[0]):
                        return True
                else:
                    # Unmount goes only by mount path
                    return True
        return False
    def _sync_mount_temp_and_perm_files(self, server_details):
        """Sync temporary and permanent files for mounted filesystems.

        Copies the temporary mount file over the permanent one and re-runs
        'mount -a' so the current mounts survive service-VM reboots.
        """
        try:
            self._ssh_exec(
                server_details,
                ['sudo', 'cp', const.MOUNT_FILE_TEMP, const.MOUNT_FILE],
            )
        except exception.ProcessExecutionError as e:
            LOG.error(_LE("Failed to sync mount files on server '%s'."),
                      server_details['instance_id'])
            raise exception.ShareBackendException(msg=six.text_type(e))
        try:
            # Remount it to avoid postponed point of failure
            self._ssh_exec(server_details, ['sudo', 'mount', '-a'])
        except exception.ProcessExecutionError as e:
            LOG.error(_LE("Failed to mount all shares on server '%s'."),
                      server_details['instance_id'])
            raise exception.ShareBackendException(msg=six.text_type(e))
    def _mount_device(self, share, server_details, volume):
        """Mounts block device to the directory on service vm.

        Mounts attached and formatted block device to the directory if not
        mounted yet.  Serialized per service instance with the same lock
        used by _unmount_device().
        """
        @utils.synchronized('generic_driver_mounts_'
                            '%s' % server_details['instance_id'])
        def _mount_device_with_lock():
            mount_path = self._get_mount_path(share)
            device_path = volume['mountpoint']
            log_data = {
                'dev': device_path,
                'path': mount_path,
                'server': server_details['instance_id'],
            }
            try:
                if not self._is_device_mounted(mount_path, server_details,
                                               volume):
                    LOG.debug("Mounting '%(dev)s' to path '%(path)s' on "
                              "server '%(server)s'.", log_data)
                    # Single shell pipeline: create mountpoint, mount,
                    # open permissions, then remount around tune2fs (see
                    # note below on why the UUID must be randomized).
                    mount_cmd = (
                        'sudo', 'mkdir', '-p', mount_path,
                        '&&', 'sudo', 'mount', device_path, mount_path,
                        '&&', 'sudo', 'chmod', '777', mount_path,
                        '&&', 'sudo', 'umount', mount_path,
                        # NOTE(vponomaryov): 'tune2fs' is required to make
                        # filesystem of share created from snapshot have
                        # unique ID, in case of LVM volumes, by default,
                        # it will have the same UUID as source volume one.
                        # 'tune2fs' command can be executed only when device
                        # is not mounted and also, in current case, it takes
                        # effect only after it was mounted. Closes #1645751
                        '&&', 'sudo', 'tune2fs', '-U', 'random', device_path,
                        '&&', 'sudo', 'mount', device_path, mount_path,
                    )
                    self._ssh_exec(server_details, mount_cmd)
                    # Add mount permanently
                    self._sync_mount_temp_and_perm_files(server_details)
                else:
                    LOG.warning(_LW("Mount point '%(path)s' already exists on "
                                    "server '%(server)s'."), log_data)
            except exception.ProcessExecutionError as e:
                raise exception.ShareBackendException(msg=six.text_type(e))
        return _mount_device_with_lock()
    @utils.retry(exception.ProcessExecutionError)
    def _unmount_device(self, share, server_details):
        """Unmounts block device from directory on service vm.

        Serialized per service instance with the same lock used by
        _mount_device(); retried on transient SSH command failures.
        """
        @utils.synchronized('generic_driver_mounts_'
                            '%s' % server_details['instance_id'])
        def _unmount_device_with_lock():
            mount_path = self._get_mount_path(share)
            log_data = {
                'path': mount_path,
                'server': server_details['instance_id'],
            }
            if self._is_device_mounted(mount_path, server_details):
                LOG.debug("Unmounting path '%(path)s' on server "
                          "'%(server)s'.", log_data)
                unmount_cmd = ['sudo', 'umount', mount_path, '&&', 'sudo',
                               'rmdir', mount_path]
                self._ssh_exec(server_details, unmount_cmd)
                # Remove mount permanently
                self._sync_mount_temp_and_perm_files(server_details)
            else:
                LOG.warning(_LW("Mount point '%(path)s' does not exist on "
                                "server '%(server)s'."), log_data)
        return _unmount_device_with_lock()
def _get_mount_path(self, share):
"""Returns the path to use for mount device in service vm."""
return os.path.join(self.configuration.share_mount_path, share['name'])
    def _attach_volume(self, context, share, instance_id, volume):
        """Attaches cinder volume to service vm.

        Serialized per service instance with the same lock used by
        _detach_volume().  Returns the refreshed volume once cinder
        reports it 'in-use'.
        """
        @utils.synchronized(
            "generic_driver_attach_detach_%s" % instance_id, external=True)
        def do_attach(volume):
            if volume['status'] == 'in-use':
                attached_volumes = [vol.id for vol in
                                    self.compute_api.instance_volumes_list(
                                        self.admin_context, instance_id)]
                if volume['id'] in attached_volumes:
                    # Already attached to our instance - nothing to do.
                    return volume
                else:
                    raise exception.ManilaException(
                        _('Volume %s is already attached to another instance')
                        % volume['id'])
            # Nova attach calls can fail transiently; retry up to 3 times
            # with a 2s pause regardless of the exception type.
            @retrying.retry(stop_max_attempt_number=3,
                            wait_fixed=2000,
                            retry_on_exception=lambda exc: True)
            def attach_volume():
                self.compute_api.instance_volume_attach(
                    self.admin_context, instance_id, volume['id'])
            attach_volume()
            # Poll cinder until the volume is 'in-use' or the timeout hits
            # (while/else: 'else' fires only when the loop was exhausted).
            t = time.time()
            while time.time() - t < self.configuration.max_time_to_attach:
                volume = self.volume_api.get(context, volume['id'])
                if volume['status'] == 'in-use':
                    return volume
                elif volume['status'] != 'attaching':
                    raise exception.ManilaException(
                        _('Failed to attach volume %s') % volume['id'])
                time.sleep(1)
            else:
                err_msg = {
                    'volume_id': volume['id'],
                    'max_time': self.configuration.max_time_to_attach
                }
                raise exception.ManilaException(
                    _('Volume %(volume_id)s has not been attached in '
                      '%(max_time)ss. Giving up.') % err_msg)
        return do_attach(volume)
def _get_volume_name(self, share_id):
return self.configuration.volume_name_template % share_id
def _get_volume(self, context, share_id):
"""Finds volume, associated to the specific share."""
volume_id = self.private_storage.get(share_id, 'volume_id')
if volume_id is not None:
return self.volume_api.get(context, volume_id)
else: # Fallback to legacy method
return self._get_volume_legacy(context, share_id)
    def _get_volume_legacy(self, context, share_id):
        """Find a share's cinder volume by its templated name.

        Deprecated fallback for shares created before the volume id was
        recorded in private storage.  Returns None when nothing matches.
        """
        # NOTE(u_glide): this method is deprecated and will be removed in
        # future versions
        volume_name = self._get_volume_name(share_id)
        search_opts = {'name': volume_name}
        if context.is_admin:
            search_opts['all_tenants'] = True
        volumes_list = self.volume_api.get_all(context, search_opts)
        if len(volumes_list) == 1:
            return volumes_list[0]
        elif len(volumes_list) > 1:
            LOG.error(
                _LE("Expected only one volume in volume list with name "
                    "'%(name)s', but got more than one in a result - "
                    "'%(result)s'."), {
                        'name': volume_name, 'result': volumes_list})
            raise exception.ManilaException(
                _("Error. Ambiguous volumes for name '%s'") % volume_name)
        return None
def _get_volume_snapshot(self, context, snapshot_id):
"""Find volume snapshot associated to the specific share snapshot."""
volume_snapshot_id = self.private_storage.get(
snapshot_id, 'volume_snapshot_id')
if volume_snapshot_id is not None:
return self.volume_api.get_snapshot(context, volume_snapshot_id)
else: # Fallback to legacy method
return self._get_volume_snapshot_legacy(context, snapshot_id)
    def _get_volume_snapshot_legacy(self, context, snapshot_id):
        """Find a share snapshot's cinder snapshot by templated name.

        Deprecated fallback for snapshots created before the cinder
        snapshot id was recorded in private storage.  Returns None when
        nothing matches.
        """
        # NOTE(u_glide): this method is deprecated and will be removed in
        # future versions
        volume_snapshot_name = (
            self.configuration.volume_snapshot_name_template % snapshot_id)
        volume_snapshot_list = self.volume_api.get_all_snapshots(
            context, {'name': volume_snapshot_name})
        volume_snapshot = None
        if len(volume_snapshot_list) == 1:
            volume_snapshot = volume_snapshot_list[0]
        elif len(volume_snapshot_list) > 1:
            LOG.error(
                _LE("Expected only one volume snapshot in list with name "
                    "'%(name)s', but got more than one in a result - "
                    "'%(result)s'."), {
                        'name': volume_snapshot_name,
                        'result': volume_snapshot_list})
            raise exception.ManilaException(
                _('Error. Ambiguous volume snaphots'))
        return volume_snapshot
    def _detach_volume(self, context, share, server_details):
        """Detaches cinder volume from service vm.

        No-op when the volume is already gone or not attached.  Serialized
        per service instance with the same lock used by _attach_volume().
        """
        instance_id = server_details['instance_id']
        @utils.synchronized(
            "generic_driver_attach_detach_%s" % instance_id, external=True)
        def do_detach():
            attached_volumes = [vol.id for vol in
                                self.compute_api.instance_volumes_list(
                                    self.admin_context, instance_id)]
            try:
                volume = self._get_volume(context, share['id'])
            except exception.VolumeNotFound:
                LOG.warning(_LW("Volume not found for share %s. "
                                "Possibly already deleted."), share['id'])
                volume = None
            if volume and volume['id'] in attached_volumes:
                self.compute_api.instance_volume_detach(
                    self.admin_context,
                    instance_id,
                    volume['id']
                )
                # Poll until cinder reports the volume detached (or in
                # error); while/else raises only on timeout.
                t = time.time()
                while time.time() - t < self.configuration.max_time_to_attach:
                    volume = self.volume_api.get(context, volume['id'])
                    if volume['status'] in (const.STATUS_AVAILABLE,
                                            const.STATUS_ERROR):
                        break
                    time.sleep(1)
                else:
                    err_msg = {
                        'volume_id': volume['id'],
                        'max_time': self.configuration.max_time_to_attach
                    }
                    raise exception.ManilaException(
                        _('Volume %(volume_id)s has not been detached in '
                          '%(max_time)ss. Giving up.') % err_msg)
        do_detach()
    def _allocate_container(self, context, share, snapshot=None):
        """Creates cinder volume, associated to share by name.

        When *snapshot* is given, the volume is built from the matching
        cinder snapshot.  The share-id -> volume-id mapping is recorded in
        private storage, then the call blocks until the volume is
        available (or raises on error/timeout).
        """
        volume_snapshot = None
        if snapshot:
            volume_snapshot = self._get_volume_snapshot(context,
                                                        snapshot['id'])
        volume = self.volume_api.create(
            context,
            share['size'],
            self.configuration.volume_name_template % share['id'], '',
            snapshot=volume_snapshot,
            volume_type=self.configuration.cinder_volume_type,
            availability_zone=share['availability_zone'])
        self.private_storage.update(
            share['id'], {'volume_id': volume['id']})
        msg_error = _('Failed to create volume')
        msg_timeout = (
            _('Volume has not been created in %ss. Giving up') %
            self.configuration.max_time_to_create_volume
        )
        return self._wait_for_available_volume(
            volume, self.configuration.max_time_to_create_volume,
            msg_error=msg_error, msg_timeout=msg_timeout
        )
    def _wait_for_available_volume(self, volume, timeout,
                                   msg_error, msg_timeout,
                                   expected_size=None):
        """Poll cinder until *volume* is available (and optionally resized).

        :param volume: cinder volume dict to poll
        :param timeout: maximum seconds to wait
        :param msg_error: message for the exception raised on error status
        :param msg_timeout: message for the exception raised on timeout
        :param expected_size: when set, 'available' alone is not enough -
            the reported size must match too (used after extend)
        :returns: the refreshed volume dict
        :raises ManilaException: on error status or timeout
        """
        t = time.time()
        while time.time() - t < timeout:
            if volume['status'] == const.STATUS_AVAILABLE:
                if expected_size and volume['size'] != expected_size:
                    # Available but not yet resized: keep polling.
                    LOG.debug("The volume %(vol_id)s is available but the "
                              "volume size does not match the expected size. "
                              "A volume resize operation may be pending. "
                              "Expected size: %(expected_size)s, "
                              "Actual size: %(volume_size)s.",
                              dict(vol_id=volume['id'],
                                   expected_size=expected_size,
                                   volume_size=volume['size']))
                else:
                    break
            elif 'error' in volume['status'].lower():
                raise exception.ManilaException(msg_error)
            time.sleep(1)
            volume = self.volume_api.get(self.admin_context, volume['id'])
        else:
            # while/else: only reached when the timeout was exhausted.
            raise exception.ManilaException(msg_timeout)
        return volume
    def _deallocate_container(self, context, share):
        """Deletes cinder volume.

        No-op when the volume is already gone; refuses to delete a volume
        that is still attached; waits for the deletion to complete.
        """
        try:
            volume = self._get_volume(context, share['id'])
        except exception.VolumeNotFound:
            LOG.info(_LI("Volume not found. Already deleted?"))
            volume = None
        if volume:
            if volume['status'] == 'in-use':
                raise exception.ManilaException(
                    _('Volume is still in use and '
                      'cannot be deleted now.'))
            self.volume_api.delete(context, volume['id'])
            # Poll until cinder raises VolumeNotFound; while/else raises
            # only when the timeout was exhausted.
            t = time.time()
            while (time.time() - t <
                   self.configuration.max_time_to_create_volume):
                try:
                    volume = self.volume_api.get(context, volume['id'])
                except exception.VolumeNotFound:
                    LOG.debug('Volume was deleted successfully')
                    break
                time.sleep(1)
            else:
                raise exception.ManilaException(
                    _('Volume have not been '
                      'deleted in %ss. Giving up')
                    % self.configuration.max_time_to_create_volume)
def _update_share_stats(self):
"""Retrieve stats info from share volume group."""
data = dict(
share_backend_name=self.backend_name,
storage_protocol='NFS_CIFS',
reserved_percentage=self.configuration.reserved_share_percentage,
)
super(GenericShareDriver, self)._update_share_stats(data)
@ensure_server
def create_share_from_snapshot(self, context, share, snapshot,
share_server=None):
"""Is called to create share from snapshot."""
return self._create_share(
context, share,
snapshot=snapshot,
share_server=share_server,
)
    @ensure_server
    def extend_share(self, share, new_size, share_server=None):
        """Grow a share to *new_size* GiB.

        Temporarily blocks client access, unmounts the device, grows the
        cinder volume (detach/extend/re-attach) when needed, resizes the
        filesystem, then remounts and restores access.
        """
        server_details = share_server['backend_details']
        helper = self._get_helper(share)
        helper.disable_access_for_maintenance(server_details, share['name'])
        self._unmount_device(share, server_details)
        volume = self._get_volume(self.admin_context, share['id'])
        # The cinder volume may already be large enough (e.g. a retry);
        # only detach/extend when it actually needs to grow.
        if int(new_size) > volume['size']:
            self._detach_volume(self.admin_context, share, server_details)
            volume = self._extend_volume(self.admin_context, volume, new_size)
            volume = self._attach_volume(
                self.admin_context,
                share,
                server_details['instance_id'],
                volume)
            self._resize_filesystem(server_details, volume)
        self._mount_device(share, server_details, volume)
        helper.restore_access_after_maintenance(server_details,
                                                share['name'])
def _extend_volume(self, context, volume, new_size):
self.volume_api.extend(context, volume['id'], new_size)
msg_error = _('Failed to extend volume %s') % volume['id']
msg_timeout = (
_('Volume has not been extended in %ss. Giving up') %
self.configuration.max_time_to_extend_volume
)
return self._wait_for_available_volume(
volume, self.configuration.max_time_to_extend_volume,
msg_error=msg_error, msg_timeout=msg_timeout,
expected_size=new_size
)
    @ensure_server
    def shrink_share(self, share, new_size, share_server=None):
        """Shrink a share to *new_size* GiB.

        Refuses (ShareShrinkingPossibleDataLoss) when the data already on
        the share would not fit.  Only the filesystem is shrunk; the
        cinder volume keeps its size.
        """
        server_details = share_server['backend_details']
        helper = self._get_helper(share)
        export_location = share['export_locations'][0]['path']
        mount_path = helper.get_share_path_by_export_location(
            server_details, export_location)
        consumed_space = self._get_consumed_space(mount_path, server_details)
        LOG.debug("Consumed space on share: %s", consumed_space)
        if consumed_space >= new_size:
            raise exception.ShareShrinkingPossibleDataLoss(
                share_id=share['id'])
        volume = self._get_volume(self.admin_context, share['id'])
        helper.disable_access_for_maintenance(server_details, share['name'])
        self._unmount_device(share, server_details)
        try:
            self._resize_filesystem(server_details, volume, new_size=new_size)
        except exception.Invalid:
            # resize2fs rejected the target size: report as data loss risk.
            raise exception.ShareShrinkingPossibleDataLoss(
                share_id=share['id'])
        except Exception as e:
            msg = _("Cannot shrink share: %s") % six.text_type(e)
            raise exception.Invalid(msg)
        finally:
            # Always remount and restore access, even when shrinking failed.
            self._mount_device(share, server_details, volume)
            helper.restore_access_after_maintenance(server_details,
                                                    share['name'])
    def _resize_filesystem(self, server_details, volume, new_size=None):
        """Resize filesystem of provided volume.

        Without *new_size*, resize2fs grows the filesystem to the whole
        device; with it, the filesystem is resized to that many GiB.
        :raises Invalid: when the requested size is below the minimum
        :raises ManilaException: on any other resize failure
        """
        # fsck is mandatory before resize2fs will touch the filesystem.
        check_command = ['sudo', 'fsck', '-pf', volume['mountpoint']]
        self._ssh_exec(server_details, check_command)
        command = ['sudo', 'resize2fs', volume['mountpoint']]
        if new_size:
            command.append("%sG" % six.text_type(new_size))
        try:
            self._ssh_exec(server_details, command)
        except processutils.ProcessExecutionError as e:
            if e.stderr.find('New size smaller than minimum') != -1:
                msg = (_("Invalid 'new_size' provided: %s")
                       % six.text_type(new_size))
                raise exception.Invalid(msg)
            else:
                msg = _("Cannot resize file-system: %s") % six.text_type(e)
                raise exception.ManilaException(msg)
def _is_share_server_active(self, context, share_server):
"""Check if the share server is active."""
has_active_share_server = (
share_server and share_server.get('backend_details') and
self.service_instance_manager.ensure_service_instance(
context, share_server['backend_details']))
return has_active_share_server
    def delete_share(self, context, share, share_server=None):
        """Deletes share.

        Removes exports, unmounts and detaches only when the share server
        is reachable; the cinder volume and the private-storage mapping
        are always cleaned up.
        """
        helper = self._get_helper(share)
        if not self.driver_handles_share_servers:
            share_server = self.service_instance_manager.get_common_server()
        if self._is_share_server_active(context, share_server):
            helper.remove_exports(
                share_server['backend_details'], share['name'])
            self._unmount_device(share, share_server['backend_details'])
            self._detach_volume(self.admin_context, share,
                                share_server['backend_details'])
        # Note(jun): It is an intended breakage to deal with the cases
        # with any reason that caused absence of Nova instances.
        self._deallocate_container(self.admin_context, share)
        self.private_storage.delete(share['id'])
    def create_snapshot(self, context, snapshot, share_server=None):
        """Creates a snapshot.

        Forces a cinder snapshot of the share's volume and polls until it
        is available.  The cinder snapshot id is recorded both in private
        storage and as the model's 'provider_location'.
        :returns: model update dict (may carry 'provider_location')
        """
        model_update = {}
        volume = self._get_volume(self.admin_context, snapshot['share_id'])
        volume_snapshot_name = (self.configuration.
                                volume_snapshot_name_template % snapshot['id'])
        volume_snapshot = self.volume_api.create_snapshot_force(
            self.admin_context, volume['id'], volume_snapshot_name, '')
        t = time.time()
        # while/else: the 'else' raises only when the timeout is exhausted.
        while time.time() - t < self.configuration.max_time_to_create_volume:
            if volume_snapshot['status'] == const.STATUS_AVAILABLE:
                break
            if volume_snapshot['status'] == const.STATUS_ERROR:
                raise exception.ManilaException(_('Failed to create volume '
                                                 'snapshot'))
            time.sleep(1)
            volume_snapshot = self.volume_api.get_snapshot(
                self.admin_context,
                volume_snapshot['id'])
            # NOTE(xyang): We should look at whether we still need to save
            # volume_snapshot_id in private_storage later, now that is saved
            # in provider_location.
            self.private_storage.update(
                snapshot['id'], {'volume_snapshot_id': volume_snapshot['id']})
            # NOTE(xyang): Need to update provider_location in the db so
            # that it can be used in manage/unmanage snapshot tempest tests.
            model_update['provider_location'] = volume_snapshot['id']
        else:
            raise exception.ManilaException(
                _('Volume snapshot have not been '
                  'created in %ss. Giving up') %
                self.configuration.max_time_to_create_volume)
        return model_update
def delete_snapshot(self, context, snapshot, share_server=None):
"""Deletes a snapshot."""
volume_snapshot = self._get_volume_snapshot(self.admin_context,
snapshot['id'])
if volume_snapshot is None:
return
self.volume_api.delete_snapshot(self.admin_context,
volume_snapshot['id'])
t = time.time()
while time.time() - t < self.configuration.max_time_to_create_volume:
try:
snapshot = self.volume_api.get_snapshot(self.admin_context,
volume_snapshot['id'])
except exception.VolumeSnapshotNotFound:
LOG.debug('Volume snapshot was deleted successfully')
self.private_storage.delete(snapshot['id'])
break
time.sleep(1)
else:
raise exception.ManilaException(
_('Volume snapshot have not been '
'deleted in %ss. Giving up') %
self.configuration.max_time_to_create_volume)
    @ensure_server
    def ensure_share(self, context, share, share_server=None):
        """Ensure that storage are mounted and exported.

        Re-attaches, re-mounts and re-creates exports after e.g. a
        service restart.
        """
        helper = self._get_helper(share)
        volume = self._get_volume(context, share['id'])
        # NOTE(vponomaryov): volume can be None for managed shares
        if volume:
            volume = self._attach_volume(
                context,
                share,
                share_server['backend_details']['instance_id'],
                volume)
            self._mount_device(share, share_server['backend_details'], volume)
            helper.create_exports(
                share_server['backend_details'], share['name'], recreate=True)
    @ensure_server
    def update_access(self, context, share, access_rules, add_rules,
                      delete_rules, share_server=None):
        """Update access rules for given share.

        This driver has two different behaviors according to parameters:
        1. Recovery after error - 'access_rules' contains all access_rules,
        'add_rules' and 'delete_rules' shall be empty. Previously existing
        access rules are cleared and then added back according
        to 'access_rules'.

        2. Adding/Deleting of several access rules - 'access_rules' contains
        all access_rules, 'add_rules' and 'delete_rules' contain rules which
        should be added/deleted. Rules in 'access_rules' are ignored and
        only rules from 'add_rules' and 'delete_rules' are applied.

        :param context: Current context
        :param share: Share model with share data.
        :param access_rules: All access rules for given share
        :param add_rules: Empty List or List of access rules which should be
               added. access_rules already contains these rules.
        :param delete_rules: Empty List or List of access rules which should be
               removed. access_rules doesn't contain these rules.
        :param share_server: None or Share server model
        """
        # All protocol-specific logic lives in the helper.
        self._get_helper(share).update_access(share_server['backend_details'],
                                              share['name'], access_rules,
                                              add_rules=add_rules,
                                              delete_rules=delete_rules)
def _get_helper(self, share):
helper = self._helpers.get(share['share_proto'])
if helper:
return helper
else:
raise exception.InvalidShare(
reason="Wrong, unsupported or disabled protocol")
def get_network_allocations_number(self):
"""Get number of network interfaces to be created."""
# NOTE(vponomaryov): Generic driver does not need allocations, because
# Nova will handle it. It is valid for all multitenant drivers, that
# use service instance provided by Nova.
return 0
def _setup_server(self, network_info, metadata=None):
msg = "Creating share server '%s'."
LOG.debug(msg % network_info['server_id'])
server = self.service_instance_manager.set_up_service_instance(
self.admin_context, network_info)
for helper in self._helpers.values():
helper.init_helper(server)
return server
    def _teardown_server(self, server_details, security_services=None):
        """Tear down the service instance backing a share server.

        The 'security_services' argument is accepted for interface
        compatibility but not used by this driver.
        """
        instance_id = server_details.get("instance_id")
        LOG.debug("Removing share infrastructure for service instance '%s'.",
                  instance_id)
        self.service_instance_manager.delete_service_instance(
            self.admin_context, server_details)
def manage_existing(self, share, driver_options):
"""Manage existing share to manila.
Generic driver accepts only one driver_option 'volume_id'.
If an administrator provides this option, then appropriate Cinder
volume will be managed by Manila as well.
:param share: share data
:param driver_options: Empty dict or dict with 'volume_id' option.
:return: dict with share size, example: {'size': 1}
"""
helper = self._get_helper(share)
share_server = self.service_instance_manager.get_common_server()
server_details = share_server['backend_details']
old_export_location = share['export_locations'][0]['path']
mount_path = helper.get_share_path_by_export_location(
share_server['backend_details'], old_export_location)
LOG.debug("Manage: mount path = %s", mount_path)
mounted = self._is_device_mounted(mount_path, server_details)
LOG.debug("Manage: is share mounted = %s", mounted)
if not mounted:
msg = _("Provided share %s is not mounted.") % share['id']
raise exception.ManageInvalidShare(reason=msg)
def get_volume():
if 'volume_id' in driver_options:
try:
return self.volume_api.get(
self.admin_context, driver_options['volume_id'])
except exception.VolumeNotFound as e:
raise exception.ManageInvalidShare(reason=six.text_type(e))
# NOTE(vponomaryov): Manila can only combine volume name by itself,
# nowhere to get volume ID from. Return None since Cinder volume
# names are not unique or fixed, hence, they can not be used for
# sure.
return None
share_volume = get_volume()
if share_volume:
instance_volumes = self.compute_api.instance_volumes_list(
self.admin_context, server_details['instance_id'])
attached_volumes = [vol.id for vol in instance_volumes]
LOG.debug('Manage: attached volumes = %s',
six.text_type(attached_volumes))
if share_volume['id'] not in attached_volumes:
msg = _("Provided volume %s is not attached "
"to service instance.") % share_volume['id']
raise exception.ManageInvalidShare(reason=msg)
linked_volume_name = self._get_volume_name(share['id'])
if share_volume['name'] != linked_volume_name:
LOG.debug('Manage: volume_id = %s' % share_volume['id'])
self.volume_api.update(self.admin_context, share_volume['id'],
{'name': linked_volume_name})
self.private_storage.update(
share['id'], {'volume_id': share_volume['id']})
share_size = share_volume['size']
else:
share_size = self._get_mounted_share_size(
mount_path, share_server['backend_details'])
export_locations = helper.get_exports_for_share(
server_details, old_export_location)
return {'size': share_size, 'export_locations': export_locations}
    def manage_existing_snapshot(self, snapshot, driver_options):
        """Manage existing share snapshot with manila.

        :param snapshot: Snapshot data
        :param driver_options: Not used by the Generic driver currently
        :return: dict with share snapshot size, example: {'size': 1}
        """
        model_update = {}
        volume_snapshot = None
        snapshot_size = snapshot.get('share_size', 0)
        # 'provider_location' is expected to carry the cinder snapshot id.
        provider_location = snapshot.get('provider_location')
        try:
            volume_snapshot = self.volume_api.get_snapshot(
                self.admin_context,
                provider_location)
        except exception.VolumeSnapshotNotFound as e:
            raise exception.ManageInvalidShareSnapshot(
                reason=six.text_type(e))
        if volume_snapshot:
            snapshot_size = volume_snapshot['size']
            # NOTE(xyang): volume_snapshot_id is saved in private_storage
            # in create_snapshot, so saving it here too for consistency.
            # We should look at whether we still need to save it in
            # private_storage later.
            self.private_storage.update(
                snapshot['id'], {'volume_snapshot_id': volume_snapshot['id']})
            # NOTE(xyang): provider_location is used to map a Manila snapshot
            # to its name on the storage backend and prevent managing of the
            # same snapshot twice.
            model_update['provider_location'] = volume_snapshot['id']
        model_update['size'] = snapshot_size
        return model_update
def unmanage_snapshot(self, snapshot):
"""Unmanage share snapshot with manila."""
self.private_storage.delete(snapshot['id'])
    def _get_mount_stats_by_index(self, mount_path, server_details, index,
                                  block_size='G'):
        """Get mount stats using df shell command.

        :param mount_path: Share path on share server
        :param server_details: Share server connection details
        :param index: Data index in df command output:
            BLOCK_DEVICE_SIZE_INDEX - Size of block device
            USED_SPACE_INDEX - Used space
        :param block_size: size of block (example: G, M, Mib, etc)
        :returns: value of provided index
        """
        share_size_cmd = ['df', '-PB%s' % block_size, mount_path]
        output, __ = self._ssh_exec(server_details, share_size_cmd)
        lines = output.split('\n')
        # Line 0 is the df header; line 1 holds the values. The trailing
        # unit character (e.g. 'G') is stripped before int() conversion.
        return int(lines[1].split()[index][:-1])
def _get_mounted_share_size(self, mount_path, server_details):
try:
size = self._get_mount_stats_by_index(
mount_path, server_details, BLOCK_DEVICE_SIZE_INDEX)
except Exception as e:
msg = _("Cannot calculate size of share %(path)s : %(error)s") % {
'path': mount_path,
'error': six.text_type(e)
}
raise exception.ManageInvalidShare(reason=msg)
return size
def _get_consumed_space(self, mount_path, server_details):
try:
size = self._get_mount_stats_by_index(
mount_path, server_details, USED_SPACE_INDEX, block_size='M')
size /= float(units.Ki)
except Exception as e:
msg = _("Cannot calculate consumed space on share "
"%(path)s : %(error)s") % {
'path': mount_path,
'error': six.text_type(e)
}
raise exception.InvalidShare(reason=msg)
return size
| 44.067813 | 79 | 0.585882 |
import os
import time
from oslo_concurrency import processutils
from oslo_config import cfg
from oslo_log import log
from oslo_utils import importutils
from oslo_utils import units
import retrying
import six
from manila.common import constants as const
from manila import compute
from manila import context
from manila import exception
from manila.i18n import _, _LE, _LI, _LW
from manila.share import driver
from manila.share.drivers import service_instance
from manila import utils
from manila import volume
LOG = log.getLogger(__name__)
share_opts = [
cfg.StrOpt('smb_template_config_path',
default='$state_path/smb.conf',
help="Path to smb config."),
cfg.StrOpt('volume_name_template',
default='manila-share-%s',
help="Volume name template."),
cfg.StrOpt('volume_snapshot_name_template',
default='manila-snapshot-%s',
help="Volume snapshot name template."),
cfg.StrOpt('share_mount_path',
default='/shares',
help="Parent path in service instance where shares "
"will be mounted."),
cfg.IntOpt('max_time_to_create_volume',
default=180,
help="Maximum time to wait for creating cinder volume."),
cfg.IntOpt('max_time_to_extend_volume',
default=180,
help="Maximum time to wait for extending cinder volume."),
cfg.IntOpt('max_time_to_attach',
default=120,
help="Maximum time to wait for attaching cinder volume."),
cfg.StrOpt('service_instance_smb_config_path',
default='$share_mount_path/smb.conf',
help="Path to SMB config in service instance."),
cfg.ListOpt('share_helpers',
default=[
'CIFS=manila.share.drivers.helpers.CIFSHelperIPAccess',
'NFS=manila.share.drivers.helpers.NFSHelper',
],
help='Specify list of share export helpers.'),
cfg.StrOpt('share_volume_fstype',
default='ext4',
choices=['ext4', 'ext3'],
help='Filesystem type of the share volume.'),
cfg.StrOpt('cinder_volume_type',
help='Name or id of cinder volume type which will be used '
'for all volumes created by driver.'),
]
CONF = cfg.CONF
CONF.register_opts(share_opts)
BLOCK_DEVICE_SIZE_INDEX = 1
USED_SPACE_INDEX = 2
def ensure_server(f):
    """Decorator: guarantee a usable share server before calling *f*.

    With driver_handles_share_servers=False the common (singleton)
    service instance is looked up and injected into kwargs as
    'share_server'; with DHSS=True the caller-provided server is
    required.  In both cases the server must carry backend details and
    its service instance must be reachable, otherwise an exception is
    raised instead of calling *f*.
    """
    def wrap(self, context, *args, **kwargs):
        server = kwargs.get('share_server')
        if not self.driver_handles_share_servers:
            if not server:
                # Fall back to the single shared service instance.
                server = self.service_instance_manager.get_common_server()
                kwargs['share_server'] = server
            else:
                raise exception.ManilaException(
                    _("Share server handling is not available. "
                      "But 'share_server' was provided. '%s'. "
                      "Share network should not be used.") % server.get('id'))
        elif not server:
            raise exception.ManilaException(
                _("Share server handling is enabled. But 'share_server' "
                  "is not provided. Make sure you used 'share_network'."))
        if not server.get('backend_details'):
            raise exception.ManilaException(
                _("Share server '%s' does not have backend details.") %
                server['id'])
        if not self.service_instance_manager.ensure_service_instance(
                context, server['backend_details']):
            raise exception.ServiceInstanceUnavailable()
        return f(self, context, *args, **kwargs)
    return wrap
class GenericShareDriver(driver.ExecuteMixin, driver.ShareDriver):
def __init__(self, *args, **kwargs):
super(GenericShareDriver, self).__init__(
[False, True], *args, **kwargs)
self.admin_context = context.get_admin_context()
self.configuration.append_config_values(share_opts)
self._helpers = {}
self.backend_name = self.configuration.safe_get(
'share_backend_name') or "Cinder_Volumes"
self.ssh_connections = {}
self._setup_service_instance_manager()
self.private_storage = kwargs.get('private_storage')
def _setup_service_instance_manager(self):
self.service_instance_manager = (
service_instance.ServiceInstanceManager(
driver_config=self.configuration))
def _ssh_exec(self, server, command, check_exit_code=True):
connection = self.ssh_connections.get(server['instance_id'])
ssh_conn_timeout = self.configuration.ssh_conn_timeout
if not connection:
ssh_pool = utils.SSHPool(server['ip'],
22,
ssh_conn_timeout,
server['username'],
server.get('password'),
server.get('pk_path'),
max_size=1)
ssh = ssh_pool.create()
self.ssh_connections[server['instance_id']] = (ssh_pool, ssh)
else:
ssh_pool, ssh = connection
if not ssh.get_transport().is_active():
ssh_pool.remove(ssh)
ssh = ssh_pool.create()
self.ssh_connections[server['instance_id']] = (ssh_pool, ssh)
wrap = lambda token: "\"" + token + "\""
command = [wrap(tkn) if tkn.count(' ') else tkn for tkn in command]
return processutils.ssh_execute(ssh, ' '.join(command),
check_exit_code=check_exit_code)
def check_for_setup_error(self):
    def do_setup(self, context):
        """Initialize APIs and helpers; wait for the common server.

        When the driver does not handle share servers, this blocks
        until the common service VM is reachable, retrying every
        ``sv_fetch_retry_interval`` seconds; with DHSS=True there is
        nothing to wait for and the loop is skipped.
        """
        super(GenericShareDriver, self).do_setup(context)
        self.compute_api = compute.API()
        self.volume_api = volume.API()
        self._setup_helpers()
        common_sv_available = False
        share_server = None
        sv_fetch_retry_interval = 5
        while not (common_sv_available or self.driver_handles_share_servers):
            try:
                share_server = (
                    self.service_instance_manager.get_common_server())
                # Verify the service VM is actually reachable, not just
                # recorded in the database.
                common_sv_available = self._is_share_server_active(
                    context, share_server)
            except Exception as ex:
                LOG.error(ex)
            if not common_sv_available:
                time.sleep(sv_fetch_retry_interval)
                LOG.warning(_LW("Waiting for the common service VM to become "
                                "available. "
                                "Driver is currently uninitialized. "
                                "Share server: %(share_server)s "
                                "Retry interval: %(retry_interval)s"),
                            dict(share_server=share_server,
                                 retry_interval=sv_fetch_retry_interval))
def _setup_helpers(self):
helpers = self.configuration.share_helpers
if helpers:
for helper_str in helpers:
share_proto, __, import_str = helper_str.partition('=')
helper = importutils.import_class(import_str)
self._helpers[share_proto.upper()] = helper(
self._execute,
self._ssh_exec,
self.configuration)
else:
raise exception.ManilaException(
"No protocol helpers selected for Generic Driver. "
"Please specify using config option 'share_helpers'.")
    @ensure_server
    def create_share(self, context, share, share_server=None):
        """Create a new share and return its export locations."""
        return self._create_share(
            context, share,
            snapshot=None,
            share_server=share_server,
        )
    def _create_share(self, context, share, snapshot, share_server=None):
        """Allocate, attach, format (unless from snapshot) and export.

        :param snapshot: source snapshot, or None for a fresh share.
        :returns: list of export locations for the new share.
        """
        helper = self._get_helper(share)
        server_details = share_server['backend_details']
        volume = self._allocate_container(
            self.admin_context, share, snapshot=snapshot)
        volume = self._attach_volume(
            self.admin_context, share, server_details['instance_id'], volume)
        if not snapshot:
            # A volume cloned from a snapshot already carries a
            # filesystem; only brand-new volumes need mkfs.
            self._format_device(server_details, volume)
        self._mount_device(share, server_details, volume)
        export_locations = helper.create_exports(
            server_details, share['name'])
        return export_locations
    @utils.retry(exception.ProcessExecutionError, backoff_rate=1)
    def _is_device_file_available(self, server_details, volume):
        """Retry until the volume's block-device file appears remotely."""
        command = ['sudo', 'test', '-b', volume['mountpoint']]
        self._ssh_exec(server_details, command)
    def _format_device(self, server_details, volume):
        """Create the configured filesystem on the attached volume."""
        # Wait for the device node first - attachment may still be
        # propagating inside the service instance.
        self._is_device_file_available(server_details, volume)
        command = ['sudo', 'mkfs.%s' % self.configuration.share_volume_fstype,
                   volume['mountpoint']]
        self._ssh_exec(server_details, command)
def _is_device_mounted(self, mount_path, server_details, volume=None):
log_data = {
'mount_path': mount_path,
'server_id': server_details['instance_id'],
}
if volume and volume.get('mountpoint', ''):
log_data['volume_id'] = volume['id']
log_data['dev_mount_path'] = volume['mountpoint']
msg = ("Checking whether volume '%(volume_id)s' with mountpoint "
"'%(dev_mount_path)s' is mounted on mount path '%(mount_p"
"ath)s' on server '%(server_id)s' or not." % log_data)
else:
msg = ("Checking whether mount path '%(mount_path)s' exists on "
"server '%(server_id)s' or not." % log_data)
LOG.debug(msg)
mounts_list_cmd = ['sudo', 'mount']
output, __ = self._ssh_exec(server_details, mounts_list_cmd)
mounts = output.split('\n')
for mount in mounts:
mount_elements = mount.split(' ')
if (len(mount_elements) > 2 and mount_path == mount_elements[2]):
if volume:
if (volume.get('mountpoint', '') == mount_elements[0]):
return True
else:
return True
return False
    def _sync_mount_temp_and_perm_files(self, server_details):
        """Copy the temporary mount table to the permanent one, remount.

        :raises ShareBackendException: if either remote step fails.
        """
        try:
            self._ssh_exec(
                server_details,
                ['sudo', 'cp', const.MOUNT_FILE_TEMP, const.MOUNT_FILE],
            )
        except exception.ProcessExecutionError as e:
            LOG.error(_LE("Failed to sync mount files on server '%s'."),
                      server_details['instance_id'])
            raise exception.ShareBackendException(msg=six.text_type(e))
        try:
            # Apply everything from the freshly synced mount file.
            self._ssh_exec(server_details, ['sudo', 'mount', '-a'])
        except exception.ProcessExecutionError as e:
            LOG.error(_LE("Failed to mount all shares on server '%s'."),
                      server_details['instance_id'])
            raise exception.ShareBackendException(msg=six.text_type(e))
    def _mount_device(self, share, server_details, volume):
        """Mount a cinder volume inside the service instance.

        Serialized per service instance so concurrent operations do not
        interleave edits of the mount table.
        """
        @utils.synchronized('generic_driver_mounts_'
                            '%s' % server_details['instance_id'])
        def _mount_device_with_lock():
            mount_path = self._get_mount_path(share)
            device_path = volume['mountpoint']
            log_data = {
                'dev': device_path,
                'path': mount_path,
                'server': server_details['instance_id'],
            }
            try:
                if not self._is_device_mounted(mount_path, server_details,
                                               volume):
                    LOG.debug("Mounting '%(dev)s' to path '%(path)s' on "
                              "server '%(server)s'.", log_data)
                    # Mount once, then remount after assigning a fresh
                    # random filesystem UUID with tune2fs - presumably
                    # to avoid duplicate-UUID clashes for volumes
                    # created from snapshots (TODO confirm).
                    mount_cmd = (
                        'sudo', 'mkdir', '-p', mount_path,
                        '&&', 'sudo', 'mount', device_path, mount_path,
                        '&&', 'sudo', 'chmod', '777', mount_path,
                        '&&', 'sudo', 'umount', mount_path,
                        '&&', 'sudo', 'tune2fs', '-U', 'random', device_path,
                        '&&', 'sudo', 'mount', device_path, mount_path,
                    )
                    self._ssh_exec(server_details, mount_cmd)
                    # Persist the new mount into the permanent table.
                    self._sync_mount_temp_and_perm_files(server_details)
                else:
                    LOG.warning(_LW("Mount point '%(path)s' already exists on "
                                    "server '%(server)s'."), log_data)
            except exception.ProcessExecutionError as e:
                raise exception.ShareBackendException(msg=six.text_type(e))
        return _mount_device_with_lock()
    @utils.retry(exception.ProcessExecutionError)
    def _unmount_device(self, share, server_details):
        """Unmount the share's device and remove its mount directory.

        Serialized per service instance; retried on remote failures by
        the decorator.
        """
        @utils.synchronized('generic_driver_mounts_'
                            '%s' % server_details['instance_id'])
        def _unmount_device_with_lock():
            mount_path = self._get_mount_path(share)
            log_data = {
                'path': mount_path,
                'server': server_details['instance_id'],
            }
            if self._is_device_mounted(mount_path, server_details):
                LOG.debug("Unmounting path '%(path)s' on server "
                          "'%(server)s'.", log_data)
                unmount_cmd = ['sudo', 'umount', mount_path, '&&', 'sudo',
                               'rmdir', mount_path]
                self._ssh_exec(server_details, unmount_cmd)
                # Keep the permanent mount table in sync.
                self._sync_mount_temp_and_perm_files(server_details)
            else:
                LOG.warning(_LW("Mount point '%(path)s' does not exist on "
                                "server '%(server)s'."), log_data)
        return _unmount_device_with_lock()
def _get_mount_path(self, share):
return os.path.join(self.configuration.share_mount_path, share['name'])
    def _attach_volume(self, context, share, instance_id, volume):
        """Attach *volume* to the service instance *instance_id*.

        Attach/detach for one instance is serialized with a
        cross-process lock.

        :returns: the volume once its status is 'in-use'.
        :raises ManilaException: if the volume is attached to another
            instance, enters an unexpected state, or does not become
            'in-use' within ``max_time_to_attach`` seconds.
        """
        @utils.synchronized(
            "generic_driver_attach_detach_%s" % instance_id, external=True)
        def do_attach(volume):
            if volume['status'] == 'in-use':
                attached_volumes = [vol.id for vol in
                                    self.compute_api.instance_volumes_list(
                                        self.admin_context, instance_id)]
                if volume['id'] in attached_volumes:
                    # Already attached to this instance: nothing to do.
                    return volume
                else:
                    raise exception.ManilaException(
                        _('Volume %s is already attached to another instance')
                        % volume['id'])
            # Retry the attach call itself a few times; transient nova
            # failures are retried unconditionally.
            @retrying.retry(stop_max_attempt_number=3,
                            wait_fixed=2000,
                            retry_on_exception=lambda exc: True)
            def attach_volume():
                self.compute_api.instance_volume_attach(
                    self.admin_context, instance_id, volume['id'])
            attach_volume()
            # Poll cinder until the attachment is reported complete.
            t = time.time()
            while time.time() - t < self.configuration.max_time_to_attach:
                volume = self.volume_api.get(context, volume['id'])
                if volume['status'] == 'in-use':
                    return volume
                elif volume['status'] != 'attaching':
                    raise exception.ManilaException(
                        _('Failed to attach volume %s') % volume['id'])
                time.sleep(1)
            # while/else: loop exhausted without attaching -> timed out.
            else:
                err_msg = {
                    'volume_id': volume['id'],
                    'max_time': self.configuration.max_time_to_attach
                }
                raise exception.ManilaException(
                    _('Volume %(volume_id)s has not been attached in '
                      '%(max_time)ss. Giving up.') % err_msg)
        return do_attach(volume)
def _get_volume_name(self, share_id):
return self.configuration.volume_name_template % share_id
    def _get_volume(self, context, share_id):
        """Return the cinder volume backing *share_id*.

        Uses the volume id recorded in private storage when present,
        otherwise falls back to the legacy name-based lookup.
        """
        volume_id = self.private_storage.get(share_id, 'volume_id')
        if volume_id is not None:
            return self.volume_api.get(context, volume_id)
        else:
            return self._get_volume_legacy(context, share_id)
def _get_volume_legacy(self, context, share_id):
volume_name = self._get_volume_name(share_id)
search_opts = {'name': volume_name}
if context.is_admin:
search_opts['all_tenants'] = True
volumes_list = self.volume_api.get_all(context, search_opts)
if len(volumes_list) == 1:
return volumes_list[0]
elif len(volumes_list) > 1:
LOG.error(
_LE("Expected only one volume in volume list with name "
"'%(name)s', but got more than one in a result - "
"'%(result)s'."), {
'name': volume_name, 'result': volumes_list})
raise exception.ManilaException(
_("Error. Ambiguous volumes for name '%s'") % volume_name)
return None
    def _get_volume_snapshot(self, context, snapshot_id):
        """Return the cinder snapshot mapped to manila *snapshot_id*.

        Uses the id recorded in private storage when present, otherwise
        falls back to the legacy name-based lookup.
        """
        volume_snapshot_id = self.private_storage.get(
            snapshot_id, 'volume_snapshot_id')
        if volume_snapshot_id is not None:
            return self.volume_api.get_snapshot(context, volume_snapshot_id)
        else:
            return self._get_volume_snapshot_legacy(context, snapshot_id)
def _get_volume_snapshot_legacy(self, context, snapshot_id):
volume_snapshot_name = (
self.configuration.volume_snapshot_name_template % snapshot_id)
volume_snapshot_list = self.volume_api.get_all_snapshots(
context, {'name': volume_snapshot_name})
volume_snapshot = None
if len(volume_snapshot_list) == 1:
volume_snapshot = volume_snapshot_list[0]
elif len(volume_snapshot_list) > 1:
LOG.error(
_LE("Expected only one volume snapshot in list with name "
"'%(name)s', but got more than one in a result - "
"'%(result)s'."), {
'name': volume_snapshot_name,
'result': volume_snapshot_list})
raise exception.ManilaException(
_('Error. Ambiguous volume snaphots'))
return volume_snapshot
def _detach_volume(self, context, share, server_details):
instance_id = server_details['instance_id']
@utils.synchronized(
"generic_driver_attach_detach_%s" % instance_id, external=True)
def do_detach():
attached_volumes = [vol.id for vol in
self.compute_api.instance_volumes_list(
self.admin_context, instance_id)]
try:
volume = self._get_volume(context, share['id'])
except exception.VolumeNotFound:
LOG.warning(_LW("Volume not found for share %s. "
"Possibly already deleted."), share['id'])
volume = None
if volume and volume['id'] in attached_volumes:
self.compute_api.instance_volume_detach(
self.admin_context,
instance_id,
volume['id']
)
t = time.time()
while time.time() - t < self.configuration.max_time_to_attach:
volume = self.volume_api.get(context, volume['id'])
if volume['status'] in (const.STATUS_AVAILABLE,
const.STATUS_ERROR):
break
time.sleep(1)
else:
err_msg = {
'volume_id': volume['id'],
'max_time': self.configuration.max_time_to_attach
}
raise exception.ManilaException(
_('Volume %(volume_id)s has not been detached in '
'%(max_time)ss. Giving up.') % err_msg)
do_detach()
    def _allocate_container(self, context, share, snapshot=None):
        """Create the backing cinder volume, optionally from a snapshot.

        Records the share->volume mapping in private storage and waits
        for the volume to become available.

        :returns: the available cinder volume.
        """
        volume_snapshot = None
        if snapshot:
            volume_snapshot = self._get_volume_snapshot(context,
                                                        snapshot['id'])
        volume = self.volume_api.create(
            context,
            share['size'],
            self.configuration.volume_name_template % share['id'], '',
            snapshot=volume_snapshot,
            volume_type=self.configuration.cinder_volume_type,
            availability_zone=share['availability_zone'])
        self.private_storage.update(
            share['id'], {'volume_id': volume['id']})
        msg_error = _('Failed to create volume')
        msg_timeout = (
            _('Volume has not been created in %ss. Giving up') %
            self.configuration.max_time_to_create_volume
        )
        return self._wait_for_available_volume(
            volume, self.configuration.max_time_to_create_volume,
            msg_error=msg_error, msg_timeout=msg_timeout
        )
    def _wait_for_available_volume(self, volume, timeout,
                                   msg_error, msg_timeout,
                                   expected_size=None):
        """Poll cinder until *volume* is available (and correctly sized).

        :param volume: cinder volume to poll.
        :param timeout: maximum seconds to wait.
        :param msg_error: exception message used on an error status.
        :param msg_timeout: exception message used on timeout.
        :param expected_size: if given, 'available' is only accepted
            once the volume also reports this size (used after resize).
        :returns: the refreshed volume.
        :raises ManilaException: on error status or timeout.
        """
        t = time.time()
        while time.time() - t < timeout:
            if volume['status'] == const.STATUS_AVAILABLE:
                if expected_size and volume['size'] != expected_size:
                    LOG.debug("The volume %(vol_id)s is available but the "
                              "volume size does not match the expected size. "
                              "A volume resize operation may be pending. "
                              "Expected size: %(expected_size)s, "
                              "Actual size: %(volume_size)s.",
                              dict(vol_id=volume['id'],
                                   expected_size=expected_size,
                                   volume_size=volume['size']))
                else:
                    break
            elif 'error' in volume['status'].lower():
                raise exception.ManilaException(msg_error)
            time.sleep(1)
            volume = self.volume_api.get(self.admin_context, volume['id'])
        # while/else: loop ran out of time without a break.
        else:
            raise exception.ManilaException(msg_timeout)
        return volume
def _deallocate_container(self, context, share):
try:
volume = self._get_volume(context, share['id'])
except exception.VolumeNotFound:
LOG.info(_LI("Volume not found. Already deleted?"))
volume = None
if volume:
if volume['status'] == 'in-use':
raise exception.ManilaException(
_('Volume is still in use and '
'cannot be deleted now.'))
self.volume_api.delete(context, volume['id'])
t = time.time()
while (time.time() - t <
self.configuration.max_time_to_create_volume):
try:
volume = self.volume_api.get(context, volume['id'])
except exception.VolumeNotFound:
LOG.debug('Volume was deleted successfully')
break
time.sleep(1)
else:
raise exception.ManilaException(
_('Volume have not been '
'deleted in %ss. Giving up')
% self.configuration.max_time_to_create_volume)
def _update_share_stats(self):
data = dict(
share_backend_name=self.backend_name,
storage_protocol='NFS_CIFS',
reserved_percentage=self.configuration.reserved_share_percentage,
)
super(GenericShareDriver, self)._update_share_stats(data)
    @ensure_server
    def create_share_from_snapshot(self, context, share, snapshot,
                                   share_server=None):
        """Create a share from an existing share snapshot."""
        return self._create_share(
            context, share,
            snapshot=snapshot,
            share_server=share_server,
        )
    @ensure_server
    def extend_share(self, share, new_size, share_server=None):
        """Grow a share by extending its backing cinder volume.

        Access is blocked and the device unmounted for the duration;
        the volume is only detached and extended when it is actually
        smaller than *new_size*, then the filesystem is resized and
        everything is remounted and re-exported.
        """
        server_details = share_server['backend_details']
        helper = self._get_helper(share)
        helper.disable_access_for_maintenance(server_details, share['name'])
        self._unmount_device(share, server_details)
        volume = self._get_volume(self.admin_context, share['id'])
        if int(new_size) > volume['size']:
            self._detach_volume(self.admin_context, share, server_details)
            volume = self._extend_volume(self.admin_context, volume, new_size)
            volume = self._attach_volume(
                self.admin_context,
                share,
                server_details['instance_id'],
                volume)
        self._resize_filesystem(server_details, volume)
        self._mount_device(share, server_details, volume)
        helper.restore_access_after_maintenance(server_details,
                                                share['name'])
    def _extend_volume(self, context, volume, new_size):
        """Extend a cinder volume and wait until it is available again."""
        self.volume_api.extend(context, volume['id'], new_size)
        msg_error = _('Failed to extend volume %s') % volume['id']
        msg_timeout = (
            _('Volume has not been extended in %ss. Giving up') %
            self.configuration.max_time_to_extend_volume
        )
        return self._wait_for_available_volume(
            volume, self.configuration.max_time_to_extend_volume,
            msg_error=msg_error, msg_timeout=msg_timeout,
            expected_size=new_size
        )
@ensure_server
def shrink_share(self, share, new_size, share_server=None):
server_details = share_server['backend_details']
helper = self._get_helper(share)
export_location = share['export_locations'][0]['path']
mount_path = helper.get_share_path_by_export_location(
server_details, export_location)
consumed_space = self._get_consumed_space(mount_path, server_details)
LOG.debug("Consumed space on share: %s", consumed_space)
if consumed_space >= new_size:
raise exception.ShareShrinkingPossibleDataLoss(
share_id=share['id'])
volume = self._get_volume(self.admin_context, share['id'])
helper.disable_access_for_maintenance(server_details, share['name'])
self._unmount_device(share, server_details)
try:
self._resize_filesystem(server_details, volume, new_size=new_size)
except exception.Invalid:
raise exception.ShareShrinkingPossibleDataLoss(
share_id=share['id'])
except Exception as e:
msg = _("Cannot shrink share: %s") % six.text_type(e)
raise exception.Invalid(msg)
finally:
self._mount_device(share, server_details, volume)
helper.restore_access_after_maintenance(server_details,
share['name'])
def _resize_filesystem(self, server_details, volume, new_size=None):
check_command = ['sudo', 'fsck', '-pf', volume['mountpoint']]
self._ssh_exec(server_details, check_command)
command = ['sudo', 'resize2fs', volume['mountpoint']]
if new_size:
command.append("%sG" % six.text_type(new_size))
try:
self._ssh_exec(server_details, command)
except processutils.ProcessExecutionError as e:
if e.stderr.find('New size smaller than minimum') != -1:
msg = (_("Invalid 'new_size' provided: %s")
% six.text_type(new_size))
raise exception.Invalid(msg)
else:
msg = _("Cannot resize file-system: %s") % six.text_type(e)
raise exception.ManilaException(msg)
    def _is_share_server_active(self, context, share_server):
        """Return truthy when the share server's service VM is reachable.

        Note the short-circuit chain: the result may be any falsy value
        (None/{}/False), not strictly a bool; callers only test truth.
        """
        has_active_share_server = (
            share_server and share_server.get('backend_details') and
            self.service_instance_manager.ensure_service_instance(
                context, share_server['backend_details']))
        return has_active_share_server
    def delete_share(self, context, share, share_server=None):
        """Delete a share and its backing cinder volume.

        Export removal, unmount and detach are skipped when the share
        server is unreachable; the backing volume and the private
        storage mapping are cleaned up regardless.
        """
        helper = self._get_helper(share)
        if not self.driver_handles_share_servers:
            share_server = self.service_instance_manager.get_common_server()
        if self._is_share_server_active(context, share_server):
            helper.remove_exports(
                share_server['backend_details'], share['name'])
            self._unmount_device(share, share_server['backend_details'])
            self._detach_volume(self.admin_context, share,
                                share_server['backend_details'])
        self._deallocate_container(self.admin_context, share)
        self.private_storage.delete(share['id'])
def create_snapshot(self, context, snapshot, share_server=None):
model_update = {}
volume = self._get_volume(self.admin_context, snapshot['share_id'])
volume_snapshot_name = (self.configuration.
volume_snapshot_name_template % snapshot['id'])
volume_snapshot = self.volume_api.create_snapshot_force(
self.admin_context, volume['id'], volume_snapshot_name, '')
t = time.time()
while time.time() - t < self.configuration.max_time_to_create_volume:
if volume_snapshot['status'] == const.STATUS_AVAILABLE:
break
if volume_snapshot['status'] == const.STATUS_ERROR:
raise exception.ManilaException(_('Failed to create volume '
'snapshot'))
time.sleep(1)
volume_snapshot = self.volume_api.get_snapshot(
self.admin_context,
volume_snapshot['id'])
self.private_storage.update(
snapshot['id'], {'volume_snapshot_id': volume_snapshot['id']})
model_update['provider_location'] = volume_snapshot['id']
else:
raise exception.ManilaException(
_('Volume snapshot have not been '
'created in %ss. Giving up') %
self.configuration.max_time_to_create_volume)
return model_update
def delete_snapshot(self, context, snapshot, share_server=None):
volume_snapshot = self._get_volume_snapshot(self.admin_context,
snapshot['id'])
if volume_snapshot is None:
return
self.volume_api.delete_snapshot(self.admin_context,
volume_snapshot['id'])
t = time.time()
while time.time() - t < self.configuration.max_time_to_create_volume:
try:
snapshot = self.volume_api.get_snapshot(self.admin_context,
volume_snapshot['id'])
except exception.VolumeSnapshotNotFound:
LOG.debug('Volume snapshot was deleted successfully')
self.private_storage.delete(snapshot['id'])
break
time.sleep(1)
else:
raise exception.ManilaException(
_('Volume snapshot have not been '
'deleted in %ss. Giving up') %
self.configuration.max_time_to_create_volume)
    @ensure_server
    def ensure_share(self, context, share, share_server=None):
        """Ensure the share's volume is attached, mounted and exported."""
        helper = self._get_helper(share)
        volume = self._get_volume(context, share['id'])
        if volume:
            volume = self._attach_volume(
                context,
                share,
                share_server['backend_details']['instance_id'],
                volume)
            self._mount_device(share, share_server['backend_details'], volume)
            # recreate=True rebuilds existing exports instead of failing.
            helper.create_exports(
                share_server['backend_details'], share['name'], recreate=True)
    @ensure_server
    def update_access(self, context, share, access_rules, add_rules,
                      delete_rules, share_server=None):
        """Delegate access-rule updates to the protocol helper."""
        self._get_helper(share).update_access(share_server['backend_details'],
                                              share['name'], access_rules,
                                              add_rules=add_rules,
                                              delete_rules=delete_rules)
def _get_helper(self, share):
helper = self._helpers.get(share['share_proto'])
if helper:
return helper
else:
raise exception.InvalidShare(
reason="Wrong, unsupported or disabled protocol")
    def get_network_allocations_number(self):
        """Return the number of network allocations the driver requests."""
        return 0
def _setup_server(self, network_info, metadata=None):
msg = "Creating share server '%s'."
LOG.debug(msg % network_info['server_id'])
server = self.service_instance_manager.set_up_service_instance(
self.admin_context, network_info)
for helper in self._helpers.values():
helper.init_helper(server)
return server
    def _teardown_server(self, server_details, security_services=None):
        """Delete the service instance backing a share server."""
        instance_id = server_details.get("instance_id")
        LOG.debug("Removing share infrastructure for service instance '%s'.",
                  instance_id)
        self.service_instance_manager.delete_service_instance(
            self.admin_context, server_details)
def manage_existing(self, share, driver_options):
helper = self._get_helper(share)
share_server = self.service_instance_manager.get_common_server()
server_details = share_server['backend_details']
old_export_location = share['export_locations'][0]['path']
mount_path = helper.get_share_path_by_export_location(
share_server['backend_details'], old_export_location)
LOG.debug("Manage: mount path = %s", mount_path)
mounted = self._is_device_mounted(mount_path, server_details)
LOG.debug("Manage: is share mounted = %s", mounted)
if not mounted:
msg = _("Provided share %s is not mounted.") % share['id']
raise exception.ManageInvalidShare(reason=msg)
def get_volume():
if 'volume_id' in driver_options:
try:
return self.volume_api.get(
self.admin_context, driver_options['volume_id'])
except exception.VolumeNotFound as e:
raise exception.ManageInvalidShare(reason=six.text_type(e))
return None
share_volume = get_volume()
if share_volume:
instance_volumes = self.compute_api.instance_volumes_list(
self.admin_context, server_details['instance_id'])
attached_volumes = [vol.id for vol in instance_volumes]
LOG.debug('Manage: attached volumes = %s',
six.text_type(attached_volumes))
if share_volume['id'] not in attached_volumes:
msg = _("Provided volume %s is not attached "
"to service instance.") % share_volume['id']
raise exception.ManageInvalidShare(reason=msg)
linked_volume_name = self._get_volume_name(share['id'])
if share_volume['name'] != linked_volume_name:
LOG.debug('Manage: volume_id = %s' % share_volume['id'])
self.volume_api.update(self.admin_context, share_volume['id'],
{'name': linked_volume_name})
self.private_storage.update(
share['id'], {'volume_id': share_volume['id']})
share_size = share_volume['size']
else:
share_size = self._get_mounted_share_size(
mount_path, share_server['backend_details'])
export_locations = helper.get_exports_for_share(
server_details, old_export_location)
return {'size': share_size, 'export_locations': export_locations}
def manage_existing_snapshot(self, snapshot, driver_options):
model_update = {}
volume_snapshot = None
snapshot_size = snapshot.get('share_size', 0)
provider_location = snapshot.get('provider_location')
try:
volume_snapshot = self.volume_api.get_snapshot(
self.admin_context,
provider_location)
except exception.VolumeSnapshotNotFound as e:
raise exception.ManageInvalidShareSnapshot(
reason=six.text_type(e))
if volume_snapshot:
snapshot_size = volume_snapshot['size']
self.private_storage.update(
snapshot['id'], {'volume_snapshot_id': volume_snapshot['id']})
model_update['provider_location'] = volume_snapshot['id']
model_update['size'] = snapshot_size
return model_update
    def unmanage_snapshot(self, snapshot):
        """Unmanage share snapshot with manila.

        Only the private-storage mapping is removed; the backing
        volume snapshot itself is left untouched.
        """
        self.private_storage.delete(snapshot['id'])
    def _get_mount_stats_by_index(self, mount_path, server_details, index,
                                  block_size='G'):
        """Get mount stats using df shell command.

        :param mount_path: Share path on share server
        :param server_details: Share server connection details
        :param index: Data index in df command output:
            BLOCK_DEVICE_SIZE_INDEX - Size of block device
            USED_SPACE_INDEX - Used space
        :param block_size: size of block (example: G, M, Mib, etc)
        :returns: value of provided index
        """
        share_size_cmd = ['df', '-PB%s' % block_size, mount_path]
        output, __ = self._ssh_exec(server_details, share_size_cmd)
        lines = output.split('\n')
        # lines[1] is the data row (lines[0] is the df header);
        # [:-1] strips the trailing unit letter appended by df -B<unit>.
        return int(lines[1].split()[index][:-1])
    def _get_mounted_share_size(self, mount_path, server_details):
        """Return the size of the block device behind *mount_path*.

        :raises ManageInvalidShare: if the df output cannot be obtained
            or parsed.
        """
        try:
            size = self._get_mount_stats_by_index(
                mount_path, server_details, BLOCK_DEVICE_SIZE_INDEX)
        except Exception as e:
            msg = _("Cannot calculate size of share %(path)s : %(error)s") % {
                'path': mount_path,
                'error': six.text_type(e)
            }
            raise exception.ManageInvalidShare(reason=msg)
        return size
    def _get_consumed_space(self, mount_path, server_details):
        """Return used space on *mount_path* in GiB (float).

        :raises InvalidShare: if the consumed space cannot be
            determined.
        """
        try:
            size = self._get_mount_stats_by_index(
                mount_path, server_details, USED_SPACE_INDEX, block_size='M')
            # df reported MiB (block_size='M'); convert to GiB.
            size /= float(units.Ki)
        except Exception as e:
            msg = _("Cannot calculate consumed space on share "
                    "%(path)s : %(error)s") % {
                'path': mount_path,
                'error': six.text_type(e)
            }
            raise exception.InvalidShare(reason=msg)
        return size
| true | true |
f7177eeda341fbfbb4601d6a8a82e1d73d1f95ba | 17,754 | py | Python | acg/custom_widgets/selection_widgets.py | david-fischer/Anki_CardGen | 909d088ed4e98b97f65a2c896dc607941b00e4da | [
"MIT"
] | 2 | 2021-01-11T08:59:57.000Z | 2021-02-01T12:15:30.000Z | acg/custom_widgets/selection_widgets.py | david-fischer/Anki_CardGen | 909d088ed4e98b97f65a2c896dc607941b00e4da | [
"MIT"
] | null | null | null | acg/custom_widgets/selection_widgets.py | david-fischer/Anki_CardGen | 909d088ed4e98b97f65a2c896dc607941b00e4da | [
"MIT"
] | null | null | null | """Implements various elements to get user selection."""
from functools import partial
from kivy.animation import Animation
from kivy.factory import Factory
from kivy.lang import Builder
from kivy.properties import (
BooleanProperty,
ListProperty,
NumericProperty,
ObjectProperty,
OptionProperty,
StringProperty,
)
from kivy.uix.behaviors import ButtonBehavior
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.floatlayout import FloatLayout
from kivy.uix.image import AsyncImage
from kivy.uix.modalview import ModalView
from kivy.uix.stacklayout import StackLayout
from kivymd.app import MDApp
from kivymd.theming import ThemableBehavior
from kivymd.uix.behaviors import CircularRippleBehavior, RectangularRippleBehavior
from kivymd.uix.card import MDCard
from kivymd.uix.imagelist import SmartTile
from .behaviors import (
CheckBehavior,
ChildrenFromDataBehavior,
LongPressBehavior,
ThemableColorChangeBehavior,
TranslationOnCheckBehavior,
)
class SeparatorWithHeading(FloatLayout):
    r"""Two :class:`MDSeparator`\ s with a heading in between."""
    # Text shown between the two separators.
    heading = StringProperty("")
    """:class:`~kivy.properties.StringProperty` with string used as heading."""
class CheckContainer(ChildrenFromDataBehavior):
    """Container for widgets with :class:`~custom_widgets.behaviors.CheckBehavior`."""
    check_one = BooleanProperty(False)
    """:class:`~kivy.properties.BooleanProperty` defaults to ``False``. If ``True`` only one child can be selected."""
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Re-evaluate the single-selection constraint whenever any
        # child's current_state changes.
        self.child_bindings["current_state"] = self.conditional_uncheck
    def conditional_uncheck(self, instance, value):
        """Uncheck other widgets if :attr:`check_one` is ``True``."""
        if self.check_one:
            # Only act when a child was just checked (value is truthy).
            for check_element in [
                others for others in self.children if others != instance and value
            ]:
                check_element.current_state = False
    def get_checked(self, attribute_name=None):
        """
        Return current selection.

        Args:
            attribute_name: Name of attribute to return. Defaults to ``None``.

        Returns:
            :* If ``attribute_name`` is None: List of selected children
            * Else: List of attribute values
        """
        # kivy stores children in reverse addition order; [::-1]
        # restores the order in which they were added.
        checked_elements = [
            element for element in self.children[::-1] if element.current_state
        ]
        if attribute_name is None:
            return checked_elements
        return [
            getattr(element, attribute_name) for element in checked_elements if element
        ]
class CheckCard(ThemableColorChangeBehavior, MDCard):
    """Selectable :~kivymd.uix.card.MDCard`. Select by click. Changes color on selection."""
    # Presumably a placeholder default so the card is visible before
    # real content is set - confirm against callers.
    text = StringProperty("test " * 15)
    """:class:`~kivy.properties.StringProperty`."""
    def on_press(self):
        """Change boolean value of :attr:`self.current_state`."""
        self.current_state = (  # pylint: disable=attribute-defined-outside-init
            not self.current_state
        )
class CheckChip(
    CircularRippleBehavior,
    ButtonBehavior,
    ThemableColorChangeBehavior,
    BoxLayout,
):
    """Selectable Chip. Select by click. Change color on selection."""
    icon = StringProperty("")
    """:class:`~kivy.properties.StringProperty` defaults to ""."""
    text = StringProperty("")
    """:class:`~kivy.properties.StringProperty` defaults to ""."""
    def on_press(self):
        """Change boolean value of :attr:`current_state`."""
        # current_state is presumably defined by the check behavior in
        # the MRO (ThemableColorChangeBehavior) - hence the pylint tag.
        self.current_state = (  # pylint: disable=attribute-defined-outside-init
            not self.current_state
        )
class TransChip(TranslationOnCheckBehavior, CheckChip):
    """Selectable Chip. Select by click. Change color and text on selection."""
    # Text switching comes from TranslationOnCheckBehavior; color change
    # and press handling are inherited from CheckChip.
class CheckChipContainer(CheckContainer, ThemableBehavior, StackLayout):
    r"""Container for :class:`CheckChip`\ s. Use :attr:`child_dict` to populate."""
    # Widget class instantiated for each data entry (see
    # ChildrenFromDataBehavior via CheckContainer).
    child_class_name = "CheckChip"
    # Whether to draw a bounding box around the chips - presumably
    # consumed by the matching kv rule; confirm.
    draw_box = BooleanProperty(False)
class CheckImageTile(CheckBehavior, SmartTile):
"""
Selectable :class:`~kivymd.uix.imagelist.SmartTile`.
Select by click. Changes :attr:`opacity` and :attr:`boarder_width` on selection.
"""
border_width = NumericProperty(0.01)
""":class:`~kivy.properties.NumericProperty` describing boarder-width of image tile."""
def __init__(self, **kwargs):
self.state_dicts = {
True: {"opacity": 1, "border_width": 3},
False: {"opacity": 0.8, "border_width": 0.01},
}
super().__init__(**kwargs)
def on_press(self):
"""Change boolean value of current state on press."""
self.current_state = ( # pylint: disable=attribute-defined-outside-init
not self.current_state
)
class TransCard(LongPressBehavior, MDCard, RectangularRippleBehavior):
"""Displays :attr:`text_orig` and :attr:`text_trans`, separated by a line."""
text_orig = StringProperty("")
""":class:`~kivy.properties.StringProperty` first text."""
text_trans = StringProperty("")
""":class:`~kivy.properties.StringProperty` second text."""
orientation = OptionProperty("vertical", options=["vertical", "horizontal"])
""":class:`~kivy.properties.OptionProperty` possible values ["vertical", "horizontal"] defaults to "vertical"."""
class LongPressImage(LongPressBehavior, AsyncImage):
""":class:`~kivy.uix.image.AsyncImage` with additional "on_press" and "on_long_press" event."""
Factory.register("LongPressImage", LongPressImage)
Factory.register("TransCard", TransCard)
class MyCarousel(FloatLayout, ChildrenFromDataBehavior):
"""
Carousel that constructs contents from :attr:`data`.
On click, opens a modal with list of content.
"""
carousel = ObjectProperty()
""":class:`~kivy.properties.ObjectProperty`"""
modal_layout_name = StringProperty()
""":class:`~kivy.properties.StringProperty`"""
modal_data_cls_name = StringProperty()
""":class:`~kivy.properties.StringProperty`"""
modal = ModalView()
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.child_bindings = {
"height": self.update_height,
"on_press": self.open_menu,
}
self.on_data()
def update_num_children(self):
"""Add/remove children until correct number is reached."""
diff = len(self.data) - len(self.root_for_children.children) + 1
for _ in range(abs(diff)):
if diff > 0:
self.add_child()
else:
self.remove_child()
def on_data(self, *_):
"""Override :meth:`behaviors.ChildrenFromDataBehavior.on_data` with correct list of children.
The children are in ``carousel.slides`` as opposed to ``carousel.children``.
"""
if self.child_class_name:
self.update_num_children()
self.carousel.index = 1
for i, child_dict in enumerate(self.data, start=1):
for key, val in child_dict.items():
setattr(self.carousel.slides[i], key, val)
def remove_child(self):
"""Override :meth:`behaviors.ChildrenFromDataBehavior.remove_child` with correct list of children.
The children are in ``carousel.slides`` as opposed to ``carousel.children``.
"""
last_slide = self.carousel.slides[-1]
self.carousel.remove_widget(last_slide)
def before_add_child(self, child):
"""Bind :meth:`set_child_width` to change of :attr:`width`."""
self.bind(width=lambda *_: self.set_child_width(child))
def after_add_child(self, child):
"""Call :meth:`set_child_width` after adding child."""
self.set_child_width(child)
def set_child_width(self, child, *_):
"""Set width of child to :attr:`width` - width of left and right-icon."""
width = self.width - self.ids.left_icon.width - self.ids.right_icon.width
setattr(child, "width", width)
def update_height(self, *_):
"""Implement in sub class. Placeholder."""
def get_modal_content(self, size_hint=(1, None)):
"""Return root widget to display on the modal."""
def set_carousel_index(i, *_):
self.carousel.index = i
self.modal.dismiss()
data_dicts = [
{"size_hint": size_hint, "on_press": partial(set_carousel_index, 0)}
] + [
{**dict, "size_hint": size_hint, "on_press": partial(set_carousel_index, i)}
for i, dict in enumerate(self.data, start=1)
]
recycle_view_cls = Factory.get(self.modal_layout_name)
recycle_view = recycle_view_cls()
recycle_view.child_class_name = self.modal_data_cls_name
recycle_view.data = data_dicts
return recycle_view
def get_checked(self, attribute_name=None):
"""If ``attribute_name`` is ``None``, return currently selected widget, else return a property thereof."""
checked_elements = [self.carousel.current_slide]
if attribute_name is None:
return checked_elements
return [
getattr(element, attribute_name) for element in checked_elements if element
]
def open_menu(self, *_):
"""Open :class:`kivy.uix.modalview.ModalView` with content given by :meth:`get_modal_content`."""
self.modal = ModalView()
modal_content = self.get_modal_content()
self.modal.add_widget(modal_content)
self.modal.open()
class ImageCarousel(MyCarousel):
"""Carousel of images."""
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.child_bindings["on_error"] = lambda *_: self.dispatch("on_error", *_)
self.register_event_type("on_error")
self.on_data()
def get_modal_content(self, size_hint=(1, 1)):
"""Call :meth:`MyCarousel.get_modal_content` with ``size_hint=(1,1)``."""
return super().get_modal_content(size_hint=size_hint)
def on_error(self, *_):
"""Placeholder-function."""
class CardCarousel(MyCarousel):
"""
Carousel of :class:`TransCard`.
To use it with different objects, change :attr:`viewclass` and :attr:`modal_data_cls_name`.
"""
def __init__(self, **kwargs):
super().__init__(**kwargs)
del self.child_bindings["on_press"]
def update_height(self, *_):
"""Update height via animation, so that Widget has height of currently displayed card."""
if self.carousel.current_slide:
new_height = self.carousel.current_slide.height + 24
if self.height != new_height:
anim = Animation(height=new_height, duration=0.2)
anim.start(self)
class RecycleCarousel(FloatLayout):
"""
Wrapper class for a :class:`~kivy.uix.carousel.Carousel` that uses only 3 slides to update content dynamically.
The :attr:`index` is updated according to the change of the carousel index and each time one of the slides is
updated with data from :attr:`data`. The content of the slides is constructed as instances of :attr:`viewclass`.
"""
carousel = ObjectProperty()
""":class:`kivy.properties.ObjectProperty` defaults to ``None``."""
viewclass = StringProperty("TransCard")
""":class:`kivy.properties.StringProperty` defaults to ``"TransCard"``. Class name of the widgets that are added
to the carousel."""
data = ListProperty()
""":class:`kivy.properties.ListProperty` defaults to ``None``. List of dictionaries from which the content is
generated."""
slide_width = NumericProperty()
""":class:`kivy.properties.NumericProperty` defaults to ``None``. Width that the content of the slides should
have."""
dynamic_height = BooleanProperty(False)
""":class:`kivy.properties.BooleanProperty` defaults to ``False``. If ``True`` updates the height of the root
widget to the height of the object on the current slide + 24. Only possible if size_hint_y of the widget on the
slide is not set."""
index = NumericProperty(0)
""":class:`kivy.properties.NumericProperty` defaults to ``0``. Current (virtual) index."""
last_carousel_index = NumericProperty(0)
""":class:`kivy.properties.NumericProperty` defaults to ``0``. Last index that the :attr:`carousel` had. Used to
determine whether the user did slide right or left."""
current_slide = ObjectProperty()
""":class:`kivy.properties.ObjectProperty`. Reference to :attr:`carousel`.current_slide."""
modal_layout_name = StringProperty()
""":class:`kivy.properties.StringProperty` defaults to ``None``. Class name for root widget of :attr:`modal`."""
modal_data_cls_name = StringProperty()
""":class:`kivy.properties.StringProperty` defaults to ``None``. Class name for children of :attr:`modal`."""
modal = ObjectProperty(ModalView())
""":class:`kivy.properties.ObjectProperty` defaults to ``ModalView()``."""
default_modal_size_hint = ListProperty([1, None])
def update_height(self, *_):
"""Update height via animation, so that Widget has height of currently displayed card."""
if self.dynamic_height:
new_height = self.carousel.current_slide.height + 24
if self.height != new_height:
anim = Animation(height=new_height, duration=0.3)
anim.start(self)
def setup_modal(self):
"""Return root widget to display on the modal."""
self.modal = ModalView()
modal_root_cls = Factory.get(self.modal_layout_name)
modal_root = modal_root_cls()
self.modal.add_widget(modal_root)
def _modal_child_callback(self, i, *_):
self.set_index(i)
self.modal.dismiss()
def update_modal_content(self):
"""Update content of modal."""
data_dicts = [
{
**dict,
"size_hint": self.default_modal_size_hint,
"on_press": partial(self._modal_child_callback, i),
}
for i, dict in enumerate(self.data)
]
self.modal.children[0].child_class_name = self.modal_data_cls_name
self.modal.children[0].data = data_dicts
def get_checked(self, attribute_name=None):
"""If ``attribute_name`` is ``None``, return currently selected widget, else return a property thereof."""
checked_elements = [self.carousel.current_slide]
if attribute_name is None:
return checked_elements
return [
getattr(element, attribute_name) for element in checked_elements if element
]
def open_menu(self, *_):
"""Open :class:`kivy.uix.modalview.ModalView` with content given by :meth:`setup_modal`."""
if not self.modal.children:
self.setup_modal()
self.update_modal_content()
self.modal.open()
def on_data(self, *_):
"""Set up :attr:`carousel` by initializing 3 widgets, adding them and binding some Properties."""
self.carousel.clear_widgets()
if len(self.data) >= 3:
for i in [0, 1, -1]:
widget = Factory.get(self.viewclass)(**self.data[i])
self.carousel.add_widget(widget)
self.bind(slide_width=widget.setter("width"))
widget.bind(on_press=self.open_menu)
widget.width = self.slide_width
self.carousel.register_event_type("on_index")
self.carousel.bind(index=self.update_index)
self.carousel.bind(current_slide=self.update_height)
self.carousel.current_slide.bind(height=self.update_height)
print("RecylceCarousel needs at least 3 elements to be displayed correctly.")
def update_index(self, _, carousel_index):
"""Change :attr:`index` according to change in ``carousel_index`` and update one of the three slides."""
diff = carousel_index - self.last_carousel_index
diff = -1 if diff == 2 else 1 if diff == -2 else diff
self.last_carousel_index = carousel_index
self.index = (self.index + diff) % len(self.data)
self.update_slide(carousel_index + diff, self.index + diff)
def update_slide(self, carousel_index, index):
"""
Update slide with index ``carousel_index`` by content from :attr:`data` [index].
Modulo function applied to indices guarantees values to be in the correct range.
"""
carousel_index %= 3
index %= len(self.data)
for name, val in self.data[index].items():
setattr(self.carousel.slides[carousel_index], name, val)
def set_index(self, index):
"""Set :attr:`index` to ``index`` and updates carousel accordingly."""
self.index = index
self.update_height()
for i in [0, 1, -1]:
self.update_slide((self.last_carousel_index + i) % 3, self.index + i)
# pylint: disable = W,C,R,I
if __name__ == "__main__":
CARD_CAROUSEL_STRING = (
"CardCarousel:\n"
' data: [{"text_orig":str(i)*50*i,"text_trans":"Trans"} for i in range(10)]'
)
RECYCLE_CAROUSEL_STRING = (
"RecycleCardCarousel:\n" # some comment
' data: [{"text_orig":str(i)*50*i,"text_trans":"Trans"} for i in range(10)]'
)
IMAGE_CAROUSEL_STRING = (
"ImageCarousel:\n"
' data: [{"source":"../assets/AnkiCardGen.png"} for _ in range(5)]'
)
class _TestApp(MDApp):
def build(self):
self.theme_cls.primary_palette = "Red" # "Purple", "Red"
self.theme_cls.theme_style = "Light" # "Purple", "Red"
return Builder.load_string(RECYCLE_CAROUSEL_STRING)
_TestApp().run()
| 37.694268 | 118 | 0.652529 |
from functools import partial
from kivy.animation import Animation
from kivy.factory import Factory
from kivy.lang import Builder
from kivy.properties import (
BooleanProperty,
ListProperty,
NumericProperty,
ObjectProperty,
OptionProperty,
StringProperty,
)
from kivy.uix.behaviors import ButtonBehavior
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.floatlayout import FloatLayout
from kivy.uix.image import AsyncImage
from kivy.uix.modalview import ModalView
from kivy.uix.stacklayout import StackLayout
from kivymd.app import MDApp
from kivymd.theming import ThemableBehavior
from kivymd.uix.behaviors import CircularRippleBehavior, RectangularRippleBehavior
from kivymd.uix.card import MDCard
from kivymd.uix.imagelist import SmartTile
from .behaviors import (
CheckBehavior,
ChildrenFromDataBehavior,
LongPressBehavior,
ThemableColorChangeBehavior,
TranslationOnCheckBehavior,
)
class SeparatorWithHeading(FloatLayout):
heading = StringProperty("")
class CheckContainer(ChildrenFromDataBehavior):
check_one = BooleanProperty(False)
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.child_bindings["current_state"] = self.conditional_uncheck
def conditional_uncheck(self, instance, value):
if self.check_one:
for check_element in [
others for others in self.children if others != instance and value
]:
check_element.current_state = False
def get_checked(self, attribute_name=None):
checked_elements = [
element for element in self.children[::-1] if element.current_state
]
if attribute_name is None:
return checked_elements
return [
getattr(element, attribute_name) for element in checked_elements if element
]
class CheckCard(ThemableColorChangeBehavior, MDCard):
text = StringProperty("test " * 15)
def on_press(self):
self.current_state = (
not self.current_state
)
class CheckChip(
CircularRippleBehavior,
ButtonBehavior,
ThemableColorChangeBehavior,
BoxLayout,
):
icon = StringProperty("")
text = StringProperty("")
def on_press(self):
self.current_state = (
not self.current_state
)
class TransChip(TranslationOnCheckBehavior, CheckChip):
class CheckChipContainer(CheckContainer, ThemableBehavior, StackLayout):
child_class_name = "CheckChip"
draw_box = BooleanProperty(False)
class CheckImageTile(CheckBehavior, SmartTile):
border_width = NumericProperty(0.01)
def __init__(self, **kwargs):
self.state_dicts = {
True: {"opacity": 1, "border_width": 3},
False: {"opacity": 0.8, "border_width": 0.01},
}
super().__init__(**kwargs)
def on_press(self):
self.current_state = (
not self.current_state
)
class TransCard(LongPressBehavior, MDCard, RectangularRippleBehavior):
text_orig = StringProperty("")
text_trans = StringProperty("")
orientation = OptionProperty("vertical", options=["vertical", "horizontal"])
class LongPressImage(LongPressBehavior, AsyncImage):
Factory.register("LongPressImage", LongPressImage)
Factory.register("TransCard", TransCard)
class MyCarousel(FloatLayout, ChildrenFromDataBehavior):
carousel = ObjectProperty()
modal_layout_name = StringProperty()
modal_data_cls_name = StringProperty()
modal = ModalView()
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.child_bindings = {
"height": self.update_height,
"on_press": self.open_menu,
}
self.on_data()
def update_num_children(self):
diff = len(self.data) - len(self.root_for_children.children) + 1
for _ in range(abs(diff)):
if diff > 0:
self.add_child()
else:
self.remove_child()
def on_data(self, *_):
if self.child_class_name:
self.update_num_children()
self.carousel.index = 1
for i, child_dict in enumerate(self.data, start=1):
for key, val in child_dict.items():
setattr(self.carousel.slides[i], key, val)
def remove_child(self):
last_slide = self.carousel.slides[-1]
self.carousel.remove_widget(last_slide)
def before_add_child(self, child):
self.bind(width=lambda *_: self.set_child_width(child))
def after_add_child(self, child):
self.set_child_width(child)
def set_child_width(self, child, *_):
width = self.width - self.ids.left_icon.width - self.ids.right_icon.width
setattr(child, "width", width)
def update_height(self, *_):
def get_modal_content(self, size_hint=(1, None)):
def set_carousel_index(i, *_):
self.carousel.index = i
self.modal.dismiss()
data_dicts = [
{"size_hint": size_hint, "on_press": partial(set_carousel_index, 0)}
] + [
{**dict, "size_hint": size_hint, "on_press": partial(set_carousel_index, i)}
for i, dict in enumerate(self.data, start=1)
]
recycle_view_cls = Factory.get(self.modal_layout_name)
recycle_view = recycle_view_cls()
recycle_view.child_class_name = self.modal_data_cls_name
recycle_view.data = data_dicts
return recycle_view
def get_checked(self, attribute_name=None):
checked_elements = [self.carousel.current_slide]
if attribute_name is None:
return checked_elements
return [
getattr(element, attribute_name) for element in checked_elements if element
]
def open_menu(self, *_):
self.modal = ModalView()
modal_content = self.get_modal_content()
self.modal.add_widget(modal_content)
self.modal.open()
class ImageCarousel(MyCarousel):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.child_bindings["on_error"] = lambda *_: self.dispatch("on_error", *_)
self.register_event_type("on_error")
self.on_data()
def get_modal_content(self, size_hint=(1, 1)):
return super().get_modal_content(size_hint=size_hint)
def on_error(self, *_):
class CardCarousel(MyCarousel):
def __init__(self, **kwargs):
super().__init__(**kwargs)
del self.child_bindings["on_press"]
def update_height(self, *_):
if self.carousel.current_slide:
new_height = self.carousel.current_slide.height + 24
if self.height != new_height:
anim = Animation(height=new_height, duration=0.2)
anim.start(self)
class RecycleCarousel(FloatLayout):
carousel = ObjectProperty()
viewclass = StringProperty("TransCard")
data = ListProperty()
slide_width = NumericProperty()
dynamic_height = BooleanProperty(False)
index = NumericProperty(0)
last_carousel_index = NumericProperty(0)
current_slide = ObjectProperty()
modal_layout_name = StringProperty()
modal_data_cls_name = StringProperty()
modal = ObjectProperty(ModalView())
default_modal_size_hint = ListProperty([1, None])
def update_height(self, *_):
if self.dynamic_height:
new_height = self.carousel.current_slide.height + 24
if self.height != new_height:
anim = Animation(height=new_height, duration=0.3)
anim.start(self)
def setup_modal(self):
self.modal = ModalView()
modal_root_cls = Factory.get(self.modal_layout_name)
modal_root = modal_root_cls()
self.modal.add_widget(modal_root)
def _modal_child_callback(self, i, *_):
self.set_index(i)
self.modal.dismiss()
def update_modal_content(self):
data_dicts = [
{
**dict,
"size_hint": self.default_modal_size_hint,
"on_press": partial(self._modal_child_callback, i),
}
for i, dict in enumerate(self.data)
]
self.modal.children[0].child_class_name = self.modal_data_cls_name
self.modal.children[0].data = data_dicts
def get_checked(self, attribute_name=None):
checked_elements = [self.carousel.current_slide]
if attribute_name is None:
return checked_elements
return [
getattr(element, attribute_name) for element in checked_elements if element
]
def open_menu(self, *_):
if not self.modal.children:
self.setup_modal()
self.update_modal_content()
self.modal.open()
def on_data(self, *_):
self.carousel.clear_widgets()
if len(self.data) >= 3:
for i in [0, 1, -1]:
widget = Factory.get(self.viewclass)(**self.data[i])
self.carousel.add_widget(widget)
self.bind(slide_width=widget.setter("width"))
widget.bind(on_press=self.open_menu)
widget.width = self.slide_width
self.carousel.register_event_type("on_index")
self.carousel.bind(index=self.update_index)
self.carousel.bind(current_slide=self.update_height)
self.carousel.current_slide.bind(height=self.update_height)
print("RecylceCarousel needs at least 3 elements to be displayed correctly.")
def update_index(self, _, carousel_index):
diff = carousel_index - self.last_carousel_index
diff = -1 if diff == 2 else 1 if diff == -2 else diff
self.last_carousel_index = carousel_index
self.index = (self.index + diff) % len(self.data)
self.update_slide(carousel_index + diff, self.index + diff)
def update_slide(self, carousel_index, index):
carousel_index %= 3
index %= len(self.data)
for name, val in self.data[index].items():
setattr(self.carousel.slides[carousel_index], name, val)
def set_index(self, index):
self.index = index
self.update_height()
for i in [0, 1, -1]:
self.update_slide((self.last_carousel_index + i) % 3, self.index + i)
if __name__ == "__main__":
CARD_CAROUSEL_STRING = (
"CardCarousel:\n"
' data: [{"text_orig":str(i)*50*i,"text_trans":"Trans"} for i in range(10)]'
)
RECYCLE_CAROUSEL_STRING = (
"RecycleCardCarousel:\n"
' data: [{"text_orig":str(i)*50*i,"text_trans":"Trans"} for i in range(10)]'
)
IMAGE_CAROUSEL_STRING = (
"ImageCarousel:\n"
' data: [{"source":"../assets/AnkiCardGen.png"} for _ in range(5)]'
)
class _TestApp(MDApp):
def build(self):
self.theme_cls.primary_palette = "Red"
self.theme_cls.theme_style = "Light"
return Builder.load_string(RECYCLE_CAROUSEL_STRING)
_TestApp().run()
| true | true |
f7177f17985f47452311b910b6f0dc8fb2631393 | 276 | py | Python | packaging/setup/plugins/ovirt-engine-setup/ovirt_imageio/__init__.py | jihwahn1018/ovirt-engine | 5c8a3d9a9637eefb28e4accc3cbd2b7f530d5ec9 | [
"Apache-2.0"
] | 347 | 2015-01-20T14:13:21.000Z | 2022-03-31T17:53:11.000Z | packaging/setup/plugins/ovirt-engine-setup/ovirt_imageio/__init__.py | jihwahn1018/ovirt-engine | 5c8a3d9a9637eefb28e4accc3cbd2b7f530d5ec9 | [
"Apache-2.0"
] | 128 | 2015-05-22T19:14:32.000Z | 2022-03-31T08:11:18.000Z | packaging/setup/plugins/ovirt-engine-setup/ovirt_imageio/__init__.py | jihwahn1018/ovirt-engine | 5c8a3d9a9637eefb28e4accc3cbd2b7f530d5ec9 | [
"Apache-2.0"
] | 202 | 2015-01-04T06:20:49.000Z | 2022-03-08T15:30:08.000Z | #
# ovirt-engine-setup -- ovirt engine setup
#
# Copyright oVirt Authors
# SPDX-License-Identifier: Apache-2.0
#
#
"""ovirt-imageio setup plugin."""
from otopi import util
from . import config
@util.export
def createPlugins(context):
config.Plugin(context=context)
| 13.142857 | 42 | 0.721014 |
from otopi import util
from . import config
@util.export
def createPlugins(context):
config.Plugin(context=context)
| true | true |
f71780030f07f5124673cea42eacb5d831327dea | 543 | py | Python | manage.py | arpitmisraw/Effervescence18-Website | 3c510f066986af80aaced566b32c5040310d3107 | [
"MIT"
] | 2 | 2018-06-27T20:46:16.000Z | 2018-08-02T11:02:26.000Z | manage.py | arpitmisraw/Effervescence18-Website | 3c510f066986af80aaced566b32c5040310d3107 | [
"MIT"
] | 3 | 2020-06-05T18:15:37.000Z | 2021-06-10T20:20:29.000Z | manage.py | arpitmisraw/Effervescence18-Website | 3c510f066986af80aaced566b32c5040310d3107 | [
"MIT"
] | 1 | 2019-08-01T12:06:35.000Z | 2019-08-01T12:06:35.000Z | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "effe_portal.settings")
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
| 33.9375 | 75 | 0.688766 |
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "effe_portal.settings")
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
| true | true |
f71780c4b08f54dd66b5a1991c6159621e4cec1f | 18,071 | py | Python | test/cpython/test_audioop.py | aisk/pyston | ac69cfef0621dbc8901175e84fa2b5cb5781a646 | [
"BSD-2-Clause",
"Apache-2.0"
] | 1 | 2020-02-06T14:28:45.000Z | 2020-02-06T14:28:45.000Z | test/cpython/test_audioop.py | aisk/pyston | ac69cfef0621dbc8901175e84fa2b5cb5781a646 | [
"BSD-2-Clause",
"Apache-2.0"
] | null | null | null | test/cpython/test_audioop.py | aisk/pyston | ac69cfef0621dbc8901175e84fa2b5cb5781a646 | [
"BSD-2-Clause",
"Apache-2.0"
] | 1 | 2020-02-06T14:29:00.000Z | 2020-02-06T14:29:00.000Z | # expected: fail
import audioop
import sys
import unittest
import struct
from test.test_support import run_unittest
formats = {
1: 'b',
2: 'h',
4: 'i',
}
def pack(width, data):
return struct.pack('=%d%s' % (len(data), formats[width]), *data)
packs = {
1: lambda *data: pack(1, data),
2: lambda *data: pack(2, data),
4: lambda *data: pack(4, data),
}
maxvalues = {w: (1 << (8 * w - 1)) - 1 for w in (1, 2, 4)}
minvalues = {w: -1 << (8 * w - 1) for w in (1, 2, 4)}
datas = {
1: b'\x00\x12\x45\xbb\x7f\x80\xff',
2: packs[2](0, 0x1234, 0x4567, -0x4567, 0x7fff, -0x8000, -1),
4: packs[4](0, 0x12345678, 0x456789ab, -0x456789ab,
0x7fffffff, -0x80000000, -1),
}
INVALID_DATA = [
(b'abc', 0),
(b'abc', 2),
(b'abc', 4),
]
class TestAudioop(unittest.TestCase):
def test_max(self):
for w in 1, 2, 4:
self.assertEqual(audioop.max(b'', w), 0)
p = packs[w]
self.assertEqual(audioop.max(p(5), w), 5)
self.assertEqual(audioop.max(p(5, -8, -1), w), 8)
self.assertEqual(audioop.max(p(maxvalues[w]), w), maxvalues[w])
self.assertEqual(audioop.max(p(minvalues[w]), w), -minvalues[w])
self.assertEqual(audioop.max(datas[w], w), -minvalues[w])
def test_minmax(self):
for w in 1, 2, 4:
self.assertEqual(audioop.minmax(b'', w),
(0x7fffffff, -0x80000000))
p = packs[w]
self.assertEqual(audioop.minmax(p(5), w), (5, 5))
self.assertEqual(audioop.minmax(p(5, -8, -1), w), (-8, 5))
self.assertEqual(audioop.minmax(p(maxvalues[w]), w),
(maxvalues[w], maxvalues[w]))
self.assertEqual(audioop.minmax(p(minvalues[w]), w),
(minvalues[w], minvalues[w]))
self.assertEqual(audioop.minmax(datas[w], w),
(minvalues[w], maxvalues[w]))
def test_maxpp(self):
for w in 1, 2, 4:
self.assertEqual(audioop.maxpp(b'', w), 0)
self.assertEqual(audioop.maxpp(packs[w](*range(100)), w), 0)
self.assertEqual(audioop.maxpp(packs[w](9, 10, 5, 5, 0, 1), w), 10)
self.assertEqual(audioop.maxpp(datas[w], w),
maxvalues[w] - minvalues[w])
def test_avg(self):
for w in 1, 2, 4:
self.assertEqual(audioop.avg(b'', w), 0)
p = packs[w]
self.assertEqual(audioop.avg(p(5), w), 5)
self .assertEqual(audioop.avg(p(5, 8), w), 6)
self.assertEqual(audioop.avg(p(5, -8), w), -2)
self.assertEqual(audioop.avg(p(maxvalues[w], maxvalues[w]), w),
maxvalues[w])
self.assertEqual(audioop.avg(p(minvalues[w], minvalues[w]), w),
minvalues[w])
self.assertEqual(audioop.avg(packs[4](0x50000000, 0x70000000), 4),
0x60000000)
self.assertEqual(audioop.avg(packs[4](-0x50000000, -0x70000000), 4),
-0x60000000)
def test_avgpp(self):
for w in 1, 2, 4:
self.assertEqual(audioop.avgpp(b'', w), 0)
self.assertEqual(audioop.avgpp(packs[w](*range(100)), w), 0)
self.assertEqual(audioop.avgpp(packs[w](9, 10, 5, 5, 0, 1), w), 10)
self.assertEqual(audioop.avgpp(datas[1], 1), 196)
self.assertEqual(audioop.avgpp(datas[2], 2), 50534)
self.assertEqual(audioop.avgpp(datas[4], 4), 3311897002)
def test_rms(self):
for w in 1, 2, 4:
self.assertEqual(audioop.rms(b'', w), 0)
p = packs[w]
self.assertEqual(audioop.rms(p(*range(100)), w), 57)
self.assertAlmostEqual(audioop.rms(p(maxvalues[w]) * 5, w),
maxvalues[w], delta=1)
self.assertAlmostEqual(audioop.rms(p(minvalues[w]) * 5, w),
-minvalues[w], delta=1)
self.assertEqual(audioop.rms(datas[1], 1), 77)
self.assertEqual(audioop.rms(datas[2], 2), 20001)
self.assertEqual(audioop.rms(datas[4], 4), 1310854152)
def test_cross(self):
for w in 1, 2, 4:
self.assertEqual(audioop.cross(b'', w), -1)
p = packs[w]
self.assertEqual(audioop.cross(p(0, 1, 2), w), 0)
self.assertEqual(audioop.cross(p(1, 2, -3, -4), w), 1)
self.assertEqual(audioop.cross(p(-1, -2, 3, 4), w), 1)
self.assertEqual(audioop.cross(p(0, minvalues[w]), w), 1)
self.assertEqual(audioop.cross(p(minvalues[w], maxvalues[w]), w), 1)
def test_add(self):
for w in 1, 2, 4:
self.assertEqual(audioop.add(b'', b'', w), b'')
self.assertEqual(audioop.add(datas[w], b'\0' * len(datas[w]), w),
datas[w])
self.assertEqual(audioop.add(datas[1], datas[1], 1),
b'\x00\x24\x7f\x80\x7f\x80\xfe')
self.assertEqual(audioop.add(datas[2], datas[2], 2),
packs[2](0, 0x2468, 0x7fff, -0x8000, 0x7fff, -0x8000, -2))
self.assertEqual(audioop.add(datas[4], datas[4], 4),
packs[4](0, 0x2468acf0, 0x7fffffff, -0x80000000,
0x7fffffff, -0x80000000, -2))
def test_bias(self):
for w in 1, 2, 4:
for bias in 0, 1, -1, 127, -128, 0x7fffffff, -0x80000000:
self.assertEqual(audioop.bias(b'', w, bias), b'')
self.assertEqual(audioop.bias(datas[1], 1, 1),
b'\x01\x13\x46\xbc\x80\x81\x00')
self.assertEqual(audioop.bias(datas[1], 1, -1),
b'\xff\x11\x44\xba\x7e\x7f\xfe')
self.assertEqual(audioop.bias(datas[1], 1, 0x7fffffff),
b'\xff\x11\x44\xba\x7e\x7f\xfe')
self.assertEqual(audioop.bias(datas[1], 1, -0x80000000),
datas[1])
self.assertEqual(audioop.bias(datas[2], 2, 1),
packs[2](1, 0x1235, 0x4568, -0x4566, -0x8000, -0x7fff, 0))
self.assertEqual(audioop.bias(datas[2], 2, -1),
packs[2](-1, 0x1233, 0x4566, -0x4568, 0x7ffe, 0x7fff, -2))
self.assertEqual(audioop.bias(datas[2], 2, 0x7fffffff),
packs[2](-1, 0x1233, 0x4566, -0x4568, 0x7ffe, 0x7fff, -2))
self.assertEqual(audioop.bias(datas[2], 2, -0x80000000),
datas[2])
self.assertEqual(audioop.bias(datas[4], 4, 1),
packs[4](1, 0x12345679, 0x456789ac, -0x456789aa,
-0x80000000, -0x7fffffff, 0))
self.assertEqual(audioop.bias(datas[4], 4, -1),
packs[4](-1, 0x12345677, 0x456789aa, -0x456789ac,
0x7ffffffe, 0x7fffffff, -2))
self.assertEqual(audioop.bias(datas[4], 4, 0x7fffffff),
packs[4](0x7fffffff, -0x6dcba989, -0x3a987656, 0x3a987654,
-2, -1, 0x7ffffffe))
self.assertEqual(audioop.bias(datas[4], 4, -0x80000000),
packs[4](-0x80000000, -0x6dcba988, -0x3a987655, 0x3a987655,
-1, 0, 0x7fffffff))
def test_lin2lin(self):
for w in 1, 2, 4:
self.assertEqual(audioop.lin2lin(datas[w], w, w), datas[w])
self.assertEqual(audioop.lin2lin(datas[1], 1, 2),
packs[2](0, 0x1200, 0x4500, -0x4500, 0x7f00, -0x8000, -0x100))
self.assertEqual(audioop.lin2lin(datas[1], 1, 4),
packs[4](0, 0x12000000, 0x45000000, -0x45000000,
0x7f000000, -0x80000000, -0x1000000))
self.assertEqual(audioop.lin2lin(datas[2], 2, 1),
b'\x00\x12\x45\xba\x7f\x80\xff')
self.assertEqual(audioop.lin2lin(datas[2], 2, 4),
packs[4](0, 0x12340000, 0x45670000, -0x45670000,
0x7fff0000, -0x80000000, -0x10000))
self.assertEqual(audioop.lin2lin(datas[4], 4, 1),
b'\x00\x12\x45\xba\x7f\x80\xff')
self.assertEqual(audioop.lin2lin(datas[4], 4, 2),
packs[2](0, 0x1234, 0x4567, -0x4568, 0x7fff, -0x8000, -1))
def test_adpcm2lin(self):
self.assertEqual(audioop.adpcm2lin(b'\x07\x7f\x7f', 1, None),
(b'\x00\x00\x00\xff\x00\xff', (-179, 40)))
self.assertEqual(audioop.adpcm2lin(b'\x07\x7f\x7f', 2, None),
(packs[2](0, 0xb, 0x29, -0x16, 0x72, -0xb3), (-179, 40)))
self.assertEqual(audioop.adpcm2lin(b'\x07\x7f\x7f', 4, None),
(packs[4](0, 0xb0000, 0x290000, -0x160000, 0x720000,
-0xb30000), (-179, 40)))
# Very cursory test
for w in 1, 2, 4:
self.assertEqual(audioop.adpcm2lin(b'\0' * 5, w, None),
(b'\0' * w * 10, (0, 0)))
def test_lin2adpcm(self):
self.assertEqual(audioop.lin2adpcm(datas[1], 1, None),
(b'\x07\x7f\x7f', (-221, 39)))
self.assertEqual(audioop.lin2adpcm(datas[2], 2, None),
(b'\x07\x7f\x7f', (31, 39)))
self.assertEqual(audioop.lin2adpcm(datas[4], 4, None),
(b'\x07\x7f\x7f', (31, 39)))
# Very cursory test
for w in 1, 2, 4:
self.assertEqual(audioop.lin2adpcm(b'\0' * w * 10, w, None),
(b'\0' * 5, (0, 0)))
def test_lin2alaw(self):
self.assertEqual(audioop.lin2alaw(datas[1], 1),
b'\xd5\x87\xa4\x24\xaa\x2a\x5a')
self.assertEqual(audioop.lin2alaw(datas[2], 2),
b'\xd5\x87\xa4\x24\xaa\x2a\x55')
self.assertEqual(audioop.lin2alaw(datas[4], 4),
b'\xd5\x87\xa4\x24\xaa\x2a\x55')
def test_alaw2lin(self):
encoded = b'\x00\x03\x24\x2a\x51\x54\x55\x58\x6b\x71\x7f'\
b'\x80\x83\xa4\xaa\xd1\xd4\xd5\xd8\xeb\xf1\xff'
src = [-688, -720, -2240, -4032, -9, -3, -1, -27, -244, -82, -106,
688, 720, 2240, 4032, 9, 3, 1, 27, 244, 82, 106]
for w in 1, 2, 4:
self.assertEqual(audioop.alaw2lin(encoded, w),
packs[w](*(x << (w * 8) >> 13 for x in src)))
encoded = ''.join(chr(x) for x in xrange(256))
for w in 2, 4:
decoded = audioop.alaw2lin(encoded, w)
self.assertEqual(audioop.lin2alaw(decoded, w), encoded)
def test_lin2ulaw(self):
self.assertEqual(audioop.lin2ulaw(datas[1], 1),
b'\xff\xad\x8e\x0e\x80\x00\x67')
self.assertEqual(audioop.lin2ulaw(datas[2], 2),
b'\xff\xad\x8e\x0e\x80\x00\x7e')
self.assertEqual(audioop.lin2ulaw(datas[4], 4),
b'\xff\xad\x8e\x0e\x80\x00\x7e')
def test_ulaw2lin(self):
encoded = b'\x00\x0e\x28\x3f\x57\x6a\x76\x7c\x7e\x7f'\
b'\x80\x8e\xa8\xbf\xd7\xea\xf6\xfc\xfe\xff'
src = [-8031, -4447, -1471, -495, -163, -53, -18, -6, -2, 0,
8031, 4447, 1471, 495, 163, 53, 18, 6, 2, 0]
for w in 1, 2, 4:
self.assertEqual(audioop.ulaw2lin(encoded, w),
packs[w](*(x << (w * 8) >> 14 for x in src)))
# Current u-law implementation has two codes fo 0: 0x7f and 0xff.
encoded = ''.join(chr(x) for x in range(127) + range(128, 256))
for w in 2, 4:
decoded = audioop.ulaw2lin(encoded, w)
self.assertEqual(audioop.lin2ulaw(decoded, w), encoded)
def test_mul(self):
for w in 1, 2, 4:
self.assertEqual(audioop.mul(b'', w, 2), b'')
self.assertEqual(audioop.mul(datas[w], w, 0),
b'\0' * len(datas[w]))
self.assertEqual(audioop.mul(datas[w], w, 1),
datas[w])
self.assertEqual(audioop.mul(datas[1], 1, 2),
b'\x00\x24\x7f\x80\x7f\x80\xfe')
self.assertEqual(audioop.mul(datas[2], 2, 2),
packs[2](0, 0x2468, 0x7fff, -0x8000, 0x7fff, -0x8000, -2))
self.assertEqual(audioop.mul(datas[4], 4, 2),
packs[4](0, 0x2468acf0, 0x7fffffff, -0x80000000,
0x7fffffff, -0x80000000, -2))
def test_ratecv(self):
for w in 1, 2, 4:
self.assertEqual(audioop.ratecv(b'', w, 1, 8000, 8000, None),
(b'', (-1, ((0, 0),))))
self.assertEqual(audioop.ratecv(b'', w, 5, 8000, 8000, None),
(b'', (-1, ((0, 0),) * 5)))
self.assertEqual(audioop.ratecv(b'', w, 1, 8000, 16000, None),
(b'', (-2, ((0, 0),))))
self.assertEqual(audioop.ratecv(datas[w], w, 1, 8000, 8000, None)[0],
datas[w])
state = None
d1, state = audioop.ratecv(b'\x00\x01\x02', 1, 1, 8000, 16000, state)
d2, state = audioop.ratecv(b'\x00\x01\x02', 1, 1, 8000, 16000, state)
self.assertEqual(d1 + d2, b'\000\000\001\001\002\001\000\000\001\001\002')
for w in 1, 2, 4:
d0, state0 = audioop.ratecv(datas[w], w, 1, 8000, 16000, None)
d, state = b'', None
for i in range(0, len(datas[w]), w):
d1, state = audioop.ratecv(datas[w][i:i + w], w, 1,
8000, 16000, state)
d += d1
self.assertEqual(d, d0)
self.assertEqual(state, state0)
def test_reverse(self):
for w in 1, 2, 4:
self.assertEqual(audioop.reverse(b'', w), b'')
self.assertEqual(audioop.reverse(packs[w](0, 1, 2), w),
packs[w](2, 1, 0))
def test_tomono(self):
for w in 1, 2, 4:
data1 = datas[w]
data2 = bytearray(2 * len(data1))
for k in range(w):
data2[k::2*w] = data1[k::w]
self.assertEqual(audioop.tomono(str(data2), w, 1, 0), data1)
self.assertEqual(audioop.tomono(str(data2), w, 0, 1), b'\0' * len(data1))
for k in range(w):
data2[k+w::2*w] = data1[k::w]
self.assertEqual(audioop.tomono(str(data2), w, 0.5, 0.5), data1)
def test_tostereo(self):
for w in 1, 2, 4:
data1 = datas[w]
data2 = bytearray(2 * len(data1))
for k in range(w):
data2[k::2*w] = data1[k::w]
self.assertEqual(audioop.tostereo(data1, w, 1, 0), data2)
self.assertEqual(audioop.tostereo(data1, w, 0, 0), b'\0' * len(data2))
for k in range(w):
data2[k+w::2*w] = data1[k::w]
self.assertEqual(audioop.tostereo(data1, w, 1, 1), data2)
def test_findfactor(self):
self.assertEqual(audioop.findfactor(datas[2], datas[2]), 1.0)
self.assertEqual(audioop.findfactor(b'\0' * len(datas[2]), datas[2]),
0.0)
def test_findfit(self):
self.assertEqual(audioop.findfit(datas[2], datas[2]), (0, 1.0))
self.assertEqual(audioop.findfit(datas[2], packs[2](1, 2, 0)),
(1, 8038.8))
self.assertEqual(audioop.findfit(datas[2][:-2] * 5 + datas[2], datas[2]),
(30, 1.0))
def test_findmax(self):
self.assertEqual(audioop.findmax(datas[2], 1), 5)
def test_getsample(self):
for w in 1, 2, 4:
data = packs[w](0, 1, -1, maxvalues[w], minvalues[w])
self.assertEqual(audioop.getsample(data, w, 0), 0)
self.assertEqual(audioop.getsample(data, w, 1), 1)
self.assertEqual(audioop.getsample(data, w, 2), -1)
self.assertEqual(audioop.getsample(data, w, 3), maxvalues[w])
self.assertEqual(audioop.getsample(data, w, 4), minvalues[w])
def test_negativelen(self):
# from issue 3306, previously it segfaulted
self.assertRaises(audioop.error,
audioop.findmax, ''.join( chr(x) for x in xrange(256)), -2392392)
def test_issue7673(self):
state = None
for data, size in INVALID_DATA:
size2 = size
self.assertRaises(audioop.error, audioop.getsample, data, size, 0)
self.assertRaises(audioop.error, audioop.max, data, size)
self.assertRaises(audioop.error, audioop.minmax, data, size)
self.assertRaises(audioop.error, audioop.avg, data, size)
self.assertRaises(audioop.error, audioop.rms, data, size)
self.assertRaises(audioop.error, audioop.avgpp, data, size)
self.assertRaises(audioop.error, audioop.maxpp, data, size)
self.assertRaises(audioop.error, audioop.cross, data, size)
self.assertRaises(audioop.error, audioop.mul, data, size, 1.0)
self.assertRaises(audioop.error, audioop.tomono, data, size, 0.5, 0.5)
self.assertRaises(audioop.error, audioop.tostereo, data, size, 0.5, 0.5)
self.assertRaises(audioop.error, audioop.add, data, data, size)
self.assertRaises(audioop.error, audioop.bias, data, size, 0)
self.assertRaises(audioop.error, audioop.reverse, data, size)
self.assertRaises(audioop.error, audioop.lin2lin, data, size, size2)
self.assertRaises(audioop.error, audioop.ratecv, data, size, 1, 1, 1, state)
self.assertRaises(audioop.error, audioop.lin2ulaw, data, size)
self.assertRaises(audioop.error, audioop.lin2alaw, data, size)
self.assertRaises(audioop.error, audioop.lin2adpcm, data, size, state)
def test_wrongsize(self):
data = b'abcdefgh'
state = None
for size in (-1, 0, 3, 5, 1024):
self.assertRaises(audioop.error, audioop.ulaw2lin, data, size)
self.assertRaises(audioop.error, audioop.alaw2lin, data, size)
self.assertRaises(audioop.error, audioop.adpcm2lin, data, size, state)
def test_main():
run_unittest(TestAudioop)
if __name__ == '__main__':
test_main()
| 45.749367 | 88 | 0.531791 |
import audioop
import sys
import unittest
import struct
from test.test_support import run_unittest
formats = {
1: 'b',
2: 'h',
4: 'i',
}
def pack(width, data):
return struct.pack('=%d%s' % (len(data), formats[width]), *data)
packs = {
1: lambda *data: pack(1, data),
2: lambda *data: pack(2, data),
4: lambda *data: pack(4, data),
}
maxvalues = {w: (1 << (8 * w - 1)) - 1 for w in (1, 2, 4)}
minvalues = {w: -1 << (8 * w - 1) for w in (1, 2, 4)}
datas = {
1: b'\x00\x12\x45\xbb\x7f\x80\xff',
2: packs[2](0, 0x1234, 0x4567, -0x4567, 0x7fff, -0x8000, -1),
4: packs[4](0, 0x12345678, 0x456789ab, -0x456789ab,
0x7fffffff, -0x80000000, -1),
}
INVALID_DATA = [
(b'abc', 0),
(b'abc', 2),
(b'abc', 4),
]
class TestAudioop(unittest.TestCase):
def test_max(self):
for w in 1, 2, 4:
self.assertEqual(audioop.max(b'', w), 0)
p = packs[w]
self.assertEqual(audioop.max(p(5), w), 5)
self.assertEqual(audioop.max(p(5, -8, -1), w), 8)
self.assertEqual(audioop.max(p(maxvalues[w]), w), maxvalues[w])
self.assertEqual(audioop.max(p(minvalues[w]), w), -minvalues[w])
self.assertEqual(audioop.max(datas[w], w), -minvalues[w])
def test_minmax(self):
for w in 1, 2, 4:
self.assertEqual(audioop.minmax(b'', w),
(0x7fffffff, -0x80000000))
p = packs[w]
self.assertEqual(audioop.minmax(p(5), w), (5, 5))
self.assertEqual(audioop.minmax(p(5, -8, -1), w), (-8, 5))
self.assertEqual(audioop.minmax(p(maxvalues[w]), w),
(maxvalues[w], maxvalues[w]))
self.assertEqual(audioop.minmax(p(minvalues[w]), w),
(minvalues[w], minvalues[w]))
self.assertEqual(audioop.minmax(datas[w], w),
(minvalues[w], maxvalues[w]))
def test_maxpp(self):
for w in 1, 2, 4:
self.assertEqual(audioop.maxpp(b'', w), 0)
self.assertEqual(audioop.maxpp(packs[w](*range(100)), w), 0)
self.assertEqual(audioop.maxpp(packs[w](9, 10, 5, 5, 0, 1), w), 10)
self.assertEqual(audioop.maxpp(datas[w], w),
maxvalues[w] - minvalues[w])
def test_avg(self):
for w in 1, 2, 4:
self.assertEqual(audioop.avg(b'', w), 0)
p = packs[w]
self.assertEqual(audioop.avg(p(5), w), 5)
self .assertEqual(audioop.avg(p(5, 8), w), 6)
self.assertEqual(audioop.avg(p(5, -8), w), -2)
self.assertEqual(audioop.avg(p(maxvalues[w], maxvalues[w]), w),
maxvalues[w])
self.assertEqual(audioop.avg(p(minvalues[w], minvalues[w]), w),
minvalues[w])
self.assertEqual(audioop.avg(packs[4](0x50000000, 0x70000000), 4),
0x60000000)
self.assertEqual(audioop.avg(packs[4](-0x50000000, -0x70000000), 4),
-0x60000000)
def test_avgpp(self):
for w in 1, 2, 4:
self.assertEqual(audioop.avgpp(b'', w), 0)
self.assertEqual(audioop.avgpp(packs[w](*range(100)), w), 0)
self.assertEqual(audioop.avgpp(packs[w](9, 10, 5, 5, 0, 1), w), 10)
self.assertEqual(audioop.avgpp(datas[1], 1), 196)
self.assertEqual(audioop.avgpp(datas[2], 2), 50534)
self.assertEqual(audioop.avgpp(datas[4], 4), 3311897002)
def test_rms(self):
for w in 1, 2, 4:
self.assertEqual(audioop.rms(b'', w), 0)
p = packs[w]
self.assertEqual(audioop.rms(p(*range(100)), w), 57)
self.assertAlmostEqual(audioop.rms(p(maxvalues[w]) * 5, w),
maxvalues[w], delta=1)
self.assertAlmostEqual(audioop.rms(p(minvalues[w]) * 5, w),
-minvalues[w], delta=1)
self.assertEqual(audioop.rms(datas[1], 1), 77)
self.assertEqual(audioop.rms(datas[2], 2), 20001)
self.assertEqual(audioop.rms(datas[4], 4), 1310854152)
def test_cross(self):
for w in 1, 2, 4:
self.assertEqual(audioop.cross(b'', w), -1)
p = packs[w]
self.assertEqual(audioop.cross(p(0, 1, 2), w), 0)
self.assertEqual(audioop.cross(p(1, 2, -3, -4), w), 1)
self.assertEqual(audioop.cross(p(-1, -2, 3, 4), w), 1)
self.assertEqual(audioop.cross(p(0, minvalues[w]), w), 1)
self.assertEqual(audioop.cross(p(minvalues[w], maxvalues[w]), w), 1)
def test_add(self):
for w in 1, 2, 4:
self.assertEqual(audioop.add(b'', b'', w), b'')
self.assertEqual(audioop.add(datas[w], b'\0' * len(datas[w]), w),
datas[w])
self.assertEqual(audioop.add(datas[1], datas[1], 1),
b'\x00\x24\x7f\x80\x7f\x80\xfe')
self.assertEqual(audioop.add(datas[2], datas[2], 2),
packs[2](0, 0x2468, 0x7fff, -0x8000, 0x7fff, -0x8000, -2))
self.assertEqual(audioop.add(datas[4], datas[4], 4),
packs[4](0, 0x2468acf0, 0x7fffffff, -0x80000000,
0x7fffffff, -0x80000000, -2))
def test_bias(self):
for w in 1, 2, 4:
for bias in 0, 1, -1, 127, -128, 0x7fffffff, -0x80000000:
self.assertEqual(audioop.bias(b'', w, bias), b'')
self.assertEqual(audioop.bias(datas[1], 1, 1),
b'\x01\x13\x46\xbc\x80\x81\x00')
self.assertEqual(audioop.bias(datas[1], 1, -1),
b'\xff\x11\x44\xba\x7e\x7f\xfe')
self.assertEqual(audioop.bias(datas[1], 1, 0x7fffffff),
b'\xff\x11\x44\xba\x7e\x7f\xfe')
self.assertEqual(audioop.bias(datas[1], 1, -0x80000000),
datas[1])
self.assertEqual(audioop.bias(datas[2], 2, 1),
packs[2](1, 0x1235, 0x4568, -0x4566, -0x8000, -0x7fff, 0))
self.assertEqual(audioop.bias(datas[2], 2, -1),
packs[2](-1, 0x1233, 0x4566, -0x4568, 0x7ffe, 0x7fff, -2))
self.assertEqual(audioop.bias(datas[2], 2, 0x7fffffff),
packs[2](-1, 0x1233, 0x4566, -0x4568, 0x7ffe, 0x7fff, -2))
self.assertEqual(audioop.bias(datas[2], 2, -0x80000000),
datas[2])
self.assertEqual(audioop.bias(datas[4], 4, 1),
packs[4](1, 0x12345679, 0x456789ac, -0x456789aa,
-0x80000000, -0x7fffffff, 0))
self.assertEqual(audioop.bias(datas[4], 4, -1),
packs[4](-1, 0x12345677, 0x456789aa, -0x456789ac,
0x7ffffffe, 0x7fffffff, -2))
self.assertEqual(audioop.bias(datas[4], 4, 0x7fffffff),
packs[4](0x7fffffff, -0x6dcba989, -0x3a987656, 0x3a987654,
-2, -1, 0x7ffffffe))
self.assertEqual(audioop.bias(datas[4], 4, -0x80000000),
packs[4](-0x80000000, -0x6dcba988, -0x3a987655, 0x3a987655,
-1, 0, 0x7fffffff))
def test_lin2lin(self):
for w in 1, 2, 4:
self.assertEqual(audioop.lin2lin(datas[w], w, w), datas[w])
self.assertEqual(audioop.lin2lin(datas[1], 1, 2),
packs[2](0, 0x1200, 0x4500, -0x4500, 0x7f00, -0x8000, -0x100))
self.assertEqual(audioop.lin2lin(datas[1], 1, 4),
packs[4](0, 0x12000000, 0x45000000, -0x45000000,
0x7f000000, -0x80000000, -0x1000000))
self.assertEqual(audioop.lin2lin(datas[2], 2, 1),
b'\x00\x12\x45\xba\x7f\x80\xff')
self.assertEqual(audioop.lin2lin(datas[2], 2, 4),
packs[4](0, 0x12340000, 0x45670000, -0x45670000,
0x7fff0000, -0x80000000, -0x10000))
self.assertEqual(audioop.lin2lin(datas[4], 4, 1),
b'\x00\x12\x45\xba\x7f\x80\xff')
self.assertEqual(audioop.lin2lin(datas[4], 4, 2),
packs[2](0, 0x1234, 0x4567, -0x4568, 0x7fff, -0x8000, -1))
def test_adpcm2lin(self):
self.assertEqual(audioop.adpcm2lin(b'\x07\x7f\x7f', 1, None),
(b'\x00\x00\x00\xff\x00\xff', (-179, 40)))
self.assertEqual(audioop.adpcm2lin(b'\x07\x7f\x7f', 2, None),
(packs[2](0, 0xb, 0x29, -0x16, 0x72, -0xb3), (-179, 40)))
self.assertEqual(audioop.adpcm2lin(b'\x07\x7f\x7f', 4, None),
(packs[4](0, 0xb0000, 0x290000, -0x160000, 0x720000,
-0xb30000), (-179, 40)))
for w in 1, 2, 4:
self.assertEqual(audioop.adpcm2lin(b'\0' * 5, w, None),
(b'\0' * w * 10, (0, 0)))
def test_lin2adpcm(self):
self.assertEqual(audioop.lin2adpcm(datas[1], 1, None),
(b'\x07\x7f\x7f', (-221, 39)))
self.assertEqual(audioop.lin2adpcm(datas[2], 2, None),
(b'\x07\x7f\x7f', (31, 39)))
self.assertEqual(audioop.lin2adpcm(datas[4], 4, None),
(b'\x07\x7f\x7f', (31, 39)))
for w in 1, 2, 4:
self.assertEqual(audioop.lin2adpcm(b'\0' * w * 10, w, None),
(b'\0' * 5, (0, 0)))
def test_lin2alaw(self):
self.assertEqual(audioop.lin2alaw(datas[1], 1),
b'\xd5\x87\xa4\x24\xaa\x2a\x5a')
self.assertEqual(audioop.lin2alaw(datas[2], 2),
b'\xd5\x87\xa4\x24\xaa\x2a\x55')
self.assertEqual(audioop.lin2alaw(datas[4], 4),
b'\xd5\x87\xa4\x24\xaa\x2a\x55')
def test_alaw2lin(self):
encoded = b'\x00\x03\x24\x2a\x51\x54\x55\x58\x6b\x71\x7f'\
b'\x80\x83\xa4\xaa\xd1\xd4\xd5\xd8\xeb\xf1\xff'
src = [-688, -720, -2240, -4032, -9, -3, -1, -27, -244, -82, -106,
688, 720, 2240, 4032, 9, 3, 1, 27, 244, 82, 106]
for w in 1, 2, 4:
self.assertEqual(audioop.alaw2lin(encoded, w),
packs[w](*(x << (w * 8) >> 13 for x in src)))
encoded = ''.join(chr(x) for x in xrange(256))
for w in 2, 4:
decoded = audioop.alaw2lin(encoded, w)
self.assertEqual(audioop.lin2alaw(decoded, w), encoded)
def test_lin2ulaw(self):
self.assertEqual(audioop.lin2ulaw(datas[1], 1),
b'\xff\xad\x8e\x0e\x80\x00\x67')
self.assertEqual(audioop.lin2ulaw(datas[2], 2),
b'\xff\xad\x8e\x0e\x80\x00\x7e')
self.assertEqual(audioop.lin2ulaw(datas[4], 4),
b'\xff\xad\x8e\x0e\x80\x00\x7e')
def test_ulaw2lin(self):
encoded = b'\x00\x0e\x28\x3f\x57\x6a\x76\x7c\x7e\x7f'\
b'\x80\x8e\xa8\xbf\xd7\xea\xf6\xfc\xfe\xff'
src = [-8031, -4447, -1471, -495, -163, -53, -18, -6, -2, 0,
8031, 4447, 1471, 495, 163, 53, 18, 6, 2, 0]
for w in 1, 2, 4:
self.assertEqual(audioop.ulaw2lin(encoded, w),
packs[w](*(x << (w * 8) >> 14 for x in src)))
encoded = ''.join(chr(x) for x in range(127) + range(128, 256))
for w in 2, 4:
decoded = audioop.ulaw2lin(encoded, w)
self.assertEqual(audioop.lin2ulaw(decoded, w), encoded)
def test_mul(self):
for w in 1, 2, 4:
self.assertEqual(audioop.mul(b'', w, 2), b'')
self.assertEqual(audioop.mul(datas[w], w, 0),
b'\0' * len(datas[w]))
self.assertEqual(audioop.mul(datas[w], w, 1),
datas[w])
self.assertEqual(audioop.mul(datas[1], 1, 2),
b'\x00\x24\x7f\x80\x7f\x80\xfe')
self.assertEqual(audioop.mul(datas[2], 2, 2),
packs[2](0, 0x2468, 0x7fff, -0x8000, 0x7fff, -0x8000, -2))
self.assertEqual(audioop.mul(datas[4], 4, 2),
packs[4](0, 0x2468acf0, 0x7fffffff, -0x80000000,
0x7fffffff, -0x80000000, -2))
def test_ratecv(self):
for w in 1, 2, 4:
self.assertEqual(audioop.ratecv(b'', w, 1, 8000, 8000, None),
(b'', (-1, ((0, 0),))))
self.assertEqual(audioop.ratecv(b'', w, 5, 8000, 8000, None),
(b'', (-1, ((0, 0),) * 5)))
self.assertEqual(audioop.ratecv(b'', w, 1, 8000, 16000, None),
(b'', (-2, ((0, 0),))))
self.assertEqual(audioop.ratecv(datas[w], w, 1, 8000, 8000, None)[0],
datas[w])
state = None
d1, state = audioop.ratecv(b'\x00\x01\x02', 1, 1, 8000, 16000, state)
d2, state = audioop.ratecv(b'\x00\x01\x02', 1, 1, 8000, 16000, state)
self.assertEqual(d1 + d2, b'\000\000\001\001\002\001\000\000\001\001\002')
for w in 1, 2, 4:
d0, state0 = audioop.ratecv(datas[w], w, 1, 8000, 16000, None)
d, state = b'', None
for i in range(0, len(datas[w]), w):
d1, state = audioop.ratecv(datas[w][i:i + w], w, 1,
8000, 16000, state)
d += d1
self.assertEqual(d, d0)
self.assertEqual(state, state0)
def test_reverse(self):
for w in 1, 2, 4:
self.assertEqual(audioop.reverse(b'', w), b'')
self.assertEqual(audioop.reverse(packs[w](0, 1, 2), w),
packs[w](2, 1, 0))
def test_tomono(self):
for w in 1, 2, 4:
data1 = datas[w]
data2 = bytearray(2 * len(data1))
for k in range(w):
data2[k::2*w] = data1[k::w]
self.assertEqual(audioop.tomono(str(data2), w, 1, 0), data1)
self.assertEqual(audioop.tomono(str(data2), w, 0, 1), b'\0' * len(data1))
for k in range(w):
data2[k+w::2*w] = data1[k::w]
self.assertEqual(audioop.tomono(str(data2), w, 0.5, 0.5), data1)
def test_tostereo(self):
for w in 1, 2, 4:
data1 = datas[w]
data2 = bytearray(2 * len(data1))
for k in range(w):
data2[k::2*w] = data1[k::w]
self.assertEqual(audioop.tostereo(data1, w, 1, 0), data2)
self.assertEqual(audioop.tostereo(data1, w, 0, 0), b'\0' * len(data2))
for k in range(w):
data2[k+w::2*w] = data1[k::w]
self.assertEqual(audioop.tostereo(data1, w, 1, 1), data2)
def test_findfactor(self):
self.assertEqual(audioop.findfactor(datas[2], datas[2]), 1.0)
self.assertEqual(audioop.findfactor(b'\0' * len(datas[2]), datas[2]),
0.0)
def test_findfit(self):
self.assertEqual(audioop.findfit(datas[2], datas[2]), (0, 1.0))
self.assertEqual(audioop.findfit(datas[2], packs[2](1, 2, 0)),
(1, 8038.8))
self.assertEqual(audioop.findfit(datas[2][:-2] * 5 + datas[2], datas[2]),
(30, 1.0))
def test_findmax(self):
self.assertEqual(audioop.findmax(datas[2], 1), 5)
def test_getsample(self):
for w in 1, 2, 4:
data = packs[w](0, 1, -1, maxvalues[w], minvalues[w])
self.assertEqual(audioop.getsample(data, w, 0), 0)
self.assertEqual(audioop.getsample(data, w, 1), 1)
self.assertEqual(audioop.getsample(data, w, 2), -1)
self.assertEqual(audioop.getsample(data, w, 3), maxvalues[w])
self.assertEqual(audioop.getsample(data, w, 4), minvalues[w])
def test_negativelen(self):
self.assertRaises(audioop.error,
audioop.findmax, ''.join( chr(x) for x in xrange(256)), -2392392)
def test_issue7673(self):
state = None
for data, size in INVALID_DATA:
size2 = size
self.assertRaises(audioop.error, audioop.getsample, data, size, 0)
self.assertRaises(audioop.error, audioop.max, data, size)
self.assertRaises(audioop.error, audioop.minmax, data, size)
self.assertRaises(audioop.error, audioop.avg, data, size)
self.assertRaises(audioop.error, audioop.rms, data, size)
self.assertRaises(audioop.error, audioop.avgpp, data, size)
self.assertRaises(audioop.error, audioop.maxpp, data, size)
self.assertRaises(audioop.error, audioop.cross, data, size)
self.assertRaises(audioop.error, audioop.mul, data, size, 1.0)
self.assertRaises(audioop.error, audioop.tomono, data, size, 0.5, 0.5)
self.assertRaises(audioop.error, audioop.tostereo, data, size, 0.5, 0.5)
self.assertRaises(audioop.error, audioop.add, data, data, size)
self.assertRaises(audioop.error, audioop.bias, data, size, 0)
self.assertRaises(audioop.error, audioop.reverse, data, size)
self.assertRaises(audioop.error, audioop.lin2lin, data, size, size2)
self.assertRaises(audioop.error, audioop.ratecv, data, size, 1, 1, 1, state)
self.assertRaises(audioop.error, audioop.lin2ulaw, data, size)
self.assertRaises(audioop.error, audioop.lin2alaw, data, size)
self.assertRaises(audioop.error, audioop.lin2adpcm, data, size, state)
def test_wrongsize(self):
data = b'abcdefgh'
state = None
for size in (-1, 0, 3, 5, 1024):
self.assertRaises(audioop.error, audioop.ulaw2lin, data, size)
self.assertRaises(audioop.error, audioop.alaw2lin, data, size)
self.assertRaises(audioop.error, audioop.adpcm2lin, data, size, state)
def test_main():
run_unittest(TestAudioop)
if __name__ == '__main__':
test_main()
| true | true |
f7178158b29d68a175b0ebeab0012d4e31f2f3e6 | 2,610 | py | Python | boofuzz/requests/http_post.py | youngcraft/boofuzz-modbus | bfeb48345b56797b48079e0620e7b06b27085789 | [
"Apache-2.0"
] | 23 | 2018-08-11T12:12:33.000Z | 2022-01-28T10:22:49.000Z | boofuzz/requests/http_post.py | ctf-fuzzer/boofuzz-modbus | bfeb48345b56797b48079e0620e7b06b27085789 | [
"Apache-2.0"
] | 2 | 2018-07-24T15:15:40.000Z | 2020-07-12T13:06:56.000Z | boofuzz/requests/http_post.py | ctf-fuzzer/boofuzz-modbus | bfeb48345b56797b48079e0620e7b06b27085789 | [
"Apache-2.0"
] | 10 | 2018-04-02T13:21:36.000Z | 2022-01-17T09:20:27.000Z | from boofuzz import *
# All POST mimetypes that I could think of/find
# List of all blocks defined here (for easy copy/paste)
"""
sess.connect(s_get("HTTP VERBS POST"))
sess.connect(s_get("HTTP VERBS POST ALL"))
sess.connect(s_get("HTTP VERBS POST REQ"))
"""
# Fuzz POST requests with most MIMETypes known
s_initialize("HTTP VERBS POST ALL")
s_static("POST / HTTP/1.1\r\n")
s_static("Content-Type: ")
s_group("mimetypes", values=[
"audio/basic",
"audio/x-mpeg",
"drawing/x-dwf",
"graphics/x-inventor",
"image/x-portable-bitmap",
"message/external-body",
"message/http",
"message/news",
"message/partial",
"message/rfc822",
"multipart/alternative",
"multipart/appledouble",
"multipart/digest",
"multipart/form-data",
"multipart/header-set",
"multipart/mixed",
"multipart/parallel",
"multipart/related",
"multipart/report",
"multipart/voice-message",
"multipart/x-mixed-replace",
"text/css",
"text/enriched",
"text/html",
"text/javascript",
"text/plain",
"text/richtext",
"text/sgml",
"text/tab-separated-values",
"text/vbscript",
"video/x-msvideo",
"video/x-sgi-movie",
"workbook/formulaone",
"x-conference/x-cooltalk",
"x-form/x-openscape",
"x-music/x-midi",
"x-script/x-wfxclient",
"x-world/x-3dmf"
])
if s_block_start("mime", group="mimetypes"):
s_static("\r\n")
s_static("Content-Length: ")
s_size("post blob", output_format="ascii", signed=True, fuzzable=True)
s_static("\r\n\r\n")
s_block_end()
if s_block_start("post blob"):
s_string("A" * 100 + "=" + "B" * 100)
s_block_end()
s_static("\r\n\r\n")
# Basic fuzz of post payloads
s_initialize("HTTP VERBS POST")
s_static("POST / HTTP/1.1\r\n")
s_static("Content-Type: ")
s_string("application/x-www-form-urlencoded")
s_static("\r\n")
s_static("Content-Length: ")
s_size("post blob", output_format="ascii", signed=True, fuzzable=True)
s_static("\r\n")
if s_block_start("post blob"):
s_string("A" * 100 + "=" + "B" * 100)
s_block_end()
s_static("\r\n\r\n")
# Fuzz POST request MIMETypes
s_initialize("HTTP VERBS POST REQ")
s_static("POST / HTTP/1.1\r\n")
s_static("Content-Type: ")
s_string("application")
s_delim("/")
s_string("x")
s_delim("-")
s_string("www")
s_delim("-")
s_string("form")
s_delim("-")
s_string("urlencoded")
s_static("\r\n")
s_static("Content-Length: ")
s_size("post blob", output_format="ascii", signed=True, fuzzable=True)
s_static("\r\n")
if s_block_start("post blob"):
s_string("A" * 100 + "=" + "B" * 100)
s_block_end()
s_static("\r\n\r\n") | 23.944954 | 74 | 0.650958 | from boofuzz import *
s_initialize("HTTP VERBS POST ALL")
s_static("POST / HTTP/1.1\r\n")
s_static("Content-Type: ")
s_group("mimetypes", values=[
"audio/basic",
"audio/x-mpeg",
"drawing/x-dwf",
"graphics/x-inventor",
"image/x-portable-bitmap",
"message/external-body",
"message/http",
"message/news",
"message/partial",
"message/rfc822",
"multipart/alternative",
"multipart/appledouble",
"multipart/digest",
"multipart/form-data",
"multipart/header-set",
"multipart/mixed",
"multipart/parallel",
"multipart/related",
"multipart/report",
"multipart/voice-message",
"multipart/x-mixed-replace",
"text/css",
"text/enriched",
"text/html",
"text/javascript",
"text/plain",
"text/richtext",
"text/sgml",
"text/tab-separated-values",
"text/vbscript",
"video/x-msvideo",
"video/x-sgi-movie",
"workbook/formulaone",
"x-conference/x-cooltalk",
"x-form/x-openscape",
"x-music/x-midi",
"x-script/x-wfxclient",
"x-world/x-3dmf"
])
if s_block_start("mime", group="mimetypes"):
s_static("\r\n")
s_static("Content-Length: ")
s_size("post blob", output_format="ascii", signed=True, fuzzable=True)
s_static("\r\n\r\n")
s_block_end()
if s_block_start("post blob"):
s_string("A" * 100 + "=" + "B" * 100)
s_block_end()
s_static("\r\n\r\n")
s_initialize("HTTP VERBS POST")
s_static("POST / HTTP/1.1\r\n")
s_static("Content-Type: ")
s_string("application/x-www-form-urlencoded")
s_static("\r\n")
s_static("Content-Length: ")
s_size("post blob", output_format="ascii", signed=True, fuzzable=True)
s_static("\r\n")
if s_block_start("post blob"):
s_string("A" * 100 + "=" + "B" * 100)
s_block_end()
s_static("\r\n\r\n")
s_initialize("HTTP VERBS POST REQ")
s_static("POST / HTTP/1.1\r\n")
s_static("Content-Type: ")
s_string("application")
s_delim("/")
s_string("x")
s_delim("-")
s_string("www")
s_delim("-")
s_string("form")
s_delim("-")
s_string("urlencoded")
s_static("\r\n")
s_static("Content-Length: ")
s_size("post blob", output_format="ascii", signed=True, fuzzable=True)
s_static("\r\n")
if s_block_start("post blob"):
s_string("A" * 100 + "=" + "B" * 100)
s_block_end()
s_static("\r\n\r\n") | true | true |
f71781cb54b95efe9a515ff1e8acbb559ba8adb6 | 70,600 | py | Python | pyspeckit/spectrum/readers/read_class.py | FaceThePirate/pyspeckit | 734b9f81d440ca3a6db9bf68e9409dbddb52d08b | [
"MIT",
"BSD-3-Clause"
] | null | null | null | pyspeckit/spectrum/readers/read_class.py | FaceThePirate/pyspeckit | 734b9f81d440ca3a6db9bf68e9409dbddb52d08b | [
"MIT",
"BSD-3-Clause"
] | null | null | null | pyspeckit/spectrum/readers/read_class.py | FaceThePirate/pyspeckit | 734b9f81d440ca3a6db9bf68e9409dbddb52d08b | [
"MIT",
"BSD-3-Clause"
] | null | null | null | """
------------------------
GILDAS CLASS file reader
------------------------
Read a CLASS file into an :class:`pyspeckit.spectrum.ObsBlock`
"""
from __future__ import print_function
from six.moves import xrange
from six import iteritems
import six
import astropy.io.fits as pyfits
import numpy
import numpy as np
from numpy import pi
from astropy import log
# from astropy.time import Time
from astropy import units as u
import pyspeckit
import sys
import re
try:
from astropy.utils.console import ProgressBar
except ImportError:
ProgressBar = lambda x: None
ProgressBar.update = lambda x: None
import struct
import time
# 'range' is needed as a keyword
irange = range
def print_timing(func):
"""
Prints execution time of decorated function.
Included here because CLASS files can take a little while to read;
this should probably be replaced with a progressbar
"""
def wrapper(*arg,**kwargs):
t1 = time.time()
res = func(*arg,**kwargs)
t2 = time.time()
log.info('%s took %0.5g s' % (func.__name__, (t2-t1)))
return res
wrapper.__doc__ = func.__doc__
return wrapper
def ensure_bytes(string):
"""
Ensure a given string is in byte form
"""
if six.PY3:
return bytes(string, 'utf-8')
else:
return str(string)
""" Specification: http://iram.fr/IRAMFR/GILDAS/doc/html/class-html/node58.html """
filetype_dict = {'1A ':'Multiple_IEEE',
'1 ':'Multiple_Vax',
'1B ':'Multiple_EEEI',
'2A ':'v2',
'2 ':'v2',
'2B ':'v2',
'9A ':'Single_IEEE',
'9 ':'Single_Vax',
'9B ':'Single_EEEI'}
for key in list(filetype_dict.keys()):
filetype_dict[ensure_bytes(key)] = filetype_dict[key]
fileversion_dict = {'1A ':'v1',
'2A ':'v2',
'9A ':'v1', # untested
}
for key in list(fileversion_dict.keys()):
fileversion_dict[ensure_bytes(key)] = fileversion_dict[key]
record_lengths = {'1A': 512,
'2A': 1024*4}
header_id_numbers = {0: 'USER CODE',
-1: 'COMMENT',
-2: 'GENERAL',
-3: 'POSITION',
-4: 'SPECTRO',
-5: 'BASELINE',
-6: 'HISTORY',
-7: 'UNKNOWN-APEX',
# -8: 'SWITCH',
-9: 'GAUSSFIT', # "private"; see class-interfaces-private.f90
-10: 'DRIFT',
-11: 'BEAMSWITCH', # "private"; see class-interfaces-private.f90
-12: 'SHELLFIT', # "private"; see class-interfaces-private.f90
-13: 'NH3FIT', # "private"; see class-interfaces-private.f90
-14: 'CALIBRATION',
-18: 'ABSFIT', # "private"; see class-interfaces-private.f90
}
header_id_lengths = {-2: 9, # may really be 10?
-3: 17,
-4: 17,
-5: None, # variable length
-6: 3, # variable length
-14: 25,
}
# from packages/classic/lib/classic_mod.f90
filedescv2_nw1=14
"""
GENERAL
integer(kind=obsnum_length) :: num ! [ ] Observation number
integer(kind=4) :: ver ! [ ] Version number
integer(kind=4) :: teles(3) ! [ ] Telescope name
integer(kind=4) :: dobs ! [MJD-60549] Date of observation
integer(kind=4) :: dred ! [MJD-60549] Date of reduction
integer(kind=4) :: typec ! [ code] Type of coordinates
integer(kind=4) :: kind ! [ code] Type of data
integer(kind=4) :: qual ! [ code] Quality of data
integer(kind=4) :: subscan ! [ ] Subscan number
integer(kind=obsnum_length) :: scan ! [ ] Scan number
! Written in the entry
real(kind=8) :: ut ! 1-2 [ rad] UT of observation
real(kind=8) :: st ! 3-4 [ rad] LST of observation
real(kind=4) :: az ! 5 [ rad] Azimuth
real(kind=4) :: el ! 6 [ rad] Elevation
real(kind=4) :: tau ! 7 [neper] Opacity
real(kind=4) :: tsys ! 8 [ K] System temperature
real(kind=4) :: time ! 9 [ s] Integration time
! Not in this section in file
integer(kind=4) :: xunit ! [ code] X unit (if X coordinates section is present)
! NOT in data ---
character(len=12) :: cdobs ! [string] Duplicate of dobs
character(len=12) :: cdred ! [string] Duplicate of dred
"""
keys_lengths = {
'unknown': [
#('NUM' ,1,'int32'), # Observation number
('VER' ,1,'int32'), # Version number
('TELES' ,3,'|S12') , # Telescope name
('DOBS' ,1,'int32'), # Date of observation
('DRED' ,1,'int32'), # Date of reduction
('TYPEC' ,1,'int32'), # Type of coordinates
('KIND' ,1,'int32'), # Type of data
('QUAL' ,1,'int32'), # Quality of data
('SCAN' ,1,'int32'), # Scan number
('SUBSCAN' ,1,'int32'), # Subscan number
],
'COMMENT': [ # -1
('LTEXT',1,'int32'), # integer(kind=4) :: ltext ! Length of comment
('CTEXT',1024//4,'|S1024'), # character ctext*1024 ! Comment string
],
'GENERAL': [ # -2
('UT' ,2,'float64'), # rad UT of observation
('ST' ,2,'float64'), # rad LST of observation
('AZ' ,1,'float32'), # rad Azimuth
('EL' ,1,'float32'), # rad Elevation
('TAU' ,1,'float32'), # neper Opacity
('TSYS' ,1,'float32'), # K System temperature
('TIME' ,1,'float32'), # s Integration time
# XUNIT should not be there?
#( 'XUNIT' ,1,'int32'), # code X unit (if xcoord_sec is present)
] ,
'POSITION': [ # -3
('SOURC',3,'|S12') , # [ ] Source name
('EPOCH',1,'float32'), # [ ] Epoch of coordinates
('LAM' ,2,'float64'), #[rad] Lambda
('BET' ,2,'float64'), #[rad] Beta
('LAMOF',1,'float32'), # [rad] Offset in Lambda
('BETOF',1,'float32'), # [rad] Offset in Beta
('PROJ' ,1,'int32') , # [rad] Projection system
('SL0P' ,1,'float64'), # lambda of descriptive system # MAY NOT EXIST IN OLD CLASS
('SB0P' ,1,'float64'), # beta of descriptive system # MAY NOT EXIST IN OLD CLASS
('SK0P' ,1,'float64'), # angle of descriptive system # MAY NOT EXIST IN OLD CLASS
],
'SPECTRO': [ # -4
#('align' ,1,'int32'), # [ ] Alignment padding
('LINE' ,3,'|S12'), # [ ] Line name
('RESTF' ,2,'float64'), # [ MHz] Rest frequency
('NCHAN' ,1,'int32'), # [ ] Number of channels
('RCHAN' ,1,'float32'), # [ ] Reference channels
('FRES' ,1,'float32'), # [ MHz] Frequency resolution
('FOFF' ,1,'float32'), # [ MHz] Frequency offset
('VRES' ,1,'float32'), # [km/s] Velocity resolution
('VOFF' ,1,'float32'), # [km/s] Velocity at reference channel
('BAD' ,1,'float32'), # [ ] Blanking value
#('ALIGN_1',1,'int32'), # [ ] Alignment padding
('IMAGE' ,2,'float64'), # [ MHz] Image frequency
#('ALIGN_2',1,'int32'), # [ ] Alignment padding
('VTYPE' ,1,'int32'), # [code] Type of velocity
('DOPPLER',2,'float64'), # [ ] Doppler factor = -V/c (CLASS convention)
],
'CALIBRATION': [ # -14
('ALIGN',1,'int32'), # BUFFER (it's a zero - it is not declared in the docs!!!!)
('BEEFF',1,'float32'), # [ ] Beam efficiency
('FOEFF',1,'float32'), # [ ] Forward efficiency
('GAINI',1,'float32'), # [ ] Image/Signal gain ratio
('H2OMM',1,'float32'), # [ mm] Water vapor content
('PAMB',1,'float32'), # [ hPa] Ambient pressure
('TAMB',1,'float32'), # [ K] Ambient temperature
('TATMS',1,'float32'), # [ K] Atmosphere temp. in signal band
('TCHOP',1,'float32'), # [ K] Chopper temperature
('TCOLD',1,'float32'), # [ K] Cold load temperature
('TAUS',1,'float32'), # [neper] Opacity in signal band
('TAUI',1,'float32'), # [neper] Opacity in image band
('TATMI',1,'float32'), # [ K] Atmosphere temp. in image band
('TREC',1,'float32'), # [ K] Receiver temperature
('CMODE',1,'int32'), # [ code] Calibration mode
('ATFAC',1,'float32'), # [ ] Applied calibration factor
('ALTI',1,'float32'), # [ m] Site elevation
('COUNT',3,'3float32'), # [count] Power of Atm., Chopp., Cold
('LCALOF',1,'float32'), # [ rad] Longitude offset for sky measurement
('BCALOF',1,'float32'), # [ rad] Latitude offset for sky measurement
('GEOLONG',1,'float64'), # [ rad] Geographic longitude of observatory # MAY NOT EXIST IN OLD CLASS
('GEOLAT',1,'float64'), # [ rad] Geographic latitude of observatory # MAY NOT EXIST IN OLD CLASS
],
'BASELINE':[
('DEG',1,'int32'), #! [ ] Degree of last baseline
('SIGFI',1,'float32'), #! [Int. unit] Sigma
('AIRE',1,'float32'), #! [Int. unit] Area under windows
('NWIND',1,'int32'), #! [ ] Number of line windows
# WARNING: These should probably have 'n', the second digit, = NWIND
# The docs are really unclear about this, they say "W1(MWIND)"
('W1MWIND',1,'float32'), #! [km/s] Lower limits of windows
('W2MWIND',1,'float32'), #! [km/s] Upper limits of windows
('SINUS',3,'float32'), #![] Sinus baseline results
],
'DRIFT':[ # 16?
('FREQ',1,'float64') , #! [ MHz] Rest frequency real(kind=8) ::
('WIDTH',1,'float32'), #! [ MHz] Bandwidth real(kind=4) ::
('NPOIN',1,'int32') , #! [ ] Number of data points integer(kind=4) ::
('RPOIN',1,'float32'), #! [ ] Reference point real(kind=4) ::
('TREF',1,'float32') , #! [ ?] Time at reference real(kind=4) ::
('AREF',1,'float32') , #! [ rad] Angular offset at ref. real(kind=4) ::
('APOS',1,'float32') , #! [ rad] Position angle of drift real(kind=4) ::
('TRES',1,'float32') , #! [ ?] Time resolution real(kind=4) ::
('ARES',1,'float32') , #! [ rad] Angular resolution real(kind=4) ::
('BAD',1,'float32') , #! [ ] Blanking value real(kind=4) ::
('CTYPE',1,'int32') , #! [code] Type of offsets integer(kind=4) ::
('CIMAG',1,'float64'), #! [ MHz] Image frequency real(kind=8) ::
('COLLA',1,'float32'), #! [ ?] Collimation error Az real(kind=4) ::
('COLLE',1,'float32'), #! [ ?] Collimation error El real(kind=4) ::
],
}
def _read_bytes(f, n):
'''Read the next `n` bytes (from idlsave)'''
return f.read(n)
"""
Warning: UNCLEAR what endianness should be!
Numpy seemed to get it right, and I think numpy assumes NATIVE endianness
"""
def _read_byte(f):
'''Read a single byte (from idlsave)'''
return numpy.uint8(struct.unpack('=B', f.read(4)[:1])[0])
def _read_int16(f):
'''Read a signed 16-bit integer (from idlsave)'''
return numpy.int16(struct.unpack('=h', f.read(4)[2:4])[0])
def _read_int32(f):
'''Read a signed 32-bit integer (from idlsave)'''
return numpy.int32(struct.unpack('=i', f.read(4))[0])
def _read_int64(f):
'''Read a signed 64-bit integer '''
return numpy.int64(struct.unpack('=q', f.read(8))[0])
def _read_float32(f):
'''Read a 32-bit float (from idlsave)'''
return numpy.float32(struct.unpack('=f', f.read(4))[0])
def _align_32(f):
'''Align to the next 32-bit position in a file (from idlsave)'''
pos = f.tell()
if pos % 4 != 0:
f.seek(pos + 4 - pos % 4)
return
def _read_word(f, length):
    """Read `length` bytes then realign the file to a 32-bit boundary.

    Returns None when `length` is not positive (zero-length fields).
    """
    if length <= 0:
        return None
    chars = _read_bytes(f, length)
    _align_32(f)
    return chars
def _read_int(f):
return struct.unpack('i',f.read(4))
def is_ascii(s):
    """Check if there are non-ascii characters in a byte string
    Parameters
    ----------
    s : bytes
        The string to be checked
    Returns
    -------
    is_ascii : bool
        Returns True if all characters in the string are ascii. False
        otherwise.
    """
    try:
        # Bugfix: decoding non-ascii bytes raises UnicodeDecodeError, which
        # previously propagated to the caller despite the documented False
        # return value.
        return len(s) == len(s.decode('ascii').encode('utf-8'))
    except UnicodeDecodeError:
        return False
def is_all_null(s):
    """Return True if every element of `s` is a NUL byte/character.

    Bugfix: on Python 3, iterating over ``bytes`` yields ``int``s, so the
    original ``x=='\\x00'`` / ``x==b'\\x00'`` comparisons were always False
    for bytes input; 0 is now accepted as well.  Empty input returns True
    (as before).
    """
    return all(x == '\x00' or x == b'\x00' or x == 0 for x in s)
"""
from clic_file.f90: v1, v2
integer(kind=4) :: bloc ! 1 : observation address [records] integer(kind=8) :: bloc ! 1- 2: observation address [records] integer(kind=4) :: bloc ! 1 : block read from index
integer(kind=4) :: num ! 2 : observation number integer(kind=4) :: word ! 3 : address offset [4-bytes] integer(kind=4) :: num ! 2 : number read
integer(kind=4) :: ver ! 3 : observation version integer(kind=4) :: ver ! 4 : observation version integer(kind=4) :: ver ! 3 : version read from index
integer(kind=4) :: sourc(3) ! 4- 6: source name integer(kind=8) :: num ! 5- 6: observation number character(len=12) :: csour ! 4- 6: source read from index
integer(kind=4) :: line(3) ! 7- 9: line name integer(kind=4) :: sourc(3) ! 7- 9: source name character(len=12) :: cline ! 7- 9: line read from index
integer(kind=4) :: teles(3) ! 10-12: telescope name integer(kind=4) :: line(3) ! 10-12: line name character(len=12) :: ctele ! 10-12: telescope read from index
integer(kind=4) :: dobs ! 13 : observation date [class_date] integer(kind=4) :: teles(3) ! 13-15: telescope name integer(kind=4) :: dobs ! 13 : date obs. read from index
integer(kind=4) :: dred ! 14 : reduction date [class_date] integer(kind=4) :: dobs ! 16 : observation date [class_date] integer(kind=4) :: dred ! 14 : date red. read from index
real(kind=4) :: off1 ! 15 : lambda offset [radian] integer(kind=4) :: dred ! 17 : reduction date [class_date] real(kind=4) :: off1 ! 15 : read offset 1
real(kind=4) :: off2 ! 16 : beta offset [radian] real(kind=4) :: off1 ! 18 : lambda offset [radian] real(kind=4) :: off2 ! 16 : read offset 2
integer(kind=4) :: typec ! 17 : coordinates types real(kind=4) :: off2 ! 19 : beta offset [radian] integer(kind=4) :: type ! 17 : type of read offsets
integer(kind=4) :: kind ! 18 : data kind integer(kind=4) :: typec ! 20 : coordinates types integer(kind=4) :: kind ! 18 : type of observation
integer(kind=4) :: qual ! 19 : data quality integer(kind=4) :: kind ! 21 : data kind integer(kind=4) :: qual ! 19 : Quality read from index
integer(kind=4) :: scan ! 20 : scan number integer(kind=4) :: qual ! 22 : data quality integer(kind=4) :: scan ! 20 : Scan number read from index
integer(kind=4) :: proc ! 21 : procedure type integer(kind=4) :: scan ! 23 : scan number real(kind=4) :: posa ! 21 : Position angle
integer(kind=4) :: itype ! 22 : observation type integer(kind=4) :: proc ! 24 : procedure type integer(kind=4) :: subscan ! 22 : Subscan number
real(kind=4) :: houra ! 23 : hour angle [radian] integer(kind=4) :: itype ! 25 : observation type integer(kind=4) :: pad(10) ! 23-32: Pad to 32 words
integer(kind=4) :: project ! 24 : project name real(kind=4) :: houra ! 26 : hour angle [radian]
integer(kind=4) :: pad1 ! 25 : unused word integer(kind=4) :: project(2) ! 27 : project name
integer(kind=4) :: bpc ! 26 : baseline bandpass cal status integer(kind=4) :: bpc ! 29 : baseline bandpass cal status
integer(kind=4) :: ic ! 27 : instrumental cal status integer(kind=4) :: ic ! 30 : instrumental cal status
integer(kind=4) :: recei ! 28 : receiver number integer(kind=4) :: recei ! 31 : receiver number
real(kind=4) :: ut ! 29 : UT [s] real(kind=4) :: ut ! 32 : UT [s]
integer(kind=4) :: pad2(3) ! 30-32: padding to 32 4-bytes word
equivalently
integer(kind=obsnum_length) :: num ! [ ] Observation number
integer(kind=4) :: ver ! [ ] Version number
integer(kind=4) :: teles(3) ! [ ] Telescope name
integer(kind=4) :: dobs ! [MJD-60549] Date of observation
integer(kind=4) :: dred ! [MJD-60549] Date of reduction
integer(kind=4) :: typec ! [ code] Type of coordinates
integer(kind=4) :: kind ! [ code] Type of data
integer(kind=4) :: qual ! [ code] Quality of data
integer(kind=4) :: subscan ! [ ] Subscan number
integer(kind=obsnum_length) :: scan ! [ ] Scan number
"""
"""
index.f90:
call conv%read%i8(data(1), indl%bloc, 1) ! bloc
call conv%read%i4(data(3), indl%word, 1) ! word
call conv%read%i8(data(4), indl%num, 1) ! num
call conv%read%i4(data(6), indl%ver, 1) ! ver
call conv%read%cc(data(7), indl%csour, 3) ! csour
call conv%read%cc(data(10),indl%cline, 3) ! cline
call conv%read%cc(data(13),indl%ctele, 3) ! ctele
call conv%read%i4(data(16),indl%dobs, 1) ! dobs
call conv%read%i4(data(17),indl%dred, 1) ! dred
call conv%read%r4(data(18),indl%off1, 1) ! off1
call conv%read%r4(data(19),indl%off2, 1) ! off2
call conv%read%i4(data(20),indl%type, 1) ! type
call conv%read%i4(data(21),indl%kind, 1) ! kind
call conv%read%i4(data(22),indl%qual, 1) ! qual
call conv%read%r4(data(23),indl%posa, 1) ! posa
call conv%read%i8(data(24),indl%scan, 1) ! scan
call conv%read%i4(data(26),indl%subscan,1) ! subscan
if (isv3) then
call conv%read%r8(data(27),indl%ut, 1) ! ut
else
"""
def _read_indices(f, file_description):
    """Read every entry index from a CLASS file.

    Parameters
    ----------
    f : file
        Open binary CLASS file (each index read seeks absolutely).
    file_description : dict
        Parsed first record from `_read_first_record`.

    Returns
    -------
    list of dict
        One index dict per entry, in entry order.  Entries are 1-indexed on
        disk and ``xnext`` is the next *available* entry number, hence the
        ``xnext - 1`` count.

    Notes
    -----
    The historical ``extension_positions`` computation was dead code (its
    result was never used anywhere) and has been removed.
    """
    return [_read_index(f,
                        filetype=file_description['version'],
                        # CLASS entry numbers are 1-indexed
                        entry_number=ii + 1,
                        file_description=file_description,
                        )
            for ii in range(file_description['xnext'] - 1)]
def _find_index(entry_number, file_description, return_position=False):
if file_description['gex'] == 10:
kex=(entry_number-1)//file_description['lex1'] + 1
else:
# exponential growth:
#kex = gi8_dicho(file_description['nex'], file_description['lexn'], entry_number) - 1
kex = len([xx for xx in file_description['lexn'] if xx<entry_number])
ken = entry_number - file_description['lexn'][kex-1]
#! Find ken (relative entry number in the extension, starts from 1)
#ken = entry_num - file%desc%lexn(kex-1)
kb = ((ken-1)*file_description['lind'])//file_description['reclen']
#kb = ((ken-1)*file%desc%lind)/file%desc%reclen ! In the extension, the
# ! relative record position (as an offset, starts from 0) where the
# ! Entry Index starts. NB: there can be a non-integer number of Entry
# ! Indexes per record
# Subtract 1: 'aex' is 1-indexed
kbl = (file_description['aex'][kex-1]+kb)-1
# kbl = file%desc%aex(kex)+kb ! The absolute record number where the Entry Index goes
k = ((ken-1)*file_description['lind']) % file_description['reclen']
#k = mod((ken-1)*file%desc%lind,file%desc%reclen)+1 ! = in the record, the
# ! first word of the Entry Index of the entry number 'entry_num'
if return_position:
return (kbl*file_description['reclen']+k)*4
else:
return kbl,k
def _read_index(f, filetype='v1', DEBUG=False, clic=False, position=None,
                entry_number=None, file_description=None):
    """
    Read a single entry index from a CLASS file.

    Parameters
    ----------
    f : file
        Open binary CLASS file.
    filetype : str or int
        File version: '1A '/'v1'/1 or '2A '/'v2'/2.
    DEBUG : bool
        Print how many padding bytes were skipped if a v1 index is shorter
        than the expected 128 bytes.
    clic : bool
        Use the CLIC layout for the second half of a v1 index.
    position : int, optional
        Absolute byte offset to seek to before reading.
    entry_number : int, optional
        1-indexed entry number; located via `_find_index` (requires
        `file_description`).
    file_description : dict, optional
        Parsed first record; needed when `entry_number` is given.

    Returns
    -------
    dict
        The index keywords.  v1 ("X"-prefixed) and v2 key names are
        cross-filled so downstream code can use either convention.

    Raises
    ------
    ValueError
        If source/line/telescope fields are non-ascii or all-NUL,
        indicating a corrupt or misaligned index.
    """
    if position is not None:
        f.seek(position)
    if entry_number is not None:
        indpos = _find_index(entry_number, file_description, return_position=True)
        f.seek(indpos)
    # Remember where the index started so we can check its length (v1)
    # and report read-failure positions.
    x0 = f.tell()
    if filetype in ('1A ','v1', 1):
        log.debug('Index filetype 1A')
        # Field order follows the v1 on-disk layout (see the clic_file
        # notes earlier in this module).
        index = {
            "XBLOC":_read_int32(f),
            "XNUM":_read_int32(f),
            "XVER":_read_int32(f),
            "XSOURC":_read_word(f,12),
            "XLINE":_read_word(f,12),
            "XTEL":_read_word(f,12),
            "XDOBS":_read_int32(f),
            "XDRED":_read_int32(f),
            "XOFF1":_read_float32(f),# first offset (real, radians)
            "XOFF2":_read_float32(f),# second offset (real, radians)
            "XTYPE":_read_int32(f),# coordinate system ('EQ'', 'GA', 'HO')
            "XKIND":_read_int32(f),# Kind of observation (0: spectral, 1: continuum, )
            "XQUAL":_read_int32(f),# Quality (0-9)
            "XSCAN":_read_int32(f),# Scan number
        }
        index['BLOC'] = index['XBLOC'] # v2 compatibility
        index['WORD'] = 1 # v2 compatibility
        index['SOURC'] = index['CSOUR'] = index['XSOURC']
        index['DOBS'] = index['CDOBS'] = index['XDOBS']
        index['CTELE'] = index['XTEL']
        index['LINE'] = index['XLINE']
        index['OFF1'] = index['XOFF1']
        index['OFF2'] = index['XOFF2']
        index['QUAL'] = index['XQUAL']
        index['SCAN'] = index['XSCAN']
        index['KIND'] = index['XKIND']
        if clic: # use header set up in clic
            nextchunk = {
                "XPROC":_read_int32(f),# "procedure type"
                "XITYPE":_read_int32(f),#
                "XHOURANG":_read_float32(f),#
                "XPROJNAME":_read_int32(f),#
                "XPAD1":_read_int32(f),
                "XBPC" :_read_int32(f),
                "XIC" :_read_int32(f),
                "XRECEI" :_read_int32(f),
                "XUT":_read_float32(f),
                "XPAD2":numpy.fromfile(f,count=3,dtype='int32') # BLANK is NOT ALLOWED!!! It is a special KW
            }
        else:
            nextchunk = {"XPOSA":_read_float32(f),
                         "XSUBSCAN":_read_int32(f),
                         'XPAD2': numpy.fromfile(f,count=10,dtype='int32'),
                         }
            nextchunk['SUBSCAN'] = nextchunk['XSUBSCAN']
            nextchunk['POSA'] = nextchunk['XPOSA']
        index.update(nextchunk)
        # A v1 index should occupy exactly 128 bytes; consume any
        # remainder so the file pointer stays aligned for the next read.
        if (f.tell() - x0 != 128):
            missed_bits = (f.tell()-x0)
            X = f.read(128-missed_bits)
            if DEBUG: print("read_index missed %i bits: %s" % (128-missed_bits,X))
            #raise IndexError("read_index did not successfully read 128 bytes at %i. Read %i bytes." % (x0,f.tell()-x0))
        if any(not is_ascii(index[x]) for x in ('XSOURC','XLINE','XTEL')):
            raise ValueError("Invalid index read from {0}.".format(x0))
    elif filetype in ('2A ','v2', 2):
        log.debug('Index filetype 2A')
        # v2 layout mirrors the index.f90 conversion calls quoted in the
        # module-level notes above each field.
        index = {
            "BLOC" : _read_int64(f) , #(data(1), 1) ! bloc
            "WORD" : _read_int32(f) , #(data(3), 1) ! word
            "NUM" : _read_int64(f) , #(data(4), 1) ! num
            "VER" : _read_int32(f) , #(data(6), 1) ! ver
            "CSOUR" : _read_word(f,12), #(data(7), 3) ! csour
            "CLINE" : _read_word(f,12), #(data(10), 3) ! cline
            "CTELE" : _read_word(f,12), #(data(13), 3) ! ctele
            "DOBS" : _read_int32(f) , #(data(16), 1) ! dobs
            "DRED" : _read_int32(f) , #(data(17), 1) ! dred
            "OFF1" : _read_float32(f), #(data(18), 1) ! off1
            "OFF2" : _read_float32(f), #(data(19), 1) ! off2
            "TYPE" : _read_int32(f) , #(data(20), 1) ! type
            "KIND" : _read_int32(f) , #(data(21), 1) ! kind
            "QUAL" : _read_int32(f) , #(data(22), 1) ! qual
            "POSA" : _read_float32(f), #(data(23), 1) ! posa
            "SCAN" : _read_int64(f) , #(data(24), 1) ! scan
            "SUBSCAN": _read_int32(f) , #(data(26), 1) ! subscan
        }
        #last24bits = f.read(24)
        #log.debug("Read 24 bits: '{0}'".format(last24bits))
        if any((is_all_null(index[x]) or not is_ascii(index[x]))
               for x in ('CSOUR','CLINE','CTELE')):
            raise ValueError("Invalid index read from {0}.".format(x0))
        # Cross-fill v1-style key names for downstream compatibility
        index['SOURC'] = index['XSOURC'] = index['CSOUR']
        index['LINE'] = index['XLINE'] = index['CLINE']
        index['XKIND'] = index['KIND']
        try:
            index['DOBS'] = index['XDOBS'] = index['CDOBS']
        except KeyError:
            index['CDOBS'] = index['XDOBS'] = index['DOBS']
    else:
        raise NotImplementedError("Filetype {0} not implemented.".format(filetype))
    # from kernel/lib/gsys/date.f90: gag_julda
    index['MJD'] = index['DOBS'] + 60549
    class_dobs = index['DOBS']
    # NOTE(review): converts the CLASS date integer to a fractional year;
    # confirm the 60549/2025 constant pairing against gag_julda.
    index['DOBS'] = ((class_dobs + 365*2025)/365.2425 + 1)
    # SLOW
    #index['DATEOBS'] = Time(index['DOBS'], format='jyear')
    #index['DATEOBSS'] = index['DATEOBS'].iso
    log.debug("Indexing finished at {0}".format(f.tell()))
    return index
def _read_header(f, type=0, position=None):
    """
    Read a header entry from a CLASS file
    (helper function)

    Parameters
    ----------
    f : file
        Open binary CLASS file.
    type : int
        Section identifier; key into the module-level `keys_lengths` table.
    position : int, optional
        Absolute byte offset to seek to before reading.

    Returns
    -------
    dict
        Header keyword -> value mapping; empty for an unrecognized `type`.

    Notes
    -----
    Bugfix: the original ended with ``raise ValueError(...)`` placed after
    both return statements, which made it unreachable dead code; it has
    been removed.  Unknown section types are tolerated and yield {}.
    """
    if position is not None:
        f.seek(position)
    if type in keys_lengths:
        hdrsec = [(x[0], numpy.fromfile(f, count=1, dtype=x[2])[0])
                  for x in keys_lengths[type]]
        return dict(hdrsec)
    return {}
def _read_first_record(f):
    """Dispatch first-record parsing to the v1 or v2 reader.

    The 4-byte file code at offset 0 is looked up in the module-level
    `fileversion_dict` (an unknown code therefore raises KeyError, exactly
    as before).
    """
    f.seek(0)
    filetype = f.read(4)
    version = fileversion_dict[filetype]
    if version == 'v1':
        return _read_first_record_v1(f)
    if version == 'v2':
        return _read_first_record_v2(f)
    raise ValueError("Unrecognized filetype {0}".format(filetype))
def _read_first_record_v1(f, record_length_words=128):
    r"""
    Parse the first record of a v1 CLASS file into a file-description dict.

    Position & Parameter & Fortran Kind & Purpose \\
    \hline
    1 & {\tt code} & Character*4 & File code \\
    2 & {\tt next} & Integer*4 & Next free record \\
    3 & {\tt lex} & Integer*4 & Length of first extension (number of entries) \\
    4 & {\tt nex} & Integer*4 & Number of extensions \\
    5 & {\tt xnext} & Integer*4 & Next available entry number \\
    6:2*{\tt reclen} & {\tt ex(:)} & Integer*4 & Array of extension addresses
    from classic_mod.f90:
    integer(kind=4) :: code ! 1 File code
    integer(kind=4) :: next ! 2 Next free record
    integer(kind=4) :: lex ! 3 Extension length (number of entries)
    integer(kind=4) :: nex ! 4 Number of extensions
    integer(kind=4) :: xnext ! 5 Next available entry number
    integer(kind=4) :: aex(mex_v1) ! 6:256 Extension addresses
    from old (<dec2013) class, file.f90:
    read(ilun,rec=1,err=11,iostat=ier) ibx%code,ibx%next, &
    & ibx%ilex,ibx%imex,ibx%xnext
    also uses filedesc_v1tov2 from classic/lib/file.f90
    """
    # OLD NOTES
    # hdr = header
    # hdr.update(obshead) # re-overwrite things
    # hdr.update({'OBSNUM':obsnum,'RECNUM':spcount})
    # hdr.update({'RA':hdr['LAM']/pi*180,'DEC':hdr['BET']/pi*180})
    # hdr.update({'RAoff':hdr['LAMOF']/pi*180,'DECoff':hdr['BETOF']/pi*180})
    # hdr.update({'OBJECT':hdr['SOURC'].strip()})
    # hdr.update({'BUNIT':'Tastar'})
    # hdr.update({'EXPOSURE':hdr['TIME']})
    f.seek(0)
    # Fixed v1 values (gex/vind/lind etc.) are filled in so the result has
    # the same keys as a v2 file description.
    file_description = {
        'code': f.read(4),
        'next': _read_int32(f),
        'lex': _read_int32(f),
        'nex': _read_int32(f),
        'xnext': _read_int32(f),
        'gex': 10.,
        'vind': 1, # classic_vind_v1 packages/classic/lib/classic_mod.f90
        'version': 1,
        'nextrec': 3,
        'nextword': 1,
        'lind': 32, #classic_lind_v1 packages/classic/lib/classic_mod.f90
        'kind': 'unknown',
        'flags': 0,
    }
    file_description['reclen'] = record_length_words # should be 128w = 512 bytes
    # The remainder of the 1024-byte first block holds extension addresses;
    # zeros mark unused slots.
    ex = np.fromfile(f, count=(record_length_words*2-5), dtype='int32')
    file_description['ex'] = ex[ex!=0]
    file_description['nextrec'] = file_description['next'] # this can't be...
    file_description['lex1'] = file_description['lex'] # number of entries
    # Cumulative entry counts per extension (linear growth in v1)
    file_description['lexn'] = (np.arange(file_description['nex']+1) *
                                file_description['lex1'])
    file_description['nentries'] = np.sum(file_description['lexn'])
    file_description['aex'] = file_description['ex'][:file_description['nex']]
    #file_description['version'] = fileversion_dict[file_description['code']]
    assert f.tell() == 1024
    # Something is not quite right with the 'ex' parsing
    #assert len(file_description['ex']) == file_description['nex']
    return file_description
def _read_first_record_v2(f):
    r""" Parse the first record of a v2 CLASS file (packages/classic/lib/file.f90).

    Position & Parameter & Fortran Kind & Purpose & Unit \\
    \hline
    1 & {\tt code} & Character*4 & File code & - \\
    2 & {\tt reclen} & Integer*4 & Record length & words \\
    3 & {\tt kind} & Integer*4 & File kind & - \\
    4 & {\tt vind} & Integer*4 & Index version & - \\
    5 & {\tt lind} & Integer*4 & Index length & words \\
    6 & {\tt flags} & Integer*4 & Bit flags. \#1: single or multiple, & - \\
    & & & \#2-32: provision (0-filled) & \\
    \hline
    7:8 & {\tt xnext} & Integer*8 & Next available entry number & - \\
    9:10 & {\tt nextrec} & Integer*8 & Next record which contains free space & record \\
    11 & {\tt nextword} & Integer*4 & Next free word in this record & word \\
    \hline
    12 & {\tt lex1} & Integer*4 & Length of first extension index & entries \\
    13 & {\tt nex} & Integer*4 & Number of extensions & - \\
    14 & {\tt gex} & Integer*4 & Extension growth rule & - \\
    15:{\tt reclen} & {\tt aex(:)} & Integer*8 & Array of extension addresses & record
    """
    f.seek(0)
    # Fields are read in strict on-disk order; see the table above.
    file_description = {
        'code': f.read(4),
        'reclen': _read_int32(f),
        'kind': _read_int32(f),
        'vind': _read_int32(f),
        'lind': _read_int32(f),
        'flags': _read_int32(f),
        'xnext': _read_int64(f),
        'nextrec': _read_int64(f),
        'nextword': _read_int32(f),
        'lex1': _read_int32(f),
        'nex': _read_int32(f),
        'gex': _read_int32(f),
    }
    # Build the cumulative entry-count table 'lexn' (one slot per extension,
    # starting with a leading 0 so lexn[kex-1] is the count before ext kex).
    file_description['lexn'] = [0]
    if file_description['gex'] == 10:
        # Linear growth: every extension holds lex1 entries
        for ii in range(1, file_description['nex']+1):
            file_description['lexn'].append(file_description['lexn'][-1]+file_description['lex1'])
    else:
        #! Exponential growth. Only growth with mantissa 2.0 is supported
        for ii in range(1, file_description['nex']):
            # I don't know what the fortran does here!!!
            # ahh, maybe 2_8 means int(2, dtype='int64')
            nent = int(file_description['lex1'] * 2**(ii-1))
            #nent = int(file%desc%lex1,kind=8) * 2_8**(iex-1)
            file_description['lexn'].append(file_description['lexn'][-1]+nent)
            #file%desc%lexn(iex) = file%desc%lexn(iex-1) + nent
    file_description['nentries'] = np.sum(file_description['lexn'])
    record_length_words = file_description['reclen']
    # The rest of the record holds 64-bit extension addresses; zeros are
    # unused slots.
    aex = numpy.fromfile(f, count=(record_length_words-15)//2, dtype='int64')
    file_description['aex'] = aex[aex!=0]
    assert len(file_description['aex']) == file_description['nex']
    file_description['version'] = 2
    return file_description
def gi8_dicho(ninp, lexn, xval, ceil=True):
    """
    ! @ public
    ! Find ival such as
    ! X(ival-1) < xval <= X(ival) (ceiling mode)
    ! or
    ! X(ival) <= xval < X(ival+1) (floor mode)
    ! for input data ordered. Use a dichotomic search for that.
    call gi8_dicho(nex,file%desc%lexn,entry_num,.true.,kex,error)
    """
    # Bisection over the ordered array `lexn`, returning a 1-based index.
    # Only ceiling mode is implemented (the `ceil` flag is kept for
    # signature compatibility with the Fortran original).
    lo = 1
    hi = ninp
    while hi > lo + 1:
        mid = (hi + lo) // 2
        if lexn[mid - 1] < xval:
            lo = mid
        else:
            hi = mid
    return hi
def _read_obshead(f, file_description, position=None, verbose=False):
    """Dispatch observation-header parsing to the v1 or v2 reader."""
    version = file_description['version']
    if version == 1:
        return _read_obshead_v1(f, position=position, verbose=verbose)
    elif version == 2:
        return _read_obshead_v2(f, position=position)
    raise ValueError("Invalid file version {0}.".
                     format(file_description['version']))
def _read_obshead_v2(f, position=None):
    """
    Read a v2 observation header (entry descriptor) at `position` (or the
    current offset) and return (entry number, header dict, {section id ->
    section address}).

    ! Version 2 (public)
    integer(kind=4), parameter :: entrydescv2_nw1=11 ! Number of words, in 1st part
    integer(kind=4), parameter :: entrydescv2_nw2=5 ! Number of words for 1 section in 2nd part
    type classic_entrydesc_t
    sequence
    integer(kind=4) :: code ! 1 : code observation icode
    integer(kind=4) :: version ! 2 : observation version
    integer(kind=4) :: nsec ! 3 : number of sections
    integer(kind=4) :: pad1 ! - : memory padding (not in data)
    integer(kind=8) :: nword ! 4- 5: number of words
    integer(kind=8) :: adata ! 6- 7: data address
    integer(kind=8) :: ldata ! 8- 9: data length
    integer(kind=8) :: xnum ! 10-11: entry number
    ! Out of the 'sequence' block:
    integer(kind=4) :: msec ! Not in data: maximum number of sections the
    ! Observation Index can hold
    integer(kind=4) :: pad2 ! Memory padding for 8 bytes alignment
    integer(kind=4) :: seciden(classic_maxsec) ! Section Numbers (on disk: 1 to ed%nsec)
    integer(kind=8) :: secleng(classic_maxsec) ! Section Lengths (on disk: 1 to ed%nsec)
    integer(kind=8) :: secaddr(classic_maxsec) ! Section Addresses (on disk: 1 to ed%nsec)
    end type classic_entrydesc_t
    """
    if position is not None:
        f.seek(position)
    else:
        position = f.tell()
    # Sanity check: an observation header starts with the code '2'
    IDcode = f.read(4)
    if IDcode.strip() != b'2':
        raise IndexError("Observation Header reading failure at {0}. "
                         "Record does not appear to be an observation header.".
                         format(position))
    # Rewind: the code is re-read as part of the full header below
    f.seek(position)
    # NOTE(review): these two word-count constants mirror the Fortran
    # parameters above but are not used in this Python implementation.
    entrydescv2_nw1 = 11
    entrydescv2_nw2 = 5
    obshead = {
        'CODE': f.read(4),
        'VERSION': _read_int32(f),
        'NSEC': _read_int32(f),
        #'_blank': _read_int32(f),
        'NWORD': _read_int64(f),
        'ADATA': _read_int64(f),
        'LDATA': _read_int64(f),
        'XNUM': _read_int64(f),
        #'MSEC': _read_int32(f),
        #'_blank2': _read_int32(f),
    }
    # The section table follows: numbers, then lengths, then addresses.
    # section_lengths is not returned, but the read must remain so the
    # file pointer advances past it to the addresses.
    section_numbers = np.fromfile(f, count=obshead['NSEC'], dtype='int32')
    section_lengths = np.fromfile(f, count=obshead['NSEC'], dtype='int64')
    section_addresses = np.fromfile(f, count=obshead['NSEC'], dtype='int64')
    return obshead['XNUM'],obshead,dict(zip(section_numbers,section_addresses))
def _read_obshead_v1(f, position=None, verbose=False):
    """
    Read the observation header of a CLASS file
    (helper function for read_class; should not be used independently)

    Returns (observation number, header dict, {section code -> section
    address}).
    """
    if position is not None:
        f.seek(position)
    # Sanity check: an observation header starts with the code '2'
    IDcode = f.read(4)
    if IDcode.strip() != b'2':
        raise IndexError("Observation Header reading failure at {0}. "
                         "Record does not appear to be an observation header.".
                         format(f.tell() - 4))
    (nblocks, nbyteob, data_address, nheaders, data_length, obindex, nsec,
     obsnum) = numpy.fromfile(f, count=8, dtype='int32')
    if verbose:
        print("nblocks,nbyteob,data_address,data_length,nheaders,obindex,nsec,obsnum",nblocks,nbyteob,data_address,data_length,nheaders,obindex,nsec,obsnum)
        print("DATA_LENGTH: ",data_length)
    seccodes = numpy.fromfile(f,count=nsec,dtype='int32')
    # Documentation says addresses then length: It is apparently wrong
    seclen = numpy.fromfile(f,count=nsec,dtype='int32')
    secaddr = numpy.fromfile(f,count=nsec,dtype='int32')
    if verbose:
        print("Section codes, addresses, lengths: ",seccodes,secaddr,seclen)
    hdr = {'NBLOCKS':nblocks, 'NBYTEOB':nbyteob, 'DATAADDR':data_address,
           'DATALEN':data_length, 'NHEADERS':nheaders, 'OBINDEX':obindex,
           'NSEC':nsec, 'OBSNUM':obsnum}
    #return obsnum,seccodes
    return obsnum,hdr,dict(zip(seccodes,secaddr))
# THIS IS IN READ_OBSHEAD!!!
# def _read_preheader(f):
# """
# Not entirely clear what this is, but it is stuff that precedes the actual data
#
# Looks something like this:
# array([ 1, -2, -3, -4, -14,
# 9, 17, 18, 25, 55,
# 64, 81, 99, -1179344801, 979657591,
#
# -2, -3, -4, -14 indicate the 4 header types
# 9,17,18,25 *MAY* indicate the number of bytes in each
#
#
# HOW is it indicated how many entries there are?
# """
# # 13 comes from counting 1, -2,....99 above
# numbers = np.fromfile(f, count=13, dtype='int32')
# sections = [n for n in numbers if n in header_id_numbers]
# return sections
def downsample_1d(myarr, factor, estimator=np.mean, weight=None):
    """
    Downsample a 1D array by averaging over *factor* pixels.
    Crops right side if the shape is not a multiple of factor.
    This code is pure numpy and should be fast.
    keywords:
        estimator - default to mean. You can downsample by summing or
            something else if you want a different estimator
            (e.g., downsampling error: you want to sum & divide by sqrt(n))
        weight: np.ndarray
            An array of weights to use for the downsampling. If None,
            assumes uniform 1
    """
    if myarr.ndim != 1:
        raise ValueError("Only works on 1d data. Says so in the title.")
    # Crop to an exact multiple of `factor`
    nkeep = myarr.size - (myarr.size % int(factor))
    cropped = myarr[:nkeep]
    if weight is None:
        # Stack the `factor` strided phase-slices and collapse along axis 0
        phases = np.concatenate([[cropped[i::factor] for i in range(factor)]])
        return estimator(phases, axis=0)
    # Weighted variant: estimate weighted data and weights separately,
    # then normalize.
    wdata = np.concatenate([[cropped[i::factor] * weight[i::factor]
                             for i in range(factor)]])
    wonly = np.concatenate([[weight[i::factor] for i in range(factor)]])
    dsarr = estimator(wdata, axis=0)
    warr = estimator(wonly, axis=0)
    return dsarr / warr
# unit test
def test_downsample1d():
    """Sanity-check weighted downsampling: the zero-weighted sample is excluded
    from its bin's average."""
    data = np.arange(10)
    weight = np.ones(10)
    weight[5] = 0
    expected = np.array([0.5, 2.5, 4.0, 6.5, 8.5])
    result = downsample_1d(data, 2, weight=weight, estimator=np.mean)
    assert np.all(result == expected)
def read_observation(f, obsid, file_description=None, indices=None,
                     my_memmap=None, memmap=True, verbose=False):
    """
    Read a single observation (spectrum + header) from a CLASS file.

    Parameters
    ----------
    f : str or file
        Filename (opened and closed here) or an already-open binary file;
        in the latter case `my_memmap` is required when ``memmap=True``.
    obsid : int
        0-based position into `indices`.
    file_description : dict, optional
        Parsed first record; read from `f` if not given.
    indices : list of dict, optional
        Entry indices; read from `f` if not given.
    my_memmap : numpy.memmap, optional
        float32 view of the whole file used for memmapped spectrum reads.
    memmap : bool
        Slice the spectrum out of the memmap instead of reading it directly.
    verbose : bool
        Passed through to the observation-header reader.

    Returns
    -------
    (spectrum, header) : (ndarray, dict)
    """
    if isinstance(f, str):
        f = open(f,'rb')
        opened = True
        if memmap:
            my_memmap = numpy.memmap(f, offset=0, dtype='float32',
                                     mode='r')
        else:
            my_memmap = None
    elif my_memmap is None and memmap:
        raise ValueError("Must pass in a memmap object if passing in a file object.")
    else:
        opened = False
    if file_description is None:
        file_description = _read_first_record(f)
    if indices is None:
        indices = _read_indices(f, file_description)
    index = indices[obsid]
    # BLOC/WORD are 1-indexed record/word addresses; reclen is in 4-byte words
    obs_position = (index['BLOC']-1)*file_description['reclen']*4 + (index['WORD']-1)*4
    log.debug("Reading observation at position {0}".format(obs_position))
    obsnum,obshead,sections = _read_obshead(f, file_description,
                                            position=obs_position,
                                            verbose=verbose)
    header = obshead
    datastart = 0
    for section_id,section_address in iteritems(sections):
        # Section addresses are 1-indexed byte addresses
        # in the current "block"
        sec_position = obs_position + (section_address-1)*4
        temp_hdr = _read_header(f, type=header_id_numbers[section_id],
                                position=sec_position)
        header.update(temp_hdr)
        # The data follow the farthest-reaching header section
        datastart = max(datastart,f.tell())
    hdr = header
    hdr.update(obshead) # re-overwrite things
    hdr.update({'OBSNUM':obsnum,'RECNUM':obsid})
    # LAM/BET (and their offsets) are radians on disk; convert to degrees
    hdr.update({'RA':hdr['LAM']/pi*180,'DEC':hdr['BET']/pi*180})
    hdr.update({'RAoff':hdr['LAMOF']/pi*180,'DECoff':hdr['BETOF']/pi*180})
    hdr.update({'OBJECT':hdr['SOURC'].strip()})
    hdr.update({'BUNIT':'Tastar'})
    hdr.update({'EXPOSURE':float(hdr['TIME'])})
    hdr['HDRSTART'] = obs_position
    hdr['DATASTART'] = datastart
    hdr.update(indices[obsid])
    # Define MJD as mid-exposure time in MJD
    hdr.update({'OBSDATE': hdr['MJD'] + hdr['UT']/2./pi})
    # Apparently the data are still valid in this case?
    #if hdr['XNUM'] != obsid+1:
    #    log.error("The spectrum read was {0} but {1} was requested.".
    #              format(hdr['XNUM']-1, obsid))
    if hdr['KIND'] == 1: # continuum
        nchan = hdr['NPOIN']
    elif 'NCHAN' in hdr:
        nchan = hdr['NCHAN']
    else:
        # NOTE(review): debugger breakpoint left in the error path; should
        # probably raise instead of dropping into ipdb.
        log.error("No NCHAN in header. This is not a spectrum.")
        import ipdb; ipdb.set_trace()
    # There may be a 1-channel offset? CHECK!!!
    # (changed by 1 pixel - October 14, 2014)
    # (changed back - October 21, 2014 - I think the ends are just bad, but not
    # zero.)
    f.seek(datastart-1)
    spec = _read_spectrum(f, position=datastart-1, nchan=nchan,
                          memmap=memmap, my_memmap=my_memmap)
    if opened:
        f.close()
    return spec, hdr
def _read_spectrum(f, position, nchan, my_memmap=None, memmap=True):
if position != f.tell():
log.warning("Reading data from {0}, but the file is wound "
"to {1}.".format(position, f.tell()))
if memmap:
here = position
#spectrum = numpy.memmap(filename, offset=here, dtype='float32',
# mode='r', shape=(nchan,))
spectrum = my_memmap[here//4:here//4+nchan]
f.seek(here+nchan*4)
else:
f.seek(position)
spectrum = numpy.fromfile(f,count=nchan,dtype='float32')
return spectrum
def _spectrum_from_header(fileobj, header, memmap=None):
    """Read the spectrum described by `header`, using its DATASTART byte
    offset and its NCHAN channel count (falling back to NPOIN when NCHAN
    is absent, as for continuum data).

    Bugfix: the channel-count expression previously referenced an undefined
    name ``hdr`` instead of the ``header`` parameter, raising NameError on
    every call.
    """
    nchan = header['NCHAN'] if 'NCHAN' in header else header['NPOIN']
    return _read_spectrum(fileobj, position=header['DATASTART'],
                          nchan=nchan,
                          my_memmap=memmap)
def clean_header(header):
    """Coerce a header dict into a FITS-safe form: keys are truncated to 8
    characters; int/float/str values pass through; small arrays (2-10
    elements) are exploded into numbered keys (7-char stem + index); any
    other value is stringified.

    Raises
    ------
    ValueError
        If a value is an array with more than 10 elements.
    """
    newheader = {}
    for k, v in header.items():
        if isinstance(v, (int, float, str)):
            newheader[k[:8]] = v
            continue
        if isinstance(v, np.ndarray) and v.size > 1:
            if v.size > 10:
                raise ValueError("Large array being put in header. That's no good. key={0}".format(k))
            for ii, val in enumerate(v):
                newheader[k[:7] + str(ii)] = val
        else:
            newheader[k[:8]] = str(v)
    return newheader
class ClassObject(object):
    def __init__(self, filename, verbose=False):
        """
        Open a CLASS file and eagerly parse its first record and all entry
        indices; spectra themselves are loaded lazily (via `LazyItem`).

        Parameters
        ----------
        filename : str
            Path to the CLASS file (opened read-only, binary).
        verbose : bool
            Log timing information for each setup stage.
        """
        t0 = time.time()
        self._file = open(filename, 'rb')
        # Parsed first record (file layout metadata)
        self.file_description = _read_first_record(self._file)
        # All entry indices, in entry order
        self.allind = _read_indices(self._file, self.file_description)
        # float32 view of the entire file, shared by all spectrum reads
        self._data = np.memmap(self._file, dtype='float32', mode='r')
        if verbose: log.info("Setting _spectra")
        self._spectra = LazyItem(self)
        t1 = time.time()
        if verbose: log.info("Setting posang. t={0}".format(t1-t0))
        self.set_posang()
        t2 = time.time()
        if verbose: log.info("Identifying otf scans. t={0}".format(t2-t1))
        self._identify_otf_scans(verbose=verbose)
        t3 = time.time()
        #self._load_all_spectra()
        if verbose:
            log.info("Loaded CLASS object with {3} indices. Time breakdown:"
                     " {0}s for indices, "
                     "{1}s for posang, and {2}s for OTF scan identification"
                     .format(t1-t0, t2-t1, t3-t2, len(self.allind)))
def __repr__(self):
s = "\n".join(["{k}: {v}".format(k=k,v=v)
for k,v in iteritems(self.getinfo())])
return "ClassObject({id}) with {nspec} entries\n".format(id=id(self),
nspec=len(self.allind)) + s
def getinfo(self, allsources=False):
info = dict(
tels = self.tels,
lines = self.lines,
scans = self.scans,
sources = self.sources if allsources else self.sci_sources,
)
return info
def set_posang(self):
h0 = self.headers[0]
for h in self.headers:
dx = h['OFF1'] - h0['OFF1']
dy = h['OFF2'] - h0['OFF2']
h['COMPPOSA'] = np.arctan2(dy,dx)*180/np.pi
h0 = h
    def _identify_otf_scans(self, verbose=False):
        """
        Group consecutive index entries into OTF scans.

        Stamps each entry with 'OTFSCAN' (a counter that increments when the
        scan number repeats but the source changes, and resets to 0 when the
        scan number itself changes) and, on each group boundary, stamps the
        preceding group's entries with 'SCANPOSA' (median COMPPOSA modulo
        180 degrees) and 'FIRSTSCAN' (the group's starting index).

        NOTE(review): `h0` is never advanced inside the loop, so every entry
        is compared against the *first* index entry rather than its
        predecessor — verify this is intended (an ``h0 = h`` update at the
        end of the loop body may be missing).
        """
        h0 = self.allind[0]
        st = 0
        otfscan = 0
        posangs = [h['COMPPOSA'] for h in self.allind]
        if verbose:
            pb = ProgressBar(len(self.allind))
        for ii,h in enumerate(self.allind):
            if (h['SCAN'] != h0['SCAN']
                or h['SOURC'] != h0['SOURC']):
                # Group boundary: close out the previous group [st, ii)
                h0['FIRSTSCAN'] = st
                cpa = np.median(posangs[st:ii])
                for hh in self.allind[st:ii]:
                    hh['SCANPOSA'] = cpa % 180
                st = ii
                if h['SCAN'] == h0['SCAN']:
                    # Same scan number, different source: next OTF scan
                    h0['OTFSCAN'] = otfscan
                    otfscan += 1
                    h['OTFSCAN'] = otfscan
                else:
                    # New scan number: restart the OTF counter
                    otfscan = 0
                    h['OTFSCAN'] = otfscan
            else:
                h['OTFSCAN'] = otfscan
            if verbose:
                pb.update(ii)
    def listscans(self, source=None, telescope=None, out=sys.stdout):
        """
        Print a one-line summary per scan group (scan number + source) to
        `out`: entry range, offsets bounding box (arcsec), mean step angle,
        scan position angle, OTF scan number, TSYS, and UT date.

        Parameters
        ----------
        source : str, list, or tuple, optional
            Regex (or list of regexes) a group's source name must match to
            be printed.
        telescope : str, optional
            Regex the group's telescope name must match.
        out : file-like
            Destination stream (default stdout).

        Returns
        -------
        dict
            NOTE(review): only the *most recently printed* group's summary
            dict is returned; `data_rows` accumulates all of them but is
            discarded, and the final group is never flushed/printed —
            verify whether returning `data_rows` was intended.
        """
        minid=0
        scan = -1
        sourc = ""
        #tel = ''
        minoff1,maxoff1 = np.inf,-np.inf
        minoff2,maxoff2 = np.inf,-np.inf
        ttlangle,nangle = 0.0,0
        print("{entries:15s} {SOURC:12s} {XTEL:12s} {SCAN:>8s} {SUBSCAN:>8s} "
              "[ {RAmin:>12s}, {RAmax:>12s} ] "
              "[ {DECmin:>12s}, {DECmax:>12s} ] "
              "{angle:>12s} {SCANPOSA:>12s} {OTFSCAN:>8s} {TSYS:>8s} {UTD:>12s}"
              .format(entries='Scans', SOURC='Source', XTEL='Telescope',
                      SCAN='Scan', SUBSCAN='Subscan',
                      RAmin='min(RA)', RAmax='max(RA)',
                      DECmin='min(DEC)', DECmax='max(DEC)',
                      SCANPOSA='Scan PA',
                      angle='Angle', OTFSCAN='OTFscan',
                      TSYS='TSYS', UTD='UTD'),
              file=out)
        data_rows = []
        for ii,row in enumerate(self.headers):
            if (row['SCAN'] == scan
                and row['SOURC'] == sourc
                #and row['XTEL'] == tel
                ):
                # Same group: extend the offsets bounding box and the
                # running step-angle accumulator
                minoff1 = min(minoff1, row['OFF1'])
                maxoff1 = max(maxoff1, row['OFF1'])
                minoff2 = min(minoff2, row['OFF2'])
                maxoff2 = max(maxoff2, row['OFF2'])
                ttlangle += np.arctan2(row['OFF2'] - prevrow['OFF2'],
                                       row['OFF1'] - prevrow['OFF1'])%np.pi
                nangle += 1
                prevrow = row
            else:
                if scan == -1:
                    # First entry: just open the first group
                    scan = row['SCAN']
                    sourc = row['SOURC']
                    #tel = row['XTEL']
                    prevrow = row
                    continue
                # Group boundary: decide whether the completed group
                # passes the source/telescope filters
                ok = True
                if source is not None:
                    if isinstance(source, (list,tuple)):
                        ok = ok and any(re.search((s), prevrow['SOURC'])
                                        for s in source)
                    else:
                        ok = ok and re.search((source), prevrow['SOURC'])
                if telescope is not None:
                    ok = ok and re.search((telescope), prevrow['XTEL'])
                if ok:
                    # Offsets are radians on disk; convert to arcsec
                    data = dict(RAmin=minoff1*180/np.pi*3600,
                                RAmax=maxoff1*180/np.pi*3600,
                                DECmin=minoff2*180/np.pi*3600,
                                DECmax=maxoff2*180/np.pi*3600,
                                angle=(ttlangle/nangle)*180/np.pi if nangle>0 else 0,
                                e0=minid,
                                e1=ii-1,
                                #TSYS=row['TSYS'] if 'TSYS' in row else '--',
                                UTD=row['DOBS']+row['UT'] if 'UT' in row else -99,
                                **prevrow)
                    print("{e0:7d}-{e1:7d} {SOURC:12s} {XTEL:12s} {SCAN:8d} {SUBSCAN:8d} "
                          "[ {RAmin:12f}, {RAmax:12f} ] "
                          "[ {DECmin:12f}, {DECmax:12f} ] "
                          "{angle:12.1f} {SCANPOSA:12.1f} {OTFSCAN:8d}"
                          " {TSYS:>8.1f} {UTD:12f}".
                          format(**data),
                          file=out)
                    data_rows.append(data)
                # Reset the accumulators and open the next group
                minoff1,maxoff1 = np.inf,-np.inf
                minoff2,maxoff2 = np.inf,-np.inf
                ttlangle,nangle = 0.0,0
                scan = row['SCAN']
                sourc = row['SOURC']
                #tel = row['XTEL']
                minid = ii
        return data
@property
def tels(self):
if hasattr(self,'_tels'):
return self._tels
else:
self._tels = set([h['CTELE'] for h in self.allind])
#testing if CTELE even works
return self._tels
@property
def sources(self):
if hasattr(self,'_source'):
return self._source
else:
self._source = set([h['SOURC'] for h in self.allind])
return self._source
@property
def scans(self):
if hasattr(self,'_scan'):
return self._scan
else:
self._scan = set([h['SCAN'] for h in self.allind])
return self._scan
@property
def sci_sources(self):
return set([s for s in self.sources
if s[:4] not in ('SKY-', 'TSYS', 'TCAL', 'TREC', 'HOT-',
'COLD')])
@property
def lines(self):
if hasattr(self,'_lines'):
return self._lines
else:
self._lines = set([h['LINE'] for h in self.allind])
return self._lines
def _load_all_spectra(self, indices=None):
if indices is None:
indices = range(self.file_description['xnext']-1)
if hasattr(self, '_loaded_indices'):
indices_set = set(indices)
indices_to_load = (indices_set.difference(self._loaded_indices))
self._loaded_indices = self._loaded_indices.union(indices_set)
if any(indices_to_load):
pb = ProgressBar(len(indices_to_load))
for ii,k in enumerate(xrange(indices_to_load)):
self._spectra[k]
pb.update(ii)
else:
self._loaded_indices = set(indices)
self._spectra.load_all()
@property
def spectra(self):
return [x[0] for x in self._spectra]
@property
def headers(self):
return [self._spectra[ii][1]
if ii in self._spectra else x
for ii,x in enumerate(self.allind)]
    def select_spectra(self,
                       all=None,
                       line=None,
                       linere=None,
                       linereflags=re.IGNORECASE,
                       number=None,
                       scan=None,
                       offset=None,
                       source=None,
                       sourcere=None,
                       sourcereflags=re.IGNORECASE,
                       range=None,
                       quality=None,
                       telescope=None,
                       telescopere=None,
                       telescopereflags=re.IGNORECASE,
                       subscan=None,
                       entry=None,
                       posang=None,
                       #observed=None,
                       #reduced=None,
                       frequency=None,
                       section=None,
                       user=None,
                       include_old_versions=False,
                       ):
        """
        Return the entry indices of spectra matching the given criteria.

        All criteria are ANDed together; any criterion left as ``None`` is
        ignored.  String criteria come in two flavors: the plain form
        (``line``, ``source``, ``telescope``) is regex-escaped and matched
        case-insensitively as a substring, while the ``*re`` form
        (``linere``, ``sourcere``, ``telescopere``) is treated as a raw
        regular expression with the corresponding ``*reflags``.

        Parameters
        ----------
        entry : tuple of 2 ints, optional
            If given, short-circuits everything else and simply returns
            ``range(entry[0], entry[1])``.
        line, linere : str
            Matched against the 'LINE' index field.
        scan : int
            Exact match against the 'SCAN' field.
        offset : float
            Matches if either 'OFF1' or 'OFF2' equals this value exactly.
        source, sourcere : str
            Matched against the 'CSOUR' field.
        range : tuple of 4 floats
            (OFF1_min, OFF1_max, OFF2_min, OFF2_max) position window
            (NOTE: this parameter shadows the builtin ``range`` inside
            this method).
        quality : int
            Exact match against 'QUAL'.
        telescope, telescopere : str
            Matched against the 'CTELE' field.
        subscan : int
            Exact match against 'SUBSCAN'.
        posang : tuple of 2 floats
            (min, max) limits on 'COMPPOSA' modulo 180.
        frequency : tuple of 2 floats
            (min, max) limits on the rest frequency 'RESTF'.  Forces all
            spectra to be loaded first, because 'RESTF' lives in the full
            header rather than the index.
        include_old_versions : bool
            Include spectra with XVER numbers <0? These are CLASS spectra that
            have been "overwritten" (re-reduced?)

        Notes
        -----
        ``all``, ``number``, ``section`` and ``user`` are accepted but
        currently unused.

        Returns
        -------
        list of int
            Indices into ``self.headers`` of the matching entries.
        """
        if entry is not None and len(entry)==2:
            return irange(entry[0], entry[1])
        if frequency is not None:
            self._load_all_spectra()
        sel = [(re.search(re.escape(ensure_bytes(line)), h['LINE'], re.IGNORECASE)
                if line is not None else True) and
               (re.search(ensure_bytes(linere), h['LINE'], linereflags)
                if linere is not None else True) and
               (h['SCAN'] == scan if scan is not None else True) and
               ((h['OFF1'] == offset or
                 h['OFF2'] == offset) if offset is not None else True) and
               (re.search(re.escape(ensure_bytes(source)), h['CSOUR'], re.IGNORECASE)
                if source is not None else True) and
               (re.search(ensure_bytes(sourcere), h['CSOUR'], sourcereflags)
                if sourcere is not None else True) and
               (h['OFF1']>range[0] and h['OFF1'] < range[1] and
                h['OFF2']>range[2] and h['OFF2'] < range[3]
                if range is not None and len(range)==4 else True) and
               (h['QUAL'] == quality if quality is not None else True) and
               (re.search(re.escape(ensure_bytes(telescope)), h['CTELE'], re.IGNORECASE)
                if telescope is not None else True) and
               (re.search(ensure_bytes(telescopere), h['CTELE'], telescopereflags)
                if telescopere is not None else True) and
               (h['SUBSCAN']==subscan if subscan is not None else True) and
               ('RESTF' in h and # Need to check that it IS a spectrum: continuum data can't be accessed this way
                h['RESTF'] > frequency[0] and
                h['RESTF'] < frequency[1]
                if frequency is not None and len(frequency)==2
                else True) and
               (h['COMPPOSA']%180 > posang[0] and
                h['COMPPOSA']%180 < posang[1]
                if posang is not None and len(posang)==2
                else True) and
               # 1A uses XVER, 2A uses VER. If neither are present, it's
               # probably not a valid spectrum?
               (h.get('XVER', h.get('VER', -999)) > 0
                if not include_old_versions else True)
               for h in self.headers
               ]
        return [ii for ii,k in enumerate(sel) if k]
def get_spectra(self, progressbar=True, **kwargs):
selected_indices = self.select_spectra(**kwargs)
if not any(selected_indices):
raise ValueError("Selection yielded empty.")
self._spectra.load(selected_indices, progressbar=progressbar)
return [self._spectra[ii] for ii in selected_indices]
def get_pyspeckit_spectra(self, progressbar=True, **kwargs):
spdata = self.get_spectra(progressbar=progressbar, **kwargs)
spectra = [pyspeckit.Spectrum(data=data,
xarr=make_axis(header),
header=clean_header(header))
for data,header in spdata]
return spectra
def read_observations(self, observation_indices, progressbar=True):
self._spectra.load(observation_indices, progressbar=progressbar)
return [self._spectra[ii] for ii in observation_indices]
@print_timing
def read_class(filename, downsample_factor=None, sourcename=None,
               telescope=None, line=None, posang=None, verbose=False,
               flag_array=None):
    """
    Read a binary class file.
    Based on the
    `GILDAS CLASS file type Specification
    <http://iram.fr/IRAMFR/GILDAS/doc/html/class-html/node58.html>`_
    Parameters
    ----------
    filename: str
    downsample_factor: None or int
        Factor by which to downsample data by averaging.  Useful for
        overresolved data.
    sourcename: str or list of str
        Source names to match to the data (uses regex)
    telescope: str or list of str
        'XTEL' or 'TELE' parameters: the telescope & instrument
    line: str or list of str
        The line name
    posang: tuple of 2 floats
        The first float is the minimum value for the position angle. The second
        float is the maximum value for the position angle.
    verbose: bool
        Log messages with severity INFO
    flag_array: np.ndarray
        An array with the same shape as the data used to flag out
        (remove) data when downsampling.  True = flag out

    Returns
    -------
    (spectra, headers, indexes) or None
        Lists of data arrays, (possibly downsampled) headers, and the index
        entries; None if nothing matched the selection.
    """
    classobj = ClassObject(filename)
    # normalize the three match criteria to lists so they can be looped over
    if not isinstance(sourcename, (list,tuple)):
        sourcename = [sourcename]
    if not isinstance(telescope, (list,tuple)):
        telescope = [telescope]
    if not isinstance(line, (list,tuple)):
        line = [line]
    spectra,headers = [],[]
    if verbose:
        log.info("Reading...")
    # NOTE(review): an entry matching more than one source/telescope/line
    # pattern will appear multiple times in `selection`
    selection = [ii
                 for source in sourcename
                 for tel in telescope
                 for li in line
                 for ii in classobj.select_spectra(sourcere=source,
                                                   telescope=tel,
                                                   line=li,
                                                   posang=posang)]
    sphdr = classobj.read_observations(selection)
    if len(sphdr) == 0:
        return None
    spec,hdr = zip(*sphdr)
    spectra += spec
    headers += hdr
    # alias to the same header dicts; rebinding `headers` below does not
    # affect this list, but downsample_header mutates the dicts in place,
    # so the entries of `indexes` see the downsampling changes too
    indexes = headers
    # flagged (True) channels get zero weight in the downsampling average
    weight = ~flag_array if flag_array is not None else None
    if downsample_factor is not None:
        if verbose:
            log.info("Downsampling...")
        spectra = [downsample_1d(spec, downsample_factor,
                                 weight=weight)
                   for spec in ProgressBar(spectra)]
        headers = [downsample_header(h, downsample_factor)
                   for h in ProgressBar(headers)]
        for hdr in headers:
            stringify_header(hdr)
    return spectra,headers,indexes
def stringify_header(header):
    """
    Coerce header values, in place, into types FITS can serialize.

    bytes values are decoded to str; any value that is not already a
    FITS-representable scalar is stringified, with newline, carriage
    return, vertical tab and form feed characters stripped out.
    """
    from six import string_types, integer_types
    import string
    allowed_types = (string_types + integer_types +
                     (float, complex, bool, np.floating, np.integer,
                      np.complexfloating, np.bool_))
    # string.printable[96:] == '\n\r\x0b\x0c' (the control-whitespace tail)
    stripper = re.compile("[{0}]".format(string.printable[96:]))
    for key in list(header):
        value = header[key]
        if isinstance(value, bytes):
            header[key] = value.decode()
        elif not isinstance(value, allowed_types):
            header[key] = stripper.sub("", str(value))
def downsample_header(hdr, downsample_factor):
    """
    Adjust a CLASS header, in place, for data downsampled by ``downsample_factor``.

    Channel counts ('NCHAN', 'NPOIN', 'DATALEN') are divided by the factor,
    the reference channel 'RCHAN' is remapped onto the coarser grid, and the
    frequency/velocity resolutions ('FRES', 'VRES') are multiplied.
    Returns the (mutated) ``hdr``.
    """
    for key in ('NCHAN', 'NPOIN', 'DATALEN'):
        if key in hdr:
            hdr[key] = int(hdr[key] / downsample_factor)
    # maybe wrong? h['RCHAN'] = (h['RCHAN']-1) / downsample_factor + 1
    inv = 1. / downsample_factor
    hdr['RCHAN'] = (hdr['RCHAN'] - 1) * inv + 0.5 + inv / 2.
    for key in ('FRES', 'VRES'):
        if key in hdr:
            hdr[key] = hdr[key] * downsample_factor
    return hdr
def make_axis(header,imagfreq=False):
    """
    Create a :class:`pyspeckit.spectrum.units.SpectroscopicAxis` from the CLASS "header"

    Parameters
    ----------
    header : dict
        CLASS header; reads 'RESTF', 'NCHAN', 'RCHAN', 'FRES', 'VOFF',
        'FOFF' (and 'IMAGE' when ``imagfreq`` is set).
    imagfreq : bool
        If True, build the axis in the image sideband, mirrored about the
        image frequency 'IMAGE', instead of the signal sideband.

    Returns
    -------
    units.SpectroscopicAxis
        Frequency axis in MHz.
    """
    from .. import units
    rest_frequency = header.get('RESTF')
    xunits = 'MHz'  # NOTE(review): unused
    nchan = header.get('NCHAN')
    voff = header.get('VOFF')
    foff = header.get('FOFF')
    doppler = header.get('DOPPLER')  # NOTE(review): unused
    fres = header.get('FRES')
    refchan = header.get('RCHAN')
    imfreq = header.get('IMAGE')
    if foff in (None, 0.0) and voff not in (None, 0.0):
        # Radio convention: no explicit frequency offset, so derive one from
        # the velocity offset (v/c * rest frequency; c = 2.99792458e5 km/s)
        foff = -voff/2.997924580e5 * rest_frequency
    if not imagfreq:
        # channel i maps to restf + foff + (i - refchan)*fres, i = 1..nchan
        xarr = rest_frequency + foff + (numpy.arange(1, nchan+1) - refchan) * fres
        XAxis = units.SpectroscopicAxis(xarr,unit='MHz',refX=rest_frequency*u.MHz)
    else:
        # image sideband: mirrored about the image frequency (note the minus)
        xarr = imfreq - (numpy.arange(1, nchan+1) - refchan) * fres
        XAxis = units.SpectroscopicAxis(xarr,unit='MHz',refX=imfreq*u.MHz)
    return XAxis
@print_timing
def class_to_obsblocks(filename, telescope, line, datatuple=None, source=None,
                       imagfreq=False, DEBUG=False, **kwargs):
    """
    Load an entire CLASS observing session into a list of ObsBlocks based on
    matches to the 'telescope', 'line' and 'source' names

    Parameters
    ----------
    filename : string
        The Gildas CLASS data file to read the spectra from.
    telescope : list
        List of telescope names to be matched.
    line : list
        List of line names to be matched.
    datatuple : tuple of (spectra, header, indexes), optional
        Already-read data as returned by `read_class`; if given,
        ``filename`` is not re-read.
    source : list (optional)
        List of source names to be matched. Defaults to None.
    imagfreq : bool
        Create a SpectroscopicAxis with the image frequency.

    Returns
    -------
    list of pyspeckit.ObsBlock
        One ObsBlock per contiguous scan number.
    """
    if datatuple is None:
        spectra,header,indexes = read_class(filename, **kwargs)
    else:
        spectra,header,indexes = datatuple

    obslist = []
    lastscannum = -1
    spectrumlist = None
    xarr = None
    for sp,hdr,ind in zip(spectra,header,indexes):
        hdr.update(ind)
        # Rebuild the header as a FITS header.  This is slow but necessary:
        # sequence-valued entries are split into numbered keyword copies.
        H = pyfits.Header()
        for k,v in iteritems(hdr):
            if hasattr(v,"__len__") and not isinstance(v,str):
                if len(v) > 1:
                    if len(v) < 10:
                        for ii,vv in enumerate(v):
                            H[k[:7]+str(ii)] = vv
                    elif len(v) < 100:
                        for ii,vv in enumerate(v):
                            H[k[:6]+str(ii)] = vv
                    else:
                        raise ValueError("Too many entries for {0}".format(k))
                else:
                    H[k] = v[0]
            else:
                H[k] = v
        scannum = hdr['SCAN']
        # skip spectra that do not match the requested telescope/line/source
        if 'XTEL' in hdr and hdr['XTEL'].strip() not in telescope:
            continue
        if hdr['LINE'].strip() not in line:
            continue
        if (source is not None) and (hdr['SOURC'].strip() not in source):
            continue
        hdr['RESTFREQ'] = hdr.get('RESTF')
        H['RESTFREQ'] = hdr.get('RESTF')
        if scannum != lastscannum:
            # new scan: flush the previous group and start a new one
            lastscannum = scannum
            if spectrumlist is not None:
                obslist.append(pyspeckit.ObsBlock(spectrumlist))
            xarr = make_axis(hdr,imagfreq=imagfreq)
            spectrumlist = [pyspeckit.Spectrum(xarr=xarr,
                                               header=H,
                                               data=sp)]
        else:
            # same scan: reuse the axis built at the start of the scan
            spectrumlist.append(pyspeckit.Spectrum(xarr=xarr,
                                                   header=H,
                                                   data=sp))
    # BUG FIX: the last scan's spectra were previously dropped because the
    # final spectrumlist was never wrapped in an ObsBlock after the loop.
    if spectrumlist is not None:
        obslist.append(pyspeckit.ObsBlock(spectrumlist))
    return obslist
class LazyItem(object):
    """
    Simple lazy spectrum-retriever wrapper.

    Behaves like a mapping from entry index -> (spectrum, header): entries
    are read from the parent ClassObject's file on first access and cached
    in ``self.sphdr``.
    """
    def __init__(self, parent):
        self.parent = parent
        self.sphdr = {}          # cache: entry index -> (spectrum, header)
        self.nind = len(self.parent.allind)
        self.nloaded = 0

    def __repr__(self):
        return ("Set of {0} spectra & headers, {1} loaded"
                " ({2:0.2f}%)".format(self.nind, self.nloaded,
                                      (float(self.nloaded)/self.nind)*100))

    def load_all(self, progressbar=True):
        """Load every entry in the file."""
        self.load(range(self.nind), progressbar=progressbar)

    def load(self, indices, progressbar=True):
        """Load (and cache) the given entry indices."""
        pb = ProgressBar(len(indices))
        for counter, k in enumerate(indices, start=1):
            self[k]
            pb.update(counter)

    def __getitem__(self, key):
        # BUG FIX: the slice test must come *before* the dict-membership
        # check (slice objects are unhashable before Python 3.12), and
        # slices expose ``.stop``, not ``.end``.
        if isinstance(key, slice):
            return [self[k] for k in range(key.start or 0,
                                           key.stop or len(self.parent.allind),
                                           key.step or 1)]
        elif key in self.sphdr:
            return self.sphdr[key]
        else:
            sphd = read_observation(self.parent._file, key,
                                    file_description=self.parent.file_description,
                                    indices=self.parent.allind,
                                    my_memmap=self.parent._data)
            # Update the header with OTFSCAN and POSANG info
            sphd[1].update(self.parent.allind[key])
            self.sphdr[key] = sphd
            self.nloaded += 1
            return sphd

    def __iter__(self):
        # BUG FIX: previously returned self.next() (undefined in Python 3)
        # and the generator iterated a nonexistent ``self.spheader``.
        return iter(self.sphdr.values())

    def __next__(self):
        # kept for interface compatibility; yields the loaded entries
        for k in self.sphdr:
            yield self.sphdr[k]

    def __contains__(self, key):
        return key in self.sphdr
@print_timing
def class_to_spectra(filename, datatuple=None, **kwargs):
    """
    Load each individual spectrum within a CLASS file into a list of Spectrum
    objects, returned wrapped in a pyspeckit.Spectra container.
    """
    if datatuple is None:
        spectra, header, indexes = read_class(filename, **kwargs)
    else:
        spectra, header, indexes = datatuple

    spectrumlist = []
    for data, hdr, index_entry in zip(spectra, header, indexes):
        hdr.update(index_entry)
        spectrumlist.append(pyspeckit.Spectrum(xarr=make_axis(hdr),
                                               header=hdr,
                                               data=data))
    return pyspeckit.Spectra(spectrumlist)
def tests():
    """
    Smoke tests for the CLASS reader.

    Tests are specific to the machine on which this code was developed:
    the data paths below are hard-coded, and the results are only
    constructed, never asserted against.
    """
    fn1 = '/Users/adam/work/bolocam/hht/class_003.smt'
    #fn1 = '/Users/adam/work/bolocam/hht/class_001.smt'
    #fn1 = '/Users/adam/work/bolocam/hht/test_SMT-F1M-VU-20824-073.cls'
    #fn2 = '/Users/adam/work/bolocam/hht/test_SMT-F1M-VU-79472+203.cls'
    #F1 = read_class(fn1)#,DEBUG=True)
    #F2 = read_class(fn2)
    # build ObsBlocks for both polarizations of N2H+(3-2) and HCO+(3-2)
    n2hp = class_to_obsblocks(fn1,telescope=['SMT-F1M-HU','SMT-F1M-VU'],line=['N2HP(3-2)','N2H+(3-2)'])
    hcop = class_to_obsblocks(fn1,telescope=['SMT-F1M-HL','SMT-F1M-VL'],line=['HCOP(3-2)','HCO+(3-2)'])
| 42.555756 | 220 | 0.526813 | from __future__ import print_function
from six.moves import xrange
from six import iteritems
import six
import astropy.io.fits as pyfits
import numpy
import numpy as np
from numpy import pi
from astropy import log
from astropy import units as u
import pyspeckit
import sys
import re
try:
from astropy.utils.console import ProgressBar
except ImportError:
ProgressBar = lambda x: None
ProgressBar.update = lambda x: None
import struct
import time
irange = range
def print_timing(func):
def wrapper(*arg,**kwargs):
t1 = time.time()
res = func(*arg,**kwargs)
t2 = time.time()
log.info('%s took %0.5g s' % (func.__name__, (t2-t1)))
return res
wrapper.__doc__ = func.__doc__
return wrapper
def ensure_bytes(string):
if six.PY3:
return bytes(string, 'utf-8')
else:
return str(string)
filetype_dict = {'1A ':'Multiple_IEEE',
'1 ':'Multiple_Vax',
'1B ':'Multiple_EEEI',
'2A ':'v2',
'2 ':'v2',
'2B ':'v2',
'9A ':'Single_IEEE',
'9 ':'Single_Vax',
'9B ':'Single_EEEI'}
for key in list(filetype_dict.keys()):
filetype_dict[ensure_bytes(key)] = filetype_dict[key]
fileversion_dict = {'1A ':'v1',
'2A ':'v2',
'9A ':'v1',
}
for key in list(fileversion_dict.keys()):
fileversion_dict[ensure_bytes(key)] = fileversion_dict[key]
record_lengths = {'1A': 512,
'2A': 1024*4}
header_id_numbers = {0: 'USER CODE',
-1: 'COMMENT',
-2: 'GENERAL',
-3: 'POSITION',
-4: 'SPECTRO',
-5: 'BASELINE',
-6: 'HISTORY',
-7: 'UNKNOWN-APEX',
-9: 'GAUSSFIT',
-10: 'DRIFT',
-11: 'BEAMSWITCH',
-12: 'SHELLFIT',
-13: 'NH3FIT',
-14: 'CALIBRATION',
-18: 'ABSFIT',
}
header_id_lengths = {-2: 9,
-3: 17,
-4: 17,
-5: None,
-6: 3,
-14: 25,
}
filedescv2_nw1=14
keys_lengths = {
'unknown': [
'int32'),
('TELES' ,3,'|S12') ,
('DOBS' ,1,'int32'),
('DRED' ,1,'int32'),
('TYPEC' ,1,'int32'),
('KIND' ,1,'int32'),
('QUAL' ,1,'int32'),
('SCAN' ,1,'int32'),
('SUBSCAN' ,1,'int32'),
],
'COMMENT': [
('LTEXT',1,'int32'),
('CTEXT',1024//4,'|S1024'),
],
'GENERAL': [
('UT' ,2,'float64'),
('ST' ,2,'float64'),
('AZ' ,1,'float32'),
('EL' ,1,'float32'),
('TAU' ,1,'float32'),
('TSYS' ,1,'float32'),
('TIME' ,1,'float32'),
C',3,'|S12') ,
('EPOCH',1,'float32'),
('LAM' ,2,'float64'),
('BET' ,2,'float64'),
('LAMOF',1,'float32'),
('BETOF',1,'float32'),
('PROJ' ,1,'int32') ,
('SL0P' ,1,'float64'),
('RESTF' ,2,'float64'),
('NCHAN' ,1,'int32'),
('RCHAN' ,1,'float32'),
('FRES' ,1,'float32'),
('FOFF' ,1,'float32'),
('VRES' ,1,'float32'),
('VOFF' ,1,'float32'),
('BAD' ,1,'float32'),
'),
,
('DOPPLER',2,'float64'),
],
'CALIBRATION': [
('ALIGN',1,'int32'),
('BEEFF',1,'float32'), # [ ] Beam efficiency
('FOEFF',1,'float32'), # [ ] Forward efficiency
('GAINI',1,'float32'), # [ ] Image/Signal gain ratio
('H2OMM',1,'float32'), # [ mm] Water vapor content
('PAMB',1,'float32'), # [ hPa] Ambient pressure
('TAMB',1,'float32'), # [ K] Ambient temperature
('TATMS',1,'float32'), # [ K] Atmosphere temp. in signal band
('TCHOP',1,'float32'), # [ K] Chopper temperature
('TCOLD',1,'float32'), # [ K] Cold load temperature
('TAUS',1,'float32'), # [neper] Opacity in signal band
('TAUI',1,'float32'), # [neper] Opacity in image band
('TATMI',1,'float32'), # [ K] Atmosphere temp. in image band
('TREC',1,'float32'), # [ K] Receiver temperature
('CMODE',1,'int32'), # [ code] Calibration mode
('ATFAC',1,'float32'), # [ ] Applied calibration factor
('ALTI',1,'float32'), # [ m] Site elevation
('COUNT',3,'3float32'), # [count] Power of Atm., Chopp., Cold
('LCALOF',1,'float32'), # [ rad] Longitude offset for sky measurement
('BCALOF',1,'float32'), # [ rad] Latitude offset for sky measurement
('GEOLONG',1,'float64'), # [ rad] Geographic longitude of observatory # MAY NOT EXIST IN OLD CLASS
('GEOLAT',1,'float64'), # [ rad] Geographic latitude of observatory # MAY NOT EXIST IN OLD CLASS
],
'BASELINE':[
('DEG',1,'int32'), #! [ ] Degree of last baseline
('SIGFI',1,'float32'), #! [Int. unit] Sigma
('AIRE',1,'float32'), #! [Int. unit] Area under windows
('NWIND',1,'int32'), #! [ ] Number of line windows
# WARNING: These should probably have 'n', the second digit, = NWIND
# The docs are really unclear about this, they say "W1(MWIND)"
('W1MWIND',1,'float32'), #! [km/s] Lower limits of windows
('W2MWIND',1,'float32'), #! [km/s] Upper limits of windows
('SINUS',3,'float32'), #![] Sinus baseline results
],
'DRIFT':[ # 16?
('FREQ',1,'float64') , #! [ MHz] Rest frequency real(kind=8) ::
('WIDTH',1,'float32'), #! [ MHz] Bandwidth real(kind=4) ::
('NPOIN',1,'int32') , #! [ ] Number of data points integer(kind=4) ::
('RPOIN',1,'float32'), #! [ ] Reference point real(kind=4) ::
('TREF',1,'float32') , #! [ ?] Time at reference real(kind=4) ::
('AREF',1,'float32') , #! [ rad] Angular offset at ref. real(kind=4) ::
('APOS',1,'float32') , #! [ rad] Position angle of drift real(kind=4) ::
('TRES',1,'float32') , #! [ ?] Time resolution real(kind=4) ::
('ARES',1,'float32') , #! [ rad] Angular resolution real(kind=4) ::
('BAD',1,'float32') , #! [ ] Blanking value real(kind=4) ::
('CTYPE',1,'int32') , #! [code] Type of offsets integer(kind=4) ::
('CIMAG',1,'float64'), #! [ MHz] Image frequency real(kind=8) ::
('COLLA',1,'float32'), #! [ ?] Collimation error Az real(kind=4) ::
('COLLE',1,'float32'), #! [ ?] Collimation error El real(kind=4) ::
],
}
def _read_bytes(f, n):
return f.read(n)
def _read_byte(f):
return numpy.uint8(struct.unpack('=B', f.read(4)[:1])[0])
def _read_int16(f):
return numpy.int16(struct.unpack('=h', f.read(4)[2:4])[0])
def _read_int32(f):
return numpy.int32(struct.unpack('=i', f.read(4))[0])
def _read_int64(f):
return numpy.int64(struct.unpack('=q', f.read(8))[0])
def _read_float32(f):
return numpy.float32(struct.unpack('=f', f.read(4))[0])
def _align_32(f):
pos = f.tell()
if pos % 4 != 0:
f.seek(pos + 4 - pos % 4)
return
def _read_word(f,length):
if length > 0:
chars = _read_bytes(f, length)
_align_32(f)
else:
chars = None
return chars
def _read_int(f):
return struct.unpack('i',f.read(4))
def is_ascii(s):
return len(s) == len(s.decode('ascii').encode('utf-8'))
def is_all_null(s):
return all(x=='\x00' for x in s) or all(x==b'\x00' for x in s)
def _read_indices(f, file_description):
#if file_description['version'] in (1,2):
# extension_positions = (file_description['aex']-1)*file_description['reclen']*4
# all_indices = {extension:
# [_read_index(f,
# filetype=file_description['version'],
# entry=ii,
# #position=position,
# )
# for ii in range(file_description['lex1'])]
# for extension,position in enumerate(extension_positions)
# if position > 0
# }
#elif file_description['version'] == 1:
extension_positions = ((file_description['aex'].astype('int64')-1)
*file_description['reclen']*4)
all_indices = [_read_index(f,
filetype=file_description['version'],
# 1-indexed files
entry_number=ii+1,
file_description=file_description,
)
for ii in range(file_description['xnext']-1)]
#else:
# raise ValueError("Invalid file version {0}".format(file_description['version']))
return all_indices
def _find_index(entry_number, file_description, return_position=False):
if file_description['gex'] == 10:
kex=(entry_number-1)//file_description['lex1'] + 1
else:
# exponential growth:
#kex = gi8_dicho(file_description['nex'], file_description['lexn'], entry_number) - 1
kex = len([xx for xx in file_description['lexn'] if xx<entry_number])
ken = entry_number - file_description['lexn'][kex-1]
#! Find ken (relative entry number in the extension, starts from 1)
#ken = entry_num - file%desc%lexn(kex-1)
kb = ((ken-1)*file_description['lind'])//file_description['reclen']
#kb = ((ken-1)*file%desc%lind)/file%desc%reclen ! In the extension, the
# ! relative record position (as an offset, starts from 0) where the
# ! Entry Index starts. NB: there can be a non-integer number of Entry
# ! Indexes per record
# Subtract 1: 'aex' is 1-indexed
kbl = (file_description['aex'][kex-1]+kb)-1
# kbl = file%desc%aex(kex)+kb ! The absolute record number where the Entry Index goes
k = ((ken-1)*file_description['lind']) % file_description['reclen']
#k = mod((ken-1)*file%desc%lind,file%desc%reclen)+1 ! = in the record, the
# ! first word of the Entry Index of the entry number 'entry_num'
if return_position:
return (kbl*file_description['reclen']+k)*4
else:
return kbl,k
def _read_index(f, filetype='v1', DEBUG=False, clic=False, position=None,
entry_number=None, file_description=None):
if position is not None:
f.seek(position)
if entry_number is not None:
indpos = _find_index(entry_number, file_description, return_position=True)
f.seek(indpos)
x0 = f.tell()
if filetype in ('1A ','v1', 1):
log.debug('Index filetype 1A')
index = {
"XBLOC":_read_int32(f),
"XNUM":_read_int32(f),
"XVER":_read_int32(f),
"XSOURC":_read_word(f,12),
"XLINE":_read_word(f,12),
"XTEL":_read_word(f,12),
"XDOBS":_read_int32(f),
"XDRED":_read_int32(f),
"XOFF1":_read_float32(f),# first offset (real, radians)
"XOFF2":_read_float32(f),# second offset (real, radians)
"XTYPE":_read_int32(f),# coordinate system ('EQ'', 'GA', 'HO')
"XKIND":_read_int32(f),
"XQUAL":_read_int32(f),
"XSCAN":_read_int32(f),
}
index['BLOC'] = index['XBLOC']
index['WORD'] = 1
index['SOURC'] = index['CSOUR'] = index['XSOURC']
index['DOBS'] = index['CDOBS'] = index['XDOBS']
index['CTELE'] = index['XTEL']
index['LINE'] = index['XLINE']
index['OFF1'] = index['XOFF1']
index['OFF2'] = index['XOFF2']
index['QUAL'] = index['XQUAL']
index['SCAN'] = index['XSCAN']
index['KIND'] = index['XKIND']
if clic:
nextchunk = {
"XPROC":_read_int32(f),
"XITYPE":_read_int32(f),
"XHOURANG":_read_float32(f),
"XPROJNAME":_read_int32(f),
"XPAD1":_read_int32(f),
"XBPC" :_read_int32(f),
"XIC" :_read_int32(f),
"XRECEI" :_read_int32(f),
"XUT":_read_float32(f),
"XPAD2":numpy.fromfile(f,count=3,dtype='int32')
}
else:
nextchunk = {"XPOSA":_read_float32(f),
"XSUBSCAN":_read_int32(f),
'XPAD2': numpy.fromfile(f,count=10,dtype='int32'),
}
nextchunk['SUBSCAN'] = nextchunk['XSUBSCAN']
nextchunk['POSA'] = nextchunk['XPOSA']
index.update(nextchunk)
if (f.tell() - x0 != 128):
missed_bits = (f.tell()-x0)
X = f.read(128-missed_bits)
if DEBUG: print("read_index missed %i bits: %s" % (128-missed_bits,X))
if any(not is_ascii(index[x]) for x in ('XSOURC','XLINE','XTEL')):
raise ValueError("Invalid index read from {0}.".format(x0))
elif filetype in ('2A ','v2', 2):
log.debug('Index filetype 2A')
index = {
"BLOC" : _read_int64(f) ,
"WORD" : _read_int32(f) ,
"NUM" : _read_int64(f) ,
"VER" : _read_int32(f) ,
"CSOUR" : _read_word(f,12),
"CLINE" : _read_word(f,12),
"CTELE" : _read_word(f,12),
"DOBS" : _read_int32(f) ,
"DRED" : _read_int32(f) ,
"OFF1" : _read_float32(f),
"OFF2" : _read_float32(f),
"TYPE" : _read_int32(f) ,
"KIND" : _read_int32(f) ,
"QUAL" : _read_int32(f) ,
"POSA" : _read_float32(f),
"SCAN" : _read_int64(f) ,
"SUBSCAN": _read_int32(f) ,
}
if any((is_all_null(index[x]) or not is_ascii(index[x]))
for x in ('CSOUR','CLINE','CTELE')):
raise ValueError("Invalid index read from {0}.".format(x0))
index['SOURC'] = index['XSOURC'] = index['CSOUR']
index['LINE'] = index['XLINE'] = index['CLINE']
index['XKIND'] = index['KIND']
try:
index['DOBS'] = index['XDOBS'] = index['CDOBS']
except KeyError:
index['CDOBS'] = index['XDOBS'] = index['DOBS']
else:
raise NotImplementedError("Filetype {0} not implemented.".format(filetype))
index['MJD'] = index['DOBS'] + 60549
class_dobs = index['DOBS']
index['DOBS'] = ((class_dobs + 365*2025)/365.2425 + 1)
log.debug("Indexing finished at {0}".format(f.tell()))
return index
def _read_header(f, type=0, position=None):
if position is not None:
f.seek(position)
if type in keys_lengths:
hdrsec = [(x[0],numpy.fromfile(f,count=1,dtype=x[2])[0])
for x in keys_lengths[type]]
return dict(hdrsec)
else:
return {}
raise ValueError("Unrecognized type {0}".format(type))
def _read_first_record(f):
f.seek(0)
filetype = f.read(4)
if fileversion_dict[filetype] == 'v1':
return _read_first_record_v1(f)
elif fileversion_dict[filetype] == 'v2':
return _read_first_record_v2(f)
else:
raise ValueError("Unrecognized filetype {0}".format(filetype))
def _read_first_record_v1(f, record_length_words=128):
)
file_description = {
'code': f.read(4),
'next': _read_int32(f),
'lex': _read_int32(f),
'nex': _read_int32(f),
'xnext': _read_int32(f),
'gex': 10.,
'vind': 1,
'version': 1,
'nextrec': 3,
'nextword': 1,
'lind': 32,
'kind': 'unknown',
'flags': 0,
}
file_description['reclen'] = record_length_words
ex = np.fromfile(f, count=(record_length_words*2-5), dtype='int32')
file_description['ex'] = ex[ex!=0]
file_description['nextrec'] = file_description['next']
file_description['lex1'] = file_description['lex'] # number of entries
file_description['lexn'] = (np.arange(file_description['nex']+1) *
file_description['lex1'])
file_description['nentries'] = np.sum(file_description['lexn'])
file_description['aex'] = file_description['ex'][:file_description['nex']]
#file_description['version'] = fileversion_dict[file_description['code']]
assert f.tell() == 1024
# Something is not quite right with the 'ex' parsing
#assert len(file_description['ex']) == file_description['nex']
return file_description
def _read_first_record_v2(f):
f.seek(0)
file_description = {
'code': f.read(4),
'reclen': _read_int32(f),
'kind': _read_int32(f),
'vind': _read_int32(f),
'lind': _read_int32(f),
'flags': _read_int32(f),
'xnext': _read_int64(f),
'nextrec': _read_int64(f),
'nextword': _read_int32(f),
'lex1': _read_int32(f),
'nex': _read_int32(f),
'gex': _read_int32(f),
}
file_description['lexn'] = [0]
if file_description['gex'] == 10:
for ii in range(1, file_description['nex']+1):
file_description['lexn'].append(file_description['lexn'][-1]+file_description['lex1'])
else:
#! Exponential growth. Only growth with mantissa 2.0 is supported
for ii in range(1, file_description['nex']):
# I don't know what the fortran does here!!!
nent = int(file_description['lex1'] * 2**(ii-1))
file_description['lexn'].append(file_description['lexn'][-1]+nent)
file_description['nentries'] = np.sum(file_description['lexn'])
record_length_words = file_description['reclen']
aex = numpy.fromfile(f, count=(record_length_words-15)//2, dtype='int64')
file_description['aex'] = aex[aex!=0]
assert len(file_description['aex']) == file_description['nex']
file_description['version'] = 2
return file_description
def gi8_dicho(ninp,lexn,xval,ceil=True):
iinf = 1
isup = ninp
while isup > (iinf+1):
imid = int(np.floor((isup + iinf)/2.))
if (lexn[imid-1] < xval):
iinf = imid
else:
isup = imid
ival = isup
return ival
def _read_obshead(f, file_description, position=None, verbose=False):
if file_description['version'] == 1:
return _read_obshead_v1(f, position=position, verbose=verbose)
if file_description['version'] == 2:
return _read_obshead_v2(f, position=position)
else:
raise ValueError("Invalid file version {0}.".
format(file_description['version']))
def _read_obshead_v2(f, position=None):
if position is not None:
f.seek(position)
else:
position = f.tell()
IDcode = f.read(4)
if IDcode.strip() != b'2':
raise IndexError("Observation Header reading failure at {0}. "
"Record does not appear to be an observation header.".
format(position))
f.seek(position)
entrydescv2_nw1 = 11
entrydescv2_nw2 = 5
obshead = {
'CODE': f.read(4),
'VERSION': _read_int32(f),
'NSEC': _read_int32(f),
'NWORD': _read_int64(f),
'ADATA': _read_int64(f),
'LDATA': _read_int64(f),
'XNUM': _read_int64(f),
}
section_numbers = np.fromfile(f, count=obshead['NSEC'], dtype='int32')
section_lengths = np.fromfile(f, count=obshead['NSEC'], dtype='int64')
section_addresses = np.fromfile(f, count=obshead['NSEC'], dtype='int64')
return obshead['XNUM'],obshead,dict(zip(section_numbers,section_addresses))
def _read_obshead_v1(f, position=None, verbose=False):
if position is not None:
f.seek(position)
IDcode = f.read(4)
if IDcode.strip() != b'2':
raise IndexError("Observation Header reading failure at {0}. "
"Record does not appear to be an observation header.".
format(f.tell() - 4))
(nblocks, nbyteob, data_address, nheaders, data_length, obindex, nsec,
obsnum) = numpy.fromfile(f, count=8, dtype='int32')
if verbose:
print("nblocks,nbyteob,data_address,data_length,nheaders,obindex,nsec,obsnum",nblocks,nbyteob,data_address,data_length,nheaders,obindex,nsec,obsnum)
print("DATA_LENGTH: ",data_length)
seccodes = numpy.fromfile(f,count=nsec,dtype='int32')
seclen = numpy.fromfile(f,count=nsec,dtype='int32')
secaddr = numpy.fromfile(f,count=nsec,dtype='int32')
if verbose:
print("Section codes, addresses, lengths: ",seccodes,secaddr,seclen)
hdr = {'NBLOCKS':nblocks, 'NBYTEOB':nbyteob, 'DATAADDR':data_address,
'DATALEN':data_length, 'NHEADERS':nheaders, 'OBINDEX':obindex,
'NSEC':nsec, 'OBSNUM':obsnum}
return obsnum,hdr,dict(zip(seccodes,secaddr))
# Not entirely clear what this is, but it is stuff that precedes the actual data
#
# Looks something like this:
# array([ 1, -2, -3, -4, -14,
# 9, 17, 18, 25, 55,
# 64, 81, 99, -1179344801, 979657591,
#
# -2, -3, -4, -14 indicate the 4 header types
# 9,17,18,25 *MAY* indicate the number of bytes in each
#
#
# HOW is it indicated how many entries there are?
# """
or=np.mean, weight=None):
if myarr.ndim != 1:
raise ValueError("Only works on 1d data. Says so in the title.")
xs = myarr.size
crarr = myarr[:xs-(xs % int(factor))]
if weight is None:
dsarr = estimator(np.concatenate([[crarr[i::factor] for i in
range(factor)]]),axis=0)
else:
dsarr = estimator(np.concatenate([[crarr[i::factor]*weight[i::factor] for i in
range(factor)]]),axis=0)
warr = estimator(np.concatenate([[weight[i::factor] for i in
range(factor)]]),axis=0)
dsarr = dsarr/warr
return dsarr
def test_downsample1d():
data = np.arange(10)
weight = np.ones(10)
weight[5]=0
assert np.all(downsample_1d(data, 2, weight=weight, estimator=np.mean) ==
np.array([0.5, 2.5, 4.0, 6.5, 8.5]))
def read_observation(f, obsid, file_description=None, indices=None,
                     my_memmap=None, memmap=True, verbose=False):
    """Read a single observation (spectrum + merged header) from a CLASS file.

    Parameters
    ----------
    f : str or file object
        Filename or an already-open binary file handle.  When a file object
        is passed with ``memmap=True``, ``my_memmap`` must also be given.
    obsid : int
        Entry number in the file index.
    file_description : dict, optional
        Cached result of ``_read_first_record``; read from ``f`` if omitted.
    indices : optional
        Cached result of ``_read_indices``; read from ``f`` if omitted.
    my_memmap : numpy.memmap, optional
        float32 memory map of the whole file.
    memmap : bool
        If True, slice the spectrum out of ``my_memmap`` instead of reading
        through ``f``.

    Returns
    -------
    (spectrum, header) : (ndarray, dict)
    """
    if isinstance(f, str):
        f = open(f,'rb')
        opened = True
        if memmap:
            my_memmap = numpy.memmap(f, offset=0, dtype='float32',
                                     mode='r')
        else:
            my_memmap = None
    elif my_memmap is None and memmap:
        raise ValueError("Must pass in a memmap object if passing in a file object.")
    else:
        opened = False

    if file_description is None:
        file_description = _read_first_record(f)
    if indices is None:
        indices = _read_indices(f, file_description)

    index = indices[obsid]

    # index offsets are 1-based and expressed in 4-byte words
    obs_position = (index['BLOC']-1)*file_description['reclen']*4 + (index['WORD']-1)*4
    log.debug("Reading observation at position {0}".format(obs_position))

    obsnum,obshead,sections = _read_obshead(f, file_description,
                                            position=obs_position,
                                            verbose=verbose)
    header = obshead

    # merge every section header; the data start right after the last one
    datastart = 0
    for section_id,section_address in iteritems(sections):
        sec_position = obs_position + (section_address-1)*4
        temp_hdr = _read_header(f, type=header_id_numbers[section_id],
                                position=sec_position)
        header.update(temp_hdr)
        datastart = max(datastart,f.tell())

    hdr = header
    hdr.update(obshead)
    hdr.update({'OBSNUM':obsnum,'RECNUM':obsid})
    # LAM/BET (and their offsets) are stored in radians; convert to degrees
    hdr.update({'RA':hdr['LAM']/pi*180,'DEC':hdr['BET']/pi*180})
    hdr.update({'RAoff':hdr['LAMOF']/pi*180,'DECoff':hdr['BETOF']/pi*180})
    hdr.update({'OBJECT':hdr['SOURC'].strip()})
    hdr.update({'BUNIT':'Tastar'})
    hdr.update({'EXPOSURE':float(hdr['TIME'])})
    hdr['HDRSTART'] = obs_position
    hdr['DATASTART'] = datastart
    hdr.update(indices[obsid])
    # UT is presumably in radians here (fraction of a day = UT/(2*pi)) — TODO confirm
    hdr.update({'OBSDATE': hdr['MJD'] + hdr['UT']/2./pi})

    # Determine the channel count: spectroscopic (KIND==1) entries store it
    # in NPOIN, otherwise fall back to NCHAN.
    if hdr['KIND'] == 1:
        nchan = hdr['NPOIN']
    elif 'NCHAN' in hdr:
        nchan = hdr['NCHAN']
    else:
        # BUGFIX: this previously dropped into ipdb (a debugger left over
        # from development) and would then hit a NameError on ``nchan``;
        # raise a real error instead.
        raise ValueError("No NCHAN in header; entry {0} does not look like "
                         "a spectrum (KIND={1}).".format(obsid, hdr.get('KIND')))

    f.seek(datastart-1)
    spec = _read_spectrum(f, position=datastart-1, nchan=nchan,
                          memmap=memmap, my_memmap=my_memmap)

    if opened:
        f.close()

    return spec, hdr
def _read_spectrum(f, position, nchan, my_memmap=None, memmap=True):
if position != f.tell():
log.warning("Reading data from {0}, but the file is wound "
"to {1}.".format(position, f.tell()))
if memmap:
here = position
spectrum = my_memmap[here//4:here//4+nchan]
f.seek(here+nchan*4)
else:
f.seek(position)
spectrum = numpy.fromfile(f,count=nchan,dtype='float32')
return spectrum
def _spectrum_from_header(fileobj, header, memmap=None):
    """Read the spectrum described by ``header`` from ``fileobj``.

    BUGFIX: the channel count previously consulted an undefined ``hdr``
    variable (NameError at call time); it now reads from ``header``.
    """
    return _read_spectrum(fileobj, position=header['DATASTART'],
                          nchan=header['NCHAN'] if 'NCHAN' in header
                          else header['NPOIN'],
                          my_memmap=memmap)
def clean_header(header):
    """Return a copy of ``header`` suitable for a FITS header.

    Keys are truncated to 8 characters.  Scalars (``int``/``float``/``str``)
    pass through unchanged; numpy arrays with 2..10 elements are exploded
    into numbered keys (7-character stem + index); anything else is
    stringified.  Arrays larger than 10 elements raise ``ValueError``.
    """
    cleaned = {}
    for key, value in header.items():
        if isinstance(value, (int, float, str)):
            cleaned[key[:8]] = value
        elif isinstance(value, np.ndarray) and value.size > 1:
            if value.size > 10:
                raise ValueError("Large array being put in header. That's no good. key={0}".format(key))
            for idx, element in enumerate(value):
                cleaned[key[:7] + str(idx)] = element
        else:
            cleaned[key[:8]] = str(value)
    return cleaned
class ClassObject(object):
    """In-memory index of a GILDAS CLASS file.

    Opens ``filename``, reads the file description and the full entry
    index, and memory-maps the data section so individual spectra can be
    loaded lazily (via :class:`LazyItem`).  Also attaches per-entry
    position angles and tags OTF (on-the-fly) scans.
    """
    def __init__(self, filename, verbose=False):
        t0 = time.time()
        self._file = open(filename, 'rb')
        self.file_description = _read_first_record(self._file)
        self.allind = _read_indices(self._file, self.file_description)
        # float32 view of the whole file; spectra are sliced out of this
        self._data = np.memmap(self._file, dtype='float32', mode='r')
        if verbose: log.info("Setting _spectra")
        self._spectra = LazyItem(self)
        t1 = time.time()
        if verbose: log.info("Setting posang. t={0}".format(t1-t0))
        self.set_posang()
        t2 = time.time()
        if verbose: log.info("Identifying otf scans. t={0}".format(t2-t1))
        self._identify_otf_scans(verbose=verbose)
        t3 = time.time()
        #self._load_all_spectra()
        if verbose:
            log.info("Loaded CLASS object with {3} indices. Time breakdown:"
                     " {0}s for indices, "
                     "{1}s for posang, and {2}s for OTF scan identification"
                     .format(t1-t0, t2-t1, t3-t2, len(self.allind)))

    def __repr__(self):
        s = "\n".join(["{k}: {v}".format(k=k,v=v)
                       for k,v in iteritems(self.getinfo())])
        return "ClassObject({id}) with {nspec} entries\n".format(id=id(self),
                                                                 nspec=len(self.allind)) + s

    def getinfo(self, allsources=False):
        """Summarize the file: telescopes, lines, scans, and source names.

        With ``allsources=False``, calibration entries (SKY-, TSYS, ...)
        are excluded from the source list.
        """
        info = dict(
            tels = self.tels,
            lines = self.lines,
            scans = self.scans,
            sources = self.sources if allsources else self.sci_sources,
        )
        return info

    def set_posang(self):
        """Attach COMPPOSA (degrees) to every header: the position angle of
        each entry relative to the previous one, from the pointing offsets."""
        h0 = self.headers[0]
        for h in self.headers:
            dx = h['OFF1'] - h0['OFF1']
            dy = h['OFF2'] - h0['OFF2']
            h['COMPPOSA'] = np.arctan2(dy,dx)*180/np.pi
            h0 = h

    def _identify_otf_scans(self, verbose=False):
        """Tag index entries with OTFSCAN / FIRSTSCAN / SCANPOSA.

        NOTE(review): ``h0`` is never advanced inside the loop, so every
        entry is compared against ``allind[0]``; verify this is the
        intended behavior before relying on the OTFSCAN numbering.
        """
        h0 = self.allind[0]
        st = 0
        otfscan = 0
        posangs = [h['COMPPOSA'] for h in self.allind]
        if verbose:
            pb = ProgressBar(len(self.allind))
        for ii,h in enumerate(self.allind):
            if (h['SCAN'] != h0['SCAN']
                or h['SOURC'] != h0['SOURC']):

                h0['FIRSTSCAN'] = st
                # median position angle over the completed run of entries
                cpa = np.median(posangs[st:ii])
                for hh in self.allind[st:ii]:
                    hh['SCANPOSA'] = cpa % 180
                st = ii
                if h['SCAN'] == h0['SCAN']:
                    h0['OTFSCAN'] = otfscan
                    otfscan += 1
                    h['OTFSCAN'] = otfscan
                else:
                    otfscan = 0
                    h['OTFSCAN'] = otfscan
            else:
                h['OTFSCAN'] = otfscan

            if verbose:
                pb.update(ii)

    def listscans(self, source=None, telescope=None, out=sys.stdout):
        """Print a table summarizing each (scan, source) run of entries.

        ``source``/``telescope`` are regular expressions (or a list of
        them for ``source``) used to filter the printed rows.  Writes to
        ``out`` and returns the list of per-scan summary dicts.
        """
        minid=0
        scan = -1
        sourc = ""
        #tel = ''
        minoff1,maxoff1 = np.inf,-np.inf
        minoff2,maxoff2 = np.inf,-np.inf
        ttlangle,nangle = 0.0,0
        print("{entries:15s} {SOURC:12s} {XTEL:12s} {SCAN:>8s} {SUBSCAN:>8s} "
              "[ {RAmin:>12s}, {RAmax:>12s} ] "
              "[ {DECmin:>12s}, {DECmax:>12s} ] "
              "{angle:>12s} {SCANPOSA:>12s} {OTFSCAN:>8s} {TSYS:>8s} {UTD:>12s}"
              .format(entries='Scans', SOURC='Source', XTEL='Telescope',
                      SCAN='Scan', SUBSCAN='Subscan',
                      RAmin='min(RA)', RAmax='max(RA)',
                      DECmin='min(DEC)', DECmax='max(DEC)',
                      SCANPOSA='Scan PA',
                      angle='Angle', OTFSCAN='OTFscan',
                      TSYS='TSYS', UTD='UTD'),
              file=out)

        data_rows = []

        for ii,row in enumerate(self.headers):
            if (row['SCAN'] == scan
                and row['SOURC'] == sourc
                #and row['XTEL'] == tel
               ):
                # same scan: grow the offset bounding box and angle stats
                minoff1 = min(minoff1, row['OFF1'])
                maxoff1 = max(maxoff1, row['OFF1'])
                minoff2 = min(minoff2, row['OFF2'])
                maxoff2 = max(maxoff2, row['OFF2'])
                ttlangle += np.arctan2(row['OFF2'] - prevrow['OFF2'],
                                       row['OFF1'] - prevrow['OFF1'])%np.pi
                nangle += 1
                prevrow = row
            else:
                if scan == -1:
                    # first entry: just initialize the accumulators
                    scan = row['SCAN']
                    sourc = row['SOURC']
                    #tel = row['XTEL']
                    prevrow = row
                    continue

                ok = True
                if source is not None:
                    if isinstance(source, (list,tuple)):
                        ok = ok and any(re.search((s), prevrow['SOURC'])
                                        for s in source)
                    else:
                        ok = ok and re.search((source), prevrow['SOURC'])
                if telescope is not None:
                    ok = ok and re.search((telescope), prevrow['XTEL'])
                if ok:
                    # offsets are in radians -> arcseconds
                    data = dict(RAmin=minoff1*180/np.pi*3600,
                                RAmax=maxoff1*180/np.pi*3600,
                                DECmin=minoff2*180/np.pi*3600,
                                DECmax=maxoff2*180/np.pi*3600,
                                angle=(ttlangle/nangle)*180/np.pi if nangle>0 else 0,
                                e0=minid,
                                e1=ii-1,
                                #TSYS=row['TSYS'] if 'TSYS' in row else '--',
                                UTD=row['DOBS']+row['UT'] if 'UT' in row else -99,
                                **prevrow)
                    print("{e0:7d}-{e1:7d} {SOURC:12s} {XTEL:12s} {SCAN:8d} {SUBSCAN:8d} "
                          "[ {RAmin:12f}, {RAmax:12f} ] "
                          "[ {DECmin:12f}, {DECmax:12f} ] "
                          "{angle:12.1f} {SCANPOSA:12.1f} {OTFSCAN:8d}"
                          " {TSYS:>8.1f} {UTD:12f}".
                          format(**data),
                          file=out)
                    data_rows.append(data)

                minoff1,maxoff1 = np.inf,-np.inf
                minoff2,maxoff2 = np.inf,-np.inf
                ttlangle,nangle = 0.0,0
                scan = row['SCAN']
                sourc = row['SOURC']
                #tel = row['XTEL']
                minid = ii

        # BUGFIX: previously returned the loop-local ``data`` dict (unbound
        # when nothing matched) and discarded the accumulated rows.
        return data_rows

    @property
    def tels(self):
        """Set of telescope names (cached after first access)."""
        if hasattr(self,'_tels'):
            return self._tels
        else:
            self._tels = set([h['CTELE'] for h in self.allind])
            #testing if CTELE even works
            return self._tels

    @property
    def sources(self):
        """Set of all source names, including calibration entries (cached)."""
        if hasattr(self,'_source'):
            return self._source
        else:
            self._source = set([h['SOURC'] for h in self.allind])
            return self._source

    @property
    def scans(self):
        """Set of scan numbers (cached)."""
        if hasattr(self,'_scan'):
            return self._scan
        else:
            self._scan = set([h['SCAN'] for h in self.allind])
            return self._scan

    @property
    def sci_sources(self):
        """Science sources only: calibration entries are filtered out."""
        return set([s for s in self.sources
                    if s[:4] not in ('SKY-', 'TSYS', 'TCAL', 'TREC', 'HOT-',
                                     'COLD')])

    @property
    def lines(self):
        """Set of line names (cached)."""
        if hasattr(self,'_lines'):
            return self._lines
        else:
            self._lines = set([h['LINE'] for h in self.allind])
            return self._lines

    def _load_all_spectra(self, indices=None):
        """Force-read spectra into the LazyItem cache, skipping entries that
        were already loaded in a previous call."""
        if indices is None:
            indices = range(self.file_description['xnext']-1)

        if hasattr(self, '_loaded_indices'):
            indices_set = set(indices)
            indices_to_load = (indices_set.difference(self._loaded_indices))
            self._loaded_indices = self._loaded_indices.union(indices_set)

            # BUGFIX: this used to do ``enumerate(xrange(indices_to_load))``,
            # which raises TypeError (xrange needs an int), and guarded with
            # ``any(...)``, which treats a remaining {0} as empty.
            if indices_to_load:
                pb = ProgressBar(len(indices_to_load))
                for ii,k in enumerate(indices_to_load):
                    self._spectra[k]
                    pb.update(ii)
        else:
            self._loaded_indices = set(indices)
            self._spectra.load_all()

    @property
    def spectra(self):
        """List of the *loaded* spectra (data arrays only)."""
        return [x[0] for x in self._spectra]

    @property
    def headers(self):
        """Per-entry headers: the full loaded header when available,
        otherwise the raw index entry."""
        return [self._spectra[ii][1]
                if ii in self._spectra else x
                for ii,x in enumerate(self.allind)]

    def select_spectra(self,
                       all=None,
                       line=None,
                       linere=None,
                       linereflags=re.IGNORECASE,
                       number=None,
                       scan=None,
                       offset=None,
                       source=None,
                       sourcere=None,
                       sourcereflags=re.IGNORECASE,
                       range=None,
                       quality=None,
                       telescope=None,
                       telescopere=None,
                       telescopereflags=re.IGNORECASE,
                       subscan=None,
                       entry=None,
                       posang=None,
                       #observed=None,
                       #reduced=None,
                       frequency=None,
                       section=None,
                       user=None,
                       include_old_versions=False,
                       ):
        """Return the entry numbers matching *all* of the given criteria.

        String criteria (``line``/``source``/``telescope``) are matched as
        escaped substrings, case-insensitively; the ``*re`` variants accept
        raw regular expressions.  ``range`` is ``(xlo, xhi, ylo, yhi)`` in
        pointing offsets; ``frequency`` and ``posang`` are (lo, hi) pairs.
        ``frequency`` forces all spectra to be loaded first.  Old
        (overwritten) entry versions are skipped unless
        ``include_old_versions`` is set.

        NOTE: ``all`` and ``range`` shadow builtins, but renaming them
        would break keyword callers.
        """
        if entry is not None and len(entry)==2:
            return irange(entry[0], entry[1])

        if frequency is not None:
            self._load_all_spectra()

        sel = [(re.search(re.escape(ensure_bytes(line)), h['LINE'], re.IGNORECASE)
                if line is not None else True) and
               (re.search(ensure_bytes(linere), h['LINE'], linereflags)
                if linere is not None else True) and
               (h['SCAN'] == scan if scan is not None else True) and
               ((h['OFF1'] == offset or
                 h['OFF2'] == offset) if offset is not None else True) and
               (re.search(re.escape(ensure_bytes(source)), h['CSOUR'], re.IGNORECASE)
                if source is not None else True) and
               (re.search(ensure_bytes(sourcere), h['CSOUR'], sourcereflags)
                if sourcere is not None else True) and
               (h['OFF1']>range[0] and h['OFF1'] < range[1] and
                h['OFF2']>range[2] and h['OFF2'] < range[3]
                if range is not None and len(range)==4 else True) and
               (h['QUAL'] == quality if quality is not None else True) and
               (re.search(re.escape(ensure_bytes(telescope)), h['CTELE'], re.IGNORECASE)
                if telescope is not None else True) and
               (re.search(ensure_bytes(telescopere), h['CTELE'], telescopereflags)
                if telescopere is not None else True) and
               (h['SUBSCAN']==subscan if subscan is not None else True) and
               ('RESTF' in h and # Need to check that it IS a spectrum: continuum data can't be accessed this way
                h['RESTF'] > frequency[0] and
                h['RESTF'] < frequency[1]
                if frequency is not None and len(frequency)==2
                else True) and
               (h['COMPPOSA']%180 > posang[0] and
                h['COMPPOSA']%180 < posang[1]
                if posang is not None and len(posang)==2
                else True) and
               # probably not a valid spectrum?
               (h.get('XVER', h.get('VER', -999)) > 0
                if not include_old_versions else True)
               for h in self.headers
               ]

        return [ii for ii,k in enumerate(sel) if k]

    def get_spectra(self, progressbar=True, **kwargs):
        """Load and return the (spectrum, header) pairs matching the
        ``select_spectra`` keyword criteria."""
        selected_indices = self.select_spectra(**kwargs)

        # BUGFIX: ``not any(selected_indices)`` treated [0] (i.e. only the
        # first entry selected) as an empty selection.
        if not selected_indices:
            raise ValueError("Selection yielded empty.")

        self._spectra.load(selected_indices, progressbar=progressbar)

        return [self._spectra[ii] for ii in selected_indices]

    def get_pyspeckit_spectra(self, progressbar=True, **kwargs):
        """Like :meth:`get_spectra`, but wrap each result in a
        pyspeckit.Spectrum with a frequency axis and cleaned header."""
        spdata = self.get_spectra(progressbar=progressbar, **kwargs)

        spectra = [pyspeckit.Spectrum(data=data,
                                      xarr=make_axis(header),
                                      header=clean_header(header))
                   for data,header in spdata]

        return spectra

    def read_observations(self, observation_indices, progressbar=True):
        """Load (if necessary) and return the given entries as
        (spectrum, header) pairs."""
        self._spectra.load(observation_indices, progressbar=progressbar)
        return [self._spectra[ii] for ii in observation_indices]
@print_timing
def read_class(filename, downsample_factor=None, sourcename=None,
               telescope=None, line=None, posang=None, verbose=False,
               flag_array=None):
    """Read a CLASS file and return ``(spectra, headers, indexes)``.

    ``sourcename``/``telescope``/``line`` may each be a single value or a
    list; the selection is the union over all combinations.  Returns None
    when nothing matches.  With ``downsample_factor`` set, spectra are
    block-averaged (optionally weighted by ``~flag_array``) and the headers
    adjusted accordingly; ``indexes`` keeps references to the (mutated)
    original header dicts.
    """
    classobj = ClassObject(filename)

    def _as_list(value):
        return value if isinstance(value, (list, tuple)) else [value]

    sourcename = _as_list(sourcename)
    telescope = _as_list(telescope)
    line = _as_list(line)

    if verbose:
        log.info("Reading...")

    selection = [index
                 for src in sourcename
                 for tel in telescope
                 for ln in line
                 for index in classobj.select_spectra(sourcere=src,
                                                      telescope=tel,
                                                      line=ln,
                                                      posang=posang)]

    sphdr = classobj.read_observations(selection)
    if len(sphdr) == 0:
        return None

    spec, hdr = zip(*sphdr)
    spectra = list(spec)
    headers = list(hdr)

    indexes = headers
    weight = ~flag_array if flag_array is not None else None

    if downsample_factor is not None:
        if verbose:
            log.info("Downsampling...")
        spectra = [downsample_1d(sp, downsample_factor, weight=weight)
                   for sp in ProgressBar(spectra)]
        headers = [downsample_header(h, downsample_factor)
                   for h in ProgressBar(headers)]

    for h in headers:
        stringify_header(h)

    return spectra, headers, indexes
def stringify_header(header):
    """Make every value of ``header`` FITS-serializable, in place.

    bytes are decoded to str; scalars of FITS-compatible types pass
    through; any other value is stringified with non-space whitespace
    characters stripped out.
    """
    from six import string_types, integer_types
    import string
    allowed_types = (string_types + integer_types +
                     (float, complex, bool, np.floating, np.integer,
                      np.complexfloating, np.bool_))
    # string.printable[96:] is the non-space whitespace: '\n\r\x0b\x0c'
    bad_chars = string.printable[96:]
    bad_char_re = re.compile("[{0}]".format(bad_chars))
    for key, value in header.items():
        if isinstance(value, bytes):
            header[key] = value.decode()
            continue
        if isinstance(value, allowed_types):
            continue
        header[key] = bad_char_re.sub("", str(header[key]))
def downsample_header(hdr, downsample_factor):
    """Adjust channel-dependent header keywords after downsampling.

    Mutates ``hdr`` in place and returns it: channel counts are divided by
    the factor, resolutions multiplied, and the reference channel rescaled.
    """
    for key in ('NCHAN', 'NPOIN', 'DATALEN'):
        if key in hdr:
            hdr[key] = int(hdr[key] / downsample_factor)
    # NOTE(review): original author flagged this as possibly wrong;
    # the alternative would be (RCHAN-1)/factor + 1.
    scale = 1. / downsample_factor
    hdr['RCHAN'] = (hdr['RCHAN'] - 1) * scale + 0.5 + scale / 2.
    for key in ('FRES', 'VRES'):
        if key in hdr:
            hdr[key] *= downsample_factor
    return hdr
def make_axis(header, imagfreq=False):
    """Build a SpectroscopicAxis (MHz) from CLASS frequency keywords.

    The axis is ``RESTF + FOFF + (channel - RCHAN) * FRES``; with
    ``imagfreq=True`` the image sideband is used instead
    (``IMAGE - (channel - RCHAN) * FRES``).  When FOFF is absent but a
    velocity offset is given, FOFF is derived using the radio convention.
    """
    from .. import units
    rest_frequency = header.get('RESTF')
    xunits = 'MHz'
    nchan = header.get('NCHAN')
    voff = header.get('VOFF')
    foff = header.get('FOFF')
    doppler = header.get('DOPPLER')
    fres = header.get('FRES')
    refchan = header.get('RCHAN')
    imfreq = header.get('IMAGE')

    if foff in (None, 0.0) and voff not in (None, 0.0):
        # Radio convention
        foff = -voff / 2.997924580e5 * rest_frequency

    # per-channel frequency offsets relative to the reference channel
    channel_offsets = (numpy.arange(1, nchan + 1) - refchan) * fres
    if imagfreq:
        xarr = imfreq - channel_offsets
        return units.SpectroscopicAxis(xarr, unit='MHz', refX=imfreq * u.MHz)
    xarr = rest_frequency + foff + channel_offsets
    return units.SpectroscopicAxis(xarr, unit='MHz', refX=rest_frequency * u.MHz)
@print_timing
def class_to_obsblocks(filename, telescope, line, datatuple=None, source=None,
                       imagfreq=False, DEBUG=False, **kwargs):
    """Load a CLASS file and group its spectra into pyspeckit ObsBlocks.

    Entries are filtered by ``telescope``/``line`` (and optionally
    ``source``); each contiguous run of entries with the same SCAN number
    becomes one ObsBlock.

    Parameters
    ----------
    filename : str
    telescope : collection of str
        Accepted XTEL values (exact, stripped).
    line : collection of str
        Accepted LINE values (exact, stripped).
    datatuple : tuple, optional
        Pre-read ``(spectra, header, indexes)`` to avoid re-reading.
    source : collection of str, optional
        Accepted SOURC values (exact, stripped).
    imagfreq : bool
        Build the axis from the image sideband frequency.
    """
    if datatuple is None:
        spectra,header,indexes = read_class(filename, **kwargs)
    else:
        spectra,header,indexes = datatuple

    obslist = []
    lastscannum = -1
    spectrumlist = None
    for sp,hdr,ind in zip(spectra,header,indexes):
        hdr.update(ind)
        # this is slow but necessary...
        H = pyfits.Header()
        for k,v in iteritems(hdr):
            if hasattr(v,"__len__") and not isinstance(v,str):
                # make an array of header entries, but this
                # supports only up to 10 of them...
                if len(v) > 1:
                    if len(v) < 10:
                        for ii,vv in enumerate(v):
                            newkey = k[:7]+str(ii)
                            H[newkey] = vv
                    elif len(v) < 100:
                        for ii,vv in enumerate(v):
                            newkey = k[:6]+str(ii)
                            H[newkey] = vv
                    else:
                        raise ValueError("Too many entries for {0}".format(k))
                else:
                    H[k] = v[0]
            #elif not any(x in str(v).lower() for x in ('comment', 'end', 'history')):
            #    # do not try to add comments...
            # This commented out block used to attempt to reject comments
            # using a private regex in the old pyfits which no longer exists.
            # I don't know if it was necessary.
            else:
                H[k] = v
        scannum = hdr['SCAN']
        if 'XTEL' in hdr and hdr['XTEL'].strip() not in telescope:
            continue
        if hdr['LINE'].strip() not in line:
            continue
        if (source is not None) and (hdr['SOURC'].strip() not in source):
            continue
        hdr['RESTFREQ'] = hdr.get('RESTF')
        H['RESTFREQ'] = hdr.get('RESTF')

        if scannum != lastscannum:
            # new scan: flush the previous spectrum list and start a new one
            lastscannum = scannum
            if spectrumlist is not None:
                obslist.append(pyspeckit.ObsBlock(spectrumlist))
            xarr = make_axis(hdr,imagfreq=imagfreq)
            spectrumlist = [(
                pyspeckit.Spectrum(xarr=xarr,
                                   header=H,
                                   data=sp))]
        else:
            # same scan: reuse the axis built at the start of the scan
            spectrumlist.append(
                pyspeckit.Spectrum(xarr=xarr,
                                   header=H,
                                   data=sp))

    # BUGFIX: the final scan's spectrumlist was never flushed, so the last
    # ObsBlock was silently dropped.
    if spectrumlist is not None:
        obslist.append(pyspeckit.ObsBlock(spectrumlist))

    return obslist
class LazyItem(object):
    """Lazy loader mapping entry number -> (spectrum, header) tuple.

    Observations are read from the parent ClassObject's file on first
    access and cached in ``self.sphdr``; iteration yields the cached
    (spectrum, header) pairs.
    """
    def __init__(self, parent):
        self.parent = parent
        self.sphdr = {}  # cache: entry number -> (spectrum, header)
        self.nind = len(self.parent.allind)
        self.nloaded = 0

    def __repr__(self):
        return ("Set of {0} spectra & headers, {1} loaded"
                " ({2:0.2f}%)".format(self.nind, self.nloaded,
                                      (float(self.nloaded)/self.nind)*100))

    def load_all(self, progressbar=True):
        """Load every entry in the file."""
        self.load(range(self.nind))

    def load(self, indices, progressbar=True):
        """Load the given entries, showing a progress bar.

        NOTE(review): the ``progressbar`` flag is currently ignored; a bar
        is always created.
        """
        pb = ProgressBar(len(indices))
        counter = 0
        for k in indices:
            self[k]
            counter += 1
            pb.update(counter)

    def __getitem__(self, key):
        if key in self.sphdr:
            return self.sphdr[key]
        elif isinstance(key, slice):
            # BUGFIX: slice objects expose ``stop``, not ``end``.
            return [self[k] for k in xrange(key.start or 0,
                                            key.stop or len(self.parent.allind),
                                            key.step or 1)]
        else:
            sphd = read_observation(self.parent._file, key,
                                    file_description=self.parent.file_description,
                                    indices=self.parent.allind,
                                    my_memmap=self.parent._data)
            # fold the raw index entry into the loaded header
            sphd[1].update(self.parent.allind[key])
            self.sphdr[key] = sphd
            self.nloaded += 1
            return sphd

    def __iter__(self):
        # BUGFIX: this called ``self.next()``, which does not exist on
        # Python 3 (the method is named ``__next__``).
        return self.__next__()

    def __next__(self):
        # Generator over the cached (spectrum, header) pairs.
        # BUGFIX: previously iterated the nonexistent ``self.spheader``.
        for k in self.sphdr:
            yield self.sphdr[k]

    def __contains__(self, key):
        return key in self.sphdr
@print_timing
def class_to_spectra(filename, datatuple=None, **kwargs):
    """Load every spectrum in a CLASS file as one pyspeckit.Spectra object.

    ``datatuple`` may supply a pre-read ``(spectra, header, indexes)``
    triple; otherwise ``read_class`` is called with ``**kwargs``.
    """
    if datatuple is not None:
        spectra, header, indexes = datatuple
    else:
        spectra, header, indexes = read_class(filename, **kwargs)

    spectrumlist = []
    for data, hdr, index in zip(spectra, header, indexes):
        hdr.update(index)
        spectrumlist.append(pyspeckit.Spectrum(xarr=make_axis(hdr),
                                               header=hdr,
                                               data=data))

    return pyspeckit.Spectra(spectrumlist)
def tests():
    """Developer smoke test: load ObsBlocks from a local SMT CLASS file.

    NOTE: uses a hard-coded path on the original author's machine.
    """
    fn1 = '/Users/adam/work/bolocam/hht/class_003.smt'
    n2hp = class_to_obsblocks(fn1,
                              telescope=['SMT-F1M-HU', 'SMT-F1M-VU'],
                              line=['N2HP(3-2)', 'N2H+(3-2)'])
    hcop = class_to_obsblocks(fn1,
                              telescope=['SMT-F1M-HL', 'SMT-F1M-VL'],
                              line=['HCOP(3-2)', 'HCO+(3-2)'])
| true | true |
f71781d481d127f72294f8baec04d9d74461c11a | 121 | py | Python | okta/models/usergroup/__init__.py | rkhleics/oktasdk-python | da8183444704c6d16831d1edd619390e9120dd70 | [
"Apache-2.0"
] | 1 | 2020-09-09T12:59:19.000Z | 2020-09-09T12:59:19.000Z | okta/models/usergroup/__init__.py | torchbox/oktasdk-python | da8183444704c6d16831d1edd619390e9120dd70 | [
"Apache-2.0"
] | null | null | null | okta/models/usergroup/__init__.py | torchbox/oktasdk-python | da8183444704c6d16831d1edd619390e9120dd70 | [
"Apache-2.0"
] | 2 | 2017-11-02T22:12:57.000Z | 2019-09-16T08:02:23.000Z | from .UserGroup import UserGroup
from .UserGroupProfile import UserGroupProfile
from .UserGroupRule import UserGroupRule
| 30.25 | 46 | 0.876033 | from .UserGroup import UserGroup
from .UserGroupProfile import UserGroupProfile
from .UserGroupRule import UserGroupRule
| true | true |
f717822d090647eb7a44dad23a51405caa178759 | 1,679 | py | Python | stockbot/ticker/sinotrade/session.py | tanlin2013/stockbot | 08322ed4d847ea9e58b091985cef5c128a694b12 | [
"Apache-2.0"
] | 1 | 2021-07-12T23:55:20.000Z | 2021-07-12T23:55:20.000Z | stockbot/ticker/sinotrade/session.py | ajmal017/stockbot-7 | 08322ed4d847ea9e58b091985cef5c128a694b12 | [
"Apache-2.0"
] | null | null | null | stockbot/ticker/sinotrade/session.py | ajmal017/stockbot-7 | 08322ed4d847ea9e58b091985cef5c128a694b12 | [
"Apache-2.0"
] | 1 | 2021-07-12T23:55:12.000Z | 2021-07-12T23:55:12.000Z | import os
import logging
import pandas as pd
from datetime import date
from shioaji import Shioaji
class Session(Shioaji):
    """A Shioaji trading session that logs in on construction.

    Credentials come from the SINOTRADE_ID / SINOTRADE_PASSWD environment
    variables, or a fixed sandbox account when ``simulation`` is True.
    """
    def __init__(self, simulation: bool = False, timeout: int = 10000) -> None:
        """
        Args:
            simulation:
            timeout:
        Notes: The ID of test account ranging from `PAPIUSER01` to `PAPIUSER08`,
              with password `2222`.
        """
        if simulation:
            _person_id = "PAPIUSER05"
            _passwd = "2222"
        else:
            _person_id = os.environ['SINOTRADE_ID']
            _passwd = os.environ['SINOTRADE_PASSWD']
        super(Session, self).__init__(simulation=simulation)
        self.login(
            person_id=_person_id,
            passwd=_passwd,
            contracts_cb=lambda security_type: logging.info(f"{repr(security_type)} fetch done."),
            contracts_timeout=timeout
        )

    def __del__(self) -> None:
        # Best-effort logout when the session object is garbage-collected.
        self.logout()
        logging.info("session closed.")

    @property
    def positions(self) -> pd.DataFrame:
        """Current positions of the stock account as a DataFrame."""
        records = self.list_positions(self.stock_account)
        return pd.DataFrame(records)

    def profit_loss(self, begin_date: date, end_date: date) -> pd.DataFrame:
        """Realized profit/loss between the two dates (inclusive)."""
        records = self.list_profit_loss(
            self.stock_account,
            begin_date=begin_date.strftime('%Y-%m-%d'),
            end_date=end_date.strftime('%Y-%m-%d')
        )
        return pd.DataFrame(records)

    @property
    def settlements(self) -> pd.DataFrame:
        """Upcoming settlement amounts as a DataFrame."""
        records = self.list_settlements(self.stock_account)
        return pd.DataFrame(records)

    @property
    def balance(self) -> pd.DataFrame:
        """Account balance as a DataFrame."""
        return pd.DataFrame(self.account_balance())
| 27.983333 | 98 | 0.596784 | import os
import logging
import pandas as pd
from datetime import date
from shioaji import Shioaji
class Session(Shioaji):
    """A Shioaji trading session that logs in on construction.

    Credentials come from the SINOTRADE_ID / SINOTRADE_PASSWD environment
    variables; with ``simulation=True`` a fixed sandbox account is used
    (test IDs PAPIUSER01..PAPIUSER08, password 2222).
    """
    def __init__(self, simulation: bool = False, timeout: int = 10000) -> None:
        """Log in immediately; ``timeout`` bounds contract fetching (ms)."""
        _person_id = f"PAPIUSER05" \
            if simulation else os.environ['SINOTRADE_ID']
        _passwd = "2222" \
            if simulation else os.environ['SINOTRADE_PASSWD']
        super(Session, self).__init__(simulation=simulation)
        self.login(
            person_id=_person_id,
            passwd=_passwd,
            contracts_cb=lambda security_type: logging.info(f"{repr(security_type)} fetch done."),
            contracts_timeout=timeout
        )
    def __del__(self) -> None:
        # Best-effort logout when the session object is garbage-collected.
        self.logout()
        logging.info("session closed.")
    @property
    def positions(self) -> pd.DataFrame:
        """Current positions of the stock account as a DataFrame."""
        return pd.DataFrame(
            self.list_positions(self.stock_account)
        )
    def profit_loss(self, begin_date: date, end_date: date) -> pd.DataFrame:
        """Realized profit/loss between the two dates (inclusive)."""
        return pd.DataFrame(self.list_profit_loss(
            self.stock_account,
            begin_date=begin_date.strftime('%Y-%m-%d'),
            end_date=end_date.strftime('%Y-%m-%d')
        ))
    @property
    def settlements(self) -> pd.DataFrame:
        """Upcoming settlement amounts as a DataFrame."""
        return pd.DataFrame(
            self.list_settlements(self.stock_account)
        )
    @property
    def balance(self) -> pd.DataFrame:
        """Account balance as a DataFrame."""
        return pd.DataFrame(
            self.account_balance()
        )
| true | true |
f71782577c5c2b9953da5e9ae6c0a019748c14b7 | 3,361 | py | Python | src/application-insights/azext_applicationinsights/vendored_sdks/applicationinsights/models/events_trace_result.py | Mannan2812/azure-cli-extensions | e2b34efe23795f6db9c59100534a40f0813c3d95 | [
"MIT"
] | 2,728 | 2015-01-09T10:19:32.000Z | 2022-03-31T14:50:33.000Z | src/application-insights/azext_applicationinsights/vendored_sdks/applicationinsights/models/events_trace_result.py | Mannan2812/azure-cli-extensions | e2b34efe23795f6db9c59100534a40f0813c3d95 | [
"MIT"
] | 17,773 | 2015-01-05T15:57:17.000Z | 2022-03-31T23:50:25.000Z | src/application-insights/azext_applicationinsights/vendored_sdks/applicationinsights/models/events_trace_result.py | Mannan2812/azure-cli-extensions | e2b34efe23795f6db9c59100534a40f0813c3d95 | [
"MIT"
] | 1,916 | 2015-01-19T05:05:41.000Z | 2022-03-31T19:36:44.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .events_result_data import EventsResultData
class EventsTraceResult(EventsResultData):
    """A trace result.

    All required parameters must be populated in order to send to Azure.

    :param id: The unique ID for this event.
    :type id: str
    :param count: Count of the event
    :type count: long
    :param timestamp: Timestamp of the event
    :type timestamp: datetime
    :param custom_dimensions: Custom dimensions of the event
    :type custom_dimensions:
     ~azure.applicationinsights.models.EventsResultDataCustomDimensions
    :param custom_measurements: Custom measurements of the event
    :type custom_measurements:
     ~azure.applicationinsights.models.EventsResultDataCustomMeasurements
    :param operation: Operation info of the event
    :type operation: ~azure.applicationinsights.models.EventsOperationInfo
    :param session: Session info of the event
    :type session: ~azure.applicationinsights.models.EventsSessionInfo
    :param user: User info of the event
    :type user: ~azure.applicationinsights.models.EventsUserInfo
    :param cloud: Cloud info of the event
    :type cloud: ~azure.applicationinsights.models.EventsCloudInfo
    :param ai: AI info of the event
    :type ai: ~azure.applicationinsights.models.EventsAiInfo
    :param application: Application info of the event
    :type application: ~azure.applicationinsights.models.EventsApplicationInfo
    :param client: Client info of the event
    :type client: ~azure.applicationinsights.models.EventsClientInfo
    :param type: Required. Constant filled by server.
    :type type: str
    :param trace:
    :type trace: ~azure.applicationinsights.models.EventsTraceInfo
    """

    # 'type' is the polymorphic discriminator and must always be present.
    _validation = {
        'type': {'required': True},
    }

    # Maps Python attribute names to wire-format keys and msrest types.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'count': {'key': 'count', 'type': 'long'},
        'timestamp': {'key': 'timestamp', 'type': 'iso-8601'},
        'custom_dimensions': {'key': 'customDimensions', 'type': 'EventsResultDataCustomDimensions'},
        'custom_measurements': {'key': 'customMeasurements', 'type': 'EventsResultDataCustomMeasurements'},
        'operation': {'key': 'operation', 'type': 'EventsOperationInfo'},
        'session': {'key': 'session', 'type': 'EventsSessionInfo'},
        'user': {'key': 'user', 'type': 'EventsUserInfo'},
        'cloud': {'key': 'cloud', 'type': 'EventsCloudInfo'},
        'ai': {'key': 'ai', 'type': 'EventsAiInfo'},
        'application': {'key': 'application', 'type': 'EventsApplicationInfo'},
        'client': {'key': 'client', 'type': 'EventsClientInfo'},
        'type': {'key': 'type', 'type': 'str'},
        'trace': {'key': 'trace', 'type': 'EventsTraceInfo'},
    }

    def __init__(self, **kwargs):
        """Initialize from keyword arguments; the discriminator is fixed to 'trace'."""
        super(EventsTraceResult, self).__init__(**kwargs)
        self.trace = kwargs.get('trace', None)
        self.type = 'trace'
| 43.649351 | 107 | 0.650997 |
from .events_result_data import EventsResultData
class EventsTraceResult(EventsResultData):
_validation = {
'type': {'required': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'count': {'key': 'count', 'type': 'long'},
'timestamp': {'key': 'timestamp', 'type': 'iso-8601'},
'custom_dimensions': {'key': 'customDimensions', 'type': 'EventsResultDataCustomDimensions'},
'custom_measurements': {'key': 'customMeasurements', 'type': 'EventsResultDataCustomMeasurements'},
'operation': {'key': 'operation', 'type': 'EventsOperationInfo'},
'session': {'key': 'session', 'type': 'EventsSessionInfo'},
'user': {'key': 'user', 'type': 'EventsUserInfo'},
'cloud': {'key': 'cloud', 'type': 'EventsCloudInfo'},
'ai': {'key': 'ai', 'type': 'EventsAiInfo'},
'application': {'key': 'application', 'type': 'EventsApplicationInfo'},
'client': {'key': 'client', 'type': 'EventsClientInfo'},
'type': {'key': 'type', 'type': 'str'},
'trace': {'key': 'trace', 'type': 'EventsTraceInfo'},
}
def __init__(self, **kwargs):
super(EventsTraceResult, self).__init__(**kwargs)
self.trace = kwargs.get('trace', None)
self.type = 'trace'
| true | true |
f71782e87705531559d4a97ca72db46a973a03f6 | 30,692 | py | Python | examples/ner/run_ner_strain.py | Tarpelite/BERT_self_training | f50ff015f0d3669b5d927a6d28d8a08201c101b6 | [
"MIT"
] | null | null | null | examples/ner/run_ner_strain.py | Tarpelite/BERT_self_training | f50ff015f0d3669b5d927a6d28d8a08201c101b6 | [
"MIT"
] | null | null | null | examples/ner/run_ner_strain.py | Tarpelite/BERT_self_training | f50ff015f0d3669b5d927a6d28d8a08201c101b6 | [
"MIT"
] | null | null | null | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Fine-tuning the library models for named entity recognition on CoNLL-2003 (Bert or Roberta). """
import argparse
import glob
import logging
import os
import random
import numpy as np
import torch
from seqeval.metrics import f1_score, precision_score, recall_score
from torch.nn import CrossEntropyLoss
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from torch.utils.data.distributed import DistributedSampler
from tqdm import tqdm, trange
import pickle
# from pudb import set_trace
# set_trace()
from transformers import (
MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
WEIGHTS_NAME,
AdamW,
AutoConfig,
AutoModelForTokenClassification,
AutoTokenizer,
get_linear_schedule_with_warmup,
)
from utils_ner import convert_examples_to_features, get_labels, read_examples_from_file
# Prefer the native PyTorch TensorBoard writer; fall back to tensorboardX
# for older torch installs.
try:
    from torch.utils.tensorboard import SummaryWriter
except ImportError:
    from tensorboardX import SummaryWriter
# Module-level logger for training/evaluation progress messages.
logger = logging.getLogger(__name__)
# All config classes supporting token classification, plus the derived
# model-type strings and pretrained checkpoint shortcut names.
MODEL_CONFIG_CLASSES = list(MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
ALL_MODELS = sum((tuple(conf.pretrained_config_archive_map.keys()) for conf in MODEL_CONFIG_CLASSES), ())
# Tokenizer keyword arguments forwarded from command-line flags.
TOKENIZER_ARGS = ["do_lower_case", "strip_accents", "keep_accents", "use_fast"]
def set_seed(args):
    """Seed the python, numpy and torch RNGs (all GPUs too) from args.seed."""
    seed = args.seed
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    if args.n_gpu > 0:
        torch.cuda.manual_seed_all(seed)
def train(args, train_dataset, model, tokenizer, labels, pad_token_label_id):
    """Run the full training loop for the soft-label (distillation-style) setup.

    The 4th tensor of each batch is fed to the model as ``soft_labels``
    (presumably teacher logits — the dataset is built from ``args.logits_file``
    in main(); confirm against the model's forward signature).

    Args:
        args: parsed command-line namespace controlling the run.
        train_dataset: TensorDataset of (input_ids, input_mask, segment_ids, soft_labels).
        model: token-classification model; must accept a ``soft_labels`` kwarg.
        tokenizer: tokenizer, saved alongside each checkpoint.
        labels: label list, forwarded to evaluate() during in-training eval.
        pad_token_label_id: padding label id, forwarded to evaluate().

    Returns:
        Tuple of (global_step, average training loss per optimizer step).
    """
    if args.local_rank in [-1, 0]:
        tb_writer = SummaryWriter()
    # Effective per-step batch size scales with the number of GPUs in DataParallel mode.
    args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu)
    train_sampler = RandomSampler(train_dataset) if args.local_rank == -1 else DistributedSampler(train_dataset)
    train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size)
    if args.max_steps > 0:
        # A fixed step budget overrides num_train_epochs.
        t_total = args.max_steps
        args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1
    else:
        t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs
    if args.warmup_ratio > 0:
        # warmup_ratio (default 0.1) overrides the explicit --warmup_steps value.
        args.warmup_steps = int(t_total * args.warmup_ratio)
    # Prepare optimizer and schedule (linear warmup and decay); biases and
    # LayerNorm weights are excluded from weight decay.
    no_decay = ["bias", "LayerNorm.weight"]
    optimizer_grouped_parameters = [
        {
            "params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
            "weight_decay": args.weight_decay,
        },
        {"params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], "weight_decay": 0.0},
    ]
    optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
    scheduler = get_linear_schedule_with_warmup(
        optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total
    )
    # Check if saved optimizer or scheduler states exist (resuming from a checkpoint dir)
    if os.path.isfile(os.path.join(args.model_name_or_path, "optimizer.pt")) and os.path.isfile(
        os.path.join(args.model_name_or_path, "scheduler.pt")
    ):
        # Load in optimizer and scheduler states
        optimizer.load_state_dict(torch.load(os.path.join(args.model_name_or_path, "optimizer.pt")))
        scheduler.load_state_dict(torch.load(os.path.join(args.model_name_or_path, "scheduler.pt")))
    if args.fp16:
        try:
            from apex import amp
        except ImportError:
            raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
        model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level)
    # multi-gpu training (should be after apex fp16 initialization)
    if args.n_gpu > 1:
        model = torch.nn.DataParallel(model)
    # Distributed training (should be after apex fp16 initialization)
    if args.local_rank != -1:
        model = torch.nn.parallel.DistributedDataParallel(
            model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True
        )
    # Train!
    logger.info("***** Running training *****")
    logger.info(" Num examples = %d", len(train_dataset))
    logger.info(" Num Epochs = %d", args.num_train_epochs)
    logger.info(" Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size)
    logger.info(
        " Total train batch size (w. parallel, distributed & accumulation) = %d",
        args.train_batch_size
        * args.gradient_accumulation_steps
        * (torch.distributed.get_world_size() if args.local_rank != -1 else 1),
    )
    logger.info(" Gradient Accumulation steps = %d", args.gradient_accumulation_steps)
    logger.info(" Total optimization steps = %d", t_total)
    global_step = 0
    epochs_trained = 0
    steps_trained_in_current_epoch = 0
    # Check if continuing training from a checkpoint
    if os.path.exists(args.model_name_or_path):
        # set global_step to global_step of last saved checkpoint from model path
        try:
            # Checkpoint dirs are named "checkpoint-<global_step>".
            global_step = int(args.model_name_or_path.split("-")[-1].split("/")[0])
        except ValueError:
            global_step = 0
        epochs_trained = global_step // (len(train_dataloader) // args.gradient_accumulation_steps)
        steps_trained_in_current_epoch = global_step % (len(train_dataloader) // args.gradient_accumulation_steps)
        logger.info(" Continuing training from checkpoint, will skip to saved global_step")
        logger.info(" Continuing training from epoch %d", epochs_trained)
        logger.info(" Continuing training from global step %d", global_step)
        logger.info(" Will skip the first %d steps in the first epoch", steps_trained_in_current_epoch)
    tr_loss, logging_loss = 0.0, 0.0
    model.zero_grad()
    train_iterator = trange(
        epochs_trained, int(args.num_train_epochs), desc="Epoch", disable=args.local_rank not in [-1, 0]
    )
    set_seed(args)  # Re-seed here for reproducibility (after all setup randomness)
    for _ in train_iterator:
        epoch_iterator = tqdm(train_dataloader, desc="Iter(loss=X.XXX, lr=X.XXXXXXXX)", disable=args.local_rank not in [-1, 0])
        for step, batch in enumerate(epoch_iterator):
            # Skip past any already trained steps if resuming training
            if steps_trained_in_current_epoch > 0:
                steps_trained_in_current_epoch -= 1
                continue
            model.train()
            batch = tuple(t.to(args.device) for t in batch)
            # batch[3] carries the soft labels loaded from args.logits_file (see main()).
            inputs = {"input_ids": batch[0], "attention_mask": batch[1], "soft_labels": batch[3]}
            if args.model_type != "distilbert":
                inputs["token_type_ids"] = (
                    batch[2] if args.model_type in ["bert", "xlnet"] else None
                )  # XLM and RoBERTa don't use segment_ids
            outputs = model(**inputs)
            loss = outputs[0]  # model outputs are always tuple in pytorch-transformers (see doc)
            if args.n_gpu > 1:
                loss = loss.mean()  # mean() to average on multi-gpu parallel training
            if args.gradient_accumulation_steps > 1:
                loss = loss / args.gradient_accumulation_steps
            if args.fp16:
                with amp.scale_loss(loss, optimizer) as scaled_loss:
                    scaled_loss.backward()
            else:
                loss.backward()
            tr_loss += loss.item()
            if (step + 1) % args.gradient_accumulation_steps == 0:
                # Show the latest micro-batch loss and current LR in the progress bar.
                epoch_iterator.set_description('Iter (loss=%5.3f) lr=%9.7f' % (loss.item(), scheduler.get_lr()[0]))
                if args.fp16:
                    torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm)
                else:
                    torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
                optimizer.step()
                scheduler.step()  # Update learning rate schedule
                model.zero_grad()
                global_step += 1
                if args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0:
                    # Log metrics
                    if (
                        args.local_rank == -1 and args.evaluate_during_training
                    ):  # Only evaluate when single GPU otherwise metrics may not average well
                        results, _ = evaluate(args, model, tokenizer, labels, pad_token_label_id, mode="dev")
                        for key, value in results.items():
                            tb_writer.add_scalar("eval_{}".format(key), value, global_step)
                    tb_writer.add_scalar("lr", scheduler.get_lr()[0], global_step)
                    tb_writer.add_scalar("loss", (tr_loss - logging_loss) / args.logging_steps, global_step)
                    logging_loss = tr_loss
                if args.local_rank in [-1, 0] and args.save_steps > 0 and global_step % args.save_steps == 0:
                    # Save model checkpoint
                    output_dir = os.path.join(args.output_dir, "checkpoint-{}".format(global_step))
                    if not os.path.exists(output_dir):
                        os.makedirs(output_dir)
                    model_to_save = (
                        model.module if hasattr(model, "module") else model
                    )  # Take care of distributed/parallel training
                    model_to_save.save_pretrained(output_dir)
                    tokenizer.save_pretrained(output_dir)
                    torch.save(args, os.path.join(output_dir, "training_args.bin"))
                    logger.info("Saving model checkpoint to %s", output_dir)
                    torch.save(optimizer.state_dict(), os.path.join(output_dir, "optimizer.pt"))
                    torch.save(scheduler.state_dict(), os.path.join(output_dir, "scheduler.pt"))
                    logger.info("Saving optimizer and scheduler states to %s", output_dir)
            if args.max_steps > 0 and global_step > args.max_steps:
                epoch_iterator.close()
                break
        if args.max_steps > 0 and global_step > args.max_steps:
            train_iterator.close()
            break
    if args.local_rank in [-1, 0]:
        tb_writer.close()
    # NOTE(review): raises ZeroDivisionError if no optimizer step ever ran
    # (e.g. an empty dataloader) — confirm this cannot happen in practice.
    return global_step, tr_loss / global_step
def evaluate(args, model, tokenizer, labels, pad_token_label_id, mode, prefix=""):
    """Evaluate ``model`` on the ``mode`` split and report seqeval metrics.

    Args:
        args: parsed command-line namespace.
        model: token-classification model returning (loss, logits, ...).
        tokenizer: tokenizer used to featurize the evaluation file.
        labels: list of label strings; index position is the label id.
        pad_token_label_id: label id marking padded/sub-word positions to skip.
        mode: dataset split name (e.g. "dev" or "test").
        prefix: tag included in log lines (e.g. checkpoint step).

    Returns:
        Tuple of (results dict with loss/precision/recall/f1,
        list of predicted label-string sequences, one per example).
    """
    eval_dataset = load_and_cache_examples(args, tokenizer, labels, pad_token_label_id, mode=mode)
    args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)
    # Note that DistributedSampler samples randomly
    eval_sampler = SequentialSampler(eval_dataset) if args.local_rank == -1 else DistributedSampler(eval_dataset)
    eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size)
    # multi-gpu evaluate
    if args.n_gpu > 1:
        model = torch.nn.DataParallel(model)
    # Eval!
    logger.info("***** Running evaluation %s *****", prefix)
    logger.info(" Num examples = %d", len(eval_dataset))
    logger.info(" Batch size = %d", args.eval_batch_size)
    eval_loss = 0.0
    nb_eval_steps = 0
    preds = None  # accumulated logits across batches
    out_label_ids = None  # accumulated gold label ids across batches
    model.eval()
    for batch in tqdm(eval_dataloader, desc="Evaluating"):
        batch = tuple(t.to(args.device) for t in batch)
        with torch.no_grad():
            # Hard label ids here (batch[3]), unlike train() which feeds soft labels.
            inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
            if args.model_type != "distilbert":
                inputs["token_type_ids"] = (
                    batch[2] if args.model_type in ["bert", "xlnet"] else None
                )  # XLM and RoBERTa don't use segment_ids
            outputs = model(**inputs)
            tmp_eval_loss, logits = outputs[:2]
            if args.n_gpu > 1:
                tmp_eval_loss = tmp_eval_loss.mean()  # mean() to average on multi-gpu parallel evaluating
            eval_loss += tmp_eval_loss.item()
        nb_eval_steps += 1
        if preds is None:
            preds = logits.detach().cpu().numpy()
            out_label_ids = inputs["labels"].detach().cpu().numpy()
        else:
            preds = np.append(preds, logits.detach().cpu().numpy(), axis=0)
            out_label_ids = np.append(out_label_ids, inputs["labels"].detach().cpu().numpy(), axis=0)
    eval_loss = eval_loss / nb_eval_steps
    preds = np.argmax(preds, axis=2)  # logits -> predicted label ids per token
    label_map = {i: label for i, label in enumerate(labels)}
    # Rebuild per-example label-string sequences, dropping positions whose gold
    # label is the padding id (sub-word continuations and padding).
    out_label_list = [[] for _ in range(out_label_ids.shape[0])]
    preds_list = [[] for _ in range(out_label_ids.shape[0])]
    for i in range(out_label_ids.shape[0]):
        for j in range(out_label_ids.shape[1]):
            if out_label_ids[i, j] != pad_token_label_id:
                out_label_list[i].append(label_map[out_label_ids[i][j]])
                preds_list[i].append(label_map[preds[i][j]])
    # Debug output for the first example only.
    print("preds:", preds_list[0])
    print("labels:", out_label_list[0])
    results = {
        "loss": eval_loss,
        "precision": precision_score(out_label_list, preds_list),
        "recall": recall_score(out_label_list, preds_list),
        "f1": f1_score(out_label_list, preds_list),
    }
    logger.info("***** Eval results %s *****", prefix)
    for key in sorted(results.keys()):
        logger.info(" %s = %s", key, str(results[key]))
    return results, preds_list
def load_and_cache_examples(args, tokenizer, labels, pad_token_label_id, mode):
    """Build a ``TensorDataset`` of NER features from ``args.eval_file``.

    Args:
        args: parsed command-line namespace (uses ``local_rank``, ``data_dir``,
            ``eval_file``, ``max_seq_length``, ``model_type``).
        tokenizer: pretrained tokenizer used to encode the examples.
        labels: list of label strings for the tagging task.
        pad_token_label_id: label id assigned to padding/sub-word positions.
        mode: dataset split name forwarded to ``read_examples_from_file``
            (e.g. "train", "dev", "test").

    Returns:
        TensorDataset of (input_ids, input_mask, segment_ids, label_ids).
    """
    # BUG FIX: the original guard read ``not evaluate``, which referenced the
    # module-level ``evaluate`` *function* (always truthy), so both distributed
    # barriers below were dead code. Derive the flag from the split name
    # instead: only rank 0 should build features first when training distributed.
    is_training = mode == "train"
    if args.local_rank not in [-1, 0] and is_training:
        torch.distributed.barrier()  # non-first ranks wait for rank 0 to build the features
    logger.info("Creating features from dataset file at %s", args.data_dir)
    examples = read_examples_from_file(args.eval_file, mode)
    features = convert_examples_to_features(
        examples,
        labels,
        args.max_seq_length,
        tokenizer,
        cls_token_at_end=bool(args.model_type in ["xlnet"]),
        # xlnet has a cls token at the end
        cls_token=tokenizer.cls_token,
        cls_token_segment_id=2 if args.model_type in ["xlnet"] else 0,
        sep_token=tokenizer.sep_token,
        sep_token_extra=bool(args.model_type in ["roberta"]),
        # roberta uses an extra separator b/w pairs of sentences
        pad_on_left=bool(args.model_type in ["xlnet"]),
        # pad on the left for xlnet
        pad_token=tokenizer.pad_token_id,
        pad_token_segment_id=tokenizer.pad_token_type_id,
        pad_token_label_id=pad_token_label_id,
    )
    if args.local_rank == 0 and is_training:
        torch.distributed.barrier()  # rank 0 releases the waiting ranks
    # Convert to Tensors and build dataset
    all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
    all_input_mask = torch.tensor([f.input_mask for f in features], dtype=torch.long)
    all_segment_ids = torch.tensor([f.segment_ids for f in features], dtype=torch.long)
    all_label_ids = torch.tensor([f.label_ids for f in features], dtype=torch.long)
    dataset = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids)
    return dataset
def main():
    """Command-line entry point: parse arguments, then train / evaluate / predict.

    Training consumes a pre-featurized pickle (``--logits_file``) whose 4th
    array holds float logits used as soft labels; evaluation and prediction
    featurize ``--eval_file`` on the fly via load_and_cache_examples().
    """
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--data_dir",
        default=None,
        type=str,
        help="The input data dir. Should contain the training files for the CoNLL-2003 NER task.",
    )
    parser.add_argument(
        "--model_type",
        default=None,
        type=str,
        required=True,
        help="Model type selected in the list: " + ", ".join(MODEL_TYPES),
    )
    parser.add_argument(
        "--model_name_or_path",
        default=None,
        type=str,
        required=True,
        help="Path to pre-trained model or shortcut name selected in the list: " + ", ".join(ALL_MODELS),
    )
    parser.add_argument(
        "--output_dir",
        default=None,
        type=str,
        required=True,
        help="The output directory where the model predictions and checkpoints will be written.",
    )
    # Other parameters
    parser.add_argument(
        "--labels",
        default="",
        type=str,
        help="Path to a file containing all labels. If not specified, CoNLL-2003 labels are used.",
    )
    parser.add_argument(
        "--config_name", default="", type=str, help="Pretrained config name or path if not the same as model_name"
    )
    parser.add_argument(
        "--tokenizer_name",
        default="",
        type=str,
        help="Pretrained tokenizer name or path if not the same as model_name",
    )
    parser.add_argument(
        "--cache_dir",
        default="",
        type=str,
        help="Where do you want to store the pre-trained models downloaded from s3",
    )
    parser.add_argument(
        "--max_seq_length",
        default=128,
        type=int,
        help="The maximum total input sequence length after tokenization. Sequences longer "
        "than this will be truncated, sequences shorter will be padded.",
    )
    parser.add_argument("--do_train", action="store_true", help="Whether to run training.")
    parser.add_argument("--do_eval", action="store_true", help="Whether to run eval on the dev set.")
    parser.add_argument("--do_predict", action="store_true", help="Whether to run predictions on the test set.")
    parser.add_argument(
        "--evaluate_during_training",
        action="store_true",
        help="Whether to run evaluation during training at each logging step.",
    )
    parser.add_argument(
        "--do_lower_case", action="store_true", help="Set this flag if you are using an uncased model."
    )
    # store_const (not store_true) so an unset flag stays None and is filtered
    # out of tokenizer_args below, letting the tokenizer keep its own default.
    parser.add_argument(
        "--keep_accents", action="store_const", const=True, help="Set this flag if model is trained with accents."
    )
    parser.add_argument(
        "--strip_accents", action="store_const", const=True, help="Set this flag if model is trained without accents."
    )
    parser.add_argument("--use_fast", action="store_const", const=True, help="Set this flag to use fast tokenization.")
    parser.add_argument("--per_gpu_train_batch_size", default=8, type=int, help="Batch size per GPU/CPU for training.")
    parser.add_argument(
        "--per_gpu_eval_batch_size", default=8, type=int, help="Batch size per GPU/CPU for evaluation."
    )
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="Number of updates steps to accumulate before performing a backward/update pass.",
    )
    parser.add_argument("--learning_rate", default=5e-5, type=float, help="The initial learning rate for Adam.")
    parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight decay if we apply some.")
    parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.")
    parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.")
    parser.add_argument(
        "--num_train_epochs", default=3.0, type=float, help="Total number of training epochs to perform."
    )
    parser.add_argument(
        "--max_steps",
        default=-1,
        type=int,
        help="If > 0: set total number of training steps to perform. Override num_train_epochs.",
    )
    parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.")
    parser.add_argument("--logging_steps", type=int, default=500, help="Log every X updates steps.")
    parser.add_argument("--save_steps", type=int, default=500, help="Save checkpoint every X updates steps.")
    # Pickle with (input_ids, input_mask, segment_ids, teacher logits) used as the training set.
    parser.add_argument("--logits_file", type=str, default="")
    # CoNLL-format file featurized by load_and_cache_examples() for eval/predict.
    parser.add_argument("--eval_file", type=str, default="")
    # NOTE: a ratio > 0 (the default) overrides --warmup_steps in train().
    parser.add_argument("--warmup_ratio", type=float, default=0.1)
    parser.add_argument(
        "--eval_all_checkpoints",
        action="store_true",
        help="Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number",
    )
    parser.add_argument("--no_cuda", action="store_true", help="Avoid using CUDA when available")
    parser.add_argument(
        "--overwrite_output_dir", action="store_true", help="Overwrite the content of the output directory"
    )
    parser.add_argument(
        "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets"
    )
    parser.add_argument("--seed", type=int, default=42, help="random seed for initialization")
    parser.add_argument(
        "--fp16",
        action="store_true",
        help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit",
    )
    parser.add_argument(
        "--fp16_opt_level",
        type=str,
        default="O1",
        help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
        "See details at https://nvidia.github.io/apex/amp.html",
    )
    parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank")
    parser.add_argument("--server_ip", type=str, default="", help="For distant debugging.")
    parser.add_argument("--server_port", type=str, default="", help="For distant debugging.")
    args = parser.parse_args()
    # Refuse to clobber a non-empty output dir unless explicitly allowed.
    if (
        os.path.exists(args.output_dir)
        and os.listdir(args.output_dir)
        and args.do_train
        and not args.overwrite_output_dir
    ):
        raise ValueError(
            "Output directory ({}) already exists and is not empty. Use --overwrite_output_dir to overcome.".format(
                args.output_dir
            )
        )
    # Setup distant debugging if needed
    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd
        print("Waiting for debugger attach")
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
        ptvsd.wait_for_attach()
    # Setup CUDA, GPU & distributed training
    if args.local_rank == -1 or args.no_cuda:
        device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
        args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()
    else:  # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
        torch.cuda.set_device(args.local_rank)
        device = torch.device("cuda", args.local_rank)
        torch.distributed.init_process_group(backend="nccl")
        args.n_gpu = 1
    args.device = device
    # Setup logging (non-first ranks log only warnings to avoid duplicate output)
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN,
    )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        args.local_rank,
        device,
        args.n_gpu,
        bool(args.local_rank != -1),
        args.fp16,
    )
    # Set seed
    set_seed(args)
    # Prepare CONLL-2003 task
    labels = get_labels(args.labels)
    num_labels = len(labels)
    # Use cross entropy ignore index as padding label id so that only real label ids contribute to the loss later
    pad_token_label_id = CrossEntropyLoss().ignore_index
    # Load pretrained model and tokenizer
    if args.local_rank not in [-1, 0]:
        torch.distributed.barrier()  # Make sure only the first process in distributed training will download model & vocab
    args.model_type = args.model_type.lower()
    config = AutoConfig.from_pretrained(
        args.config_name if args.config_name else args.model_name_or_path,
        num_labels=num_labels,
        id2label={str(i): label for i, label in enumerate(labels)},
        label2id={label: i for i, label in enumerate(labels)},
        cache_dir=args.cache_dir if args.cache_dir else None,
    )
    # Forward only the tokenizer flags the user actually set (None values dropped).
    tokenizer_args = {k: v for k, v in vars(args).items() if v is not None and k in TOKENIZER_ARGS}
    logger.info("Tokenizer arguments: %s", tokenizer_args)
    tokenizer = AutoTokenizer.from_pretrained(
        args.tokenizer_name if args.tokenizer_name else args.model_name_or_path,
        cache_dir=args.cache_dir if args.cache_dir else None,
        **tokenizer_args,
    )
    model = AutoModelForTokenClassification.from_pretrained(
        args.model_name_or_path,
        from_tf=bool(".ckpt" in args.model_name_or_path),
        config=config,
        cache_dir=args.cache_dir if args.cache_dir else None,
    )
    if args.local_rank == 0:
        torch.distributed.barrier()  # Make sure only the first process in distributed training will download model & vocab
    model.to(args.device)
    logger.info("Training/evaluation parameters %s", args)
    # Training
    if args.do_train:
        # train_dataset = load_and_cache_examples(args, tokenizer, labels, pad_token_label_id, mode="train")
        # The training set is a pre-featurized pickle; datasets[3] holds float
        # logits consumed as soft labels by train().
        with open(args.logits_file, "rb") as f:
            datasets = pickle.load(f)
        all_input_ids = torch.tensor(datasets[0], dtype=torch.long)
        all_input_mask = torch.tensor(datasets[1], dtype=torch.long)
        all_segment_ids = torch.tensor(datasets[2], dtype=torch.long)
        all_ner_logits = torch.tensor(datasets[3], dtype=torch.float)
        train_dataset = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_ner_logits)
        global_step, tr_loss = train(args, train_dataset, model, tokenizer, labels, pad_token_label_id)
        logger.info(" global_step = %s, average loss = %s", global_step, tr_loss)
    # Saving best-practices: if you use defaults names for the model, you can reload it using from_pretrained()
    if args.do_train and (args.local_rank == -1 or torch.distributed.get_rank() == 0):
        # Create output directory if needed
        if not os.path.exists(args.output_dir) and args.local_rank in [-1, 0]:
            os.makedirs(args.output_dir)
        logger.info("Saving model checkpoint to %s", args.output_dir)
        # Save a trained model, configuration and tokenizer using `save_pretrained()`.
        # They can then be reloaded using `from_pretrained()`
        model_to_save = (
            model.module if hasattr(model, "module") else model
        )  # Take care of distributed/parallel training
        model_to_save.save_pretrained(args.output_dir)
        tokenizer.save_pretrained(args.output_dir)
        # Good practice: save your training arguments together with the trained model
        torch.save(args, os.path.join(args.output_dir, "training_args.bin"))
    # Evaluation
    results = {}
    if args.do_eval and args.local_rank in [-1, 0]:
        tokenizer = AutoTokenizer.from_pretrained(args.output_dir, **tokenizer_args)
        checkpoints = [args.output_dir]
        if args.eval_all_checkpoints:
            checkpoints = list(
                os.path.dirname(c) for c in sorted(glob.glob(args.output_dir + "/**/" + WEIGHTS_NAME, recursive=True))
            )
            logging.getLogger("pytorch_transformers.modeling_utils").setLevel(logging.WARN)  # Reduce logging
        logger.info("Evaluate the following checkpoints: %s", checkpoints)
        for checkpoint in checkpoints:
            global_step = checkpoint.split("-")[-1] if len(checkpoints) > 1 else ""
            model = AutoModelForTokenClassification.from_pretrained(checkpoint)
            model.to(args.device)
            result, _ = evaluate(args, model, tokenizer, labels, pad_token_label_id, mode="dev", prefix=global_step)
            if global_step:
                result = {"{}_{}".format(global_step, k): v for k, v in result.items()}
            results.update(result)
        output_eval_file = os.path.join(args.output_dir, "eval_results.txt")
        with open(output_eval_file, "w") as writer:
            for key in sorted(results.keys()):
                writer.write("{} = {}\n".format(key, str(results[key])))
    if args.do_predict and args.local_rank in [-1, 0]:
        tokenizer = AutoTokenizer.from_pretrained(args.output_dir, **tokenizer_args)
        model = AutoModelForTokenClassification.from_pretrained(args.output_dir)
        model.to(args.device)
        result, predictions = evaluate(args, model, tokenizer, labels, pad_token_label_id, mode="test")
        # Save results
        output_test_results_file = os.path.join(args.output_dir, "test_results.txt")
        with open(output_test_results_file, "w") as writer:
            for key in sorted(result.keys()):
                writer.write("{} = {}\n".format(key, str(result[key])))
        # Save predictions, re-aligning them with the original CoNLL test file.
        output_test_predictions_file = os.path.join(args.output_dir, "test_predictions.txt")
        with open(output_test_predictions_file, "w") as writer:
            with open(os.path.join(args.data_dir, "test.txt"), "r") as f:
                example_id = 0
                for line in f:
                    if line.startswith("-DOCSTART-") or line == "" or line == "\n":
                        # Sentence boundary: advance to the next example once its
                        # predictions have been exhausted.
                        writer.write(line)
                        if not predictions[example_id]:
                            example_id += 1
                    elif predictions[example_id]:
                        output_line = line.split()[0] + " " + predictions[example_id].pop(0) + "\n"
                        writer.write(output_line)
                    else:
                        # Token had no prediction (sequence was truncated at max_seq_length).
                        logger.warning("Maximum sequence length exceeded: No prediction for '%s'.", line.split()[0])
    return results
# Script entry point.
if __name__ == "__main__":
    main()
| 44.161151 | 150 | 0.655741 |
import argparse
import glob
import logging
import os
import random
import numpy as np
import torch
from seqeval.metrics import f1_score, precision_score, recall_score
from torch.nn import CrossEntropyLoss
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from torch.utils.data.distributed import DistributedSampler
from tqdm import tqdm, trange
import pickle
from transformers import (
MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
WEIGHTS_NAME,
AdamW,
AutoConfig,
AutoModelForTokenClassification,
AutoTokenizer,
get_linear_schedule_with_warmup,
)
from utils_ner import convert_examples_to_features, get_labels, read_examples_from_file
try:
from torch.utils.tensorboard import SummaryWriter
except ImportError:
from tensorboardX import SummaryWriter
logger = logging.getLogger(__name__)

# All config classes that support token classification, plus the model-type
# strings and pretrained-checkpoint names derived from them (used in --help text).
MODEL_CONFIG_CLASSES = list(MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
ALL_MODELS = sum((tuple(conf.pretrained_config_archive_map.keys()) for conf in MODEL_CONFIG_CLASSES), ())
# Tokenizer-constructor kwargs that may be forwarded from the CLI namespace.
TOKENIZER_ARGS = ["do_lower_case", "strip_accents", "keep_accents", "use_fast"]
def set_seed(args):
    """Seed all RNGs used during training so runs are reproducible.

    Reads ``args.seed`` and ``args.n_gpu``; CUDA generators are seeded
    only when at least one GPU is in use.
    """
    base_seed = args.seed
    random.seed(base_seed)
    np.random.seed(base_seed)
    torch.manual_seed(base_seed)
    gpu_count = args.n_gpu
    if gpu_count > 0:
        torch.cuda.manual_seed_all(base_seed)
def train(args, train_dataset, model, tokenizer, labels, pad_token_label_id):
    """Run the full training loop for the soft-label (distillation-style) setup.

    The 4th tensor of each batch is fed to the model as ``soft_labels``
    (presumably teacher logits — the dataset is built from ``args.logits_file``
    in main(); confirm against the model's forward signature).

    Args:
        args: parsed command-line namespace controlling the run.
        train_dataset: TensorDataset of (input_ids, input_mask, segment_ids, soft_labels).
        model: token-classification model; must accept a ``soft_labels`` kwarg.
        tokenizer: tokenizer, saved alongside each checkpoint.
        labels: label list, forwarded to evaluate() during in-training eval.
        pad_token_label_id: padding label id, forwarded to evaluate().

    Returns:
        Tuple of (global_step, average training loss per optimizer step).
    """
    if args.local_rank in [-1, 0]:
        tb_writer = SummaryWriter()
    # Effective per-step batch size scales with the number of GPUs in DataParallel mode.
    args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu)
    train_sampler = RandomSampler(train_dataset) if args.local_rank == -1 else DistributedSampler(train_dataset)
    train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size)
    if args.max_steps > 0:
        # A fixed step budget overrides num_train_epochs.
        t_total = args.max_steps
        args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1
    else:
        t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs
    if args.warmup_ratio > 0:
        # warmup_ratio (default 0.1) overrides the explicit --warmup_steps value.
        args.warmup_steps = int(t_total * args.warmup_ratio)
    # Prepare optimizer and schedule (linear warmup and decay); biases and
    # LayerNorm weights are excluded from weight decay.
    no_decay = ["bias", "LayerNorm.weight"]
    optimizer_grouped_parameters = [
        {
            "params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
            "weight_decay": args.weight_decay,
        },
        {"params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], "weight_decay": 0.0},
    ]
    optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
    scheduler = get_linear_schedule_with_warmup(
        optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total
    )
    # Check if saved optimizer or scheduler states exist (resuming from a checkpoint dir)
    if os.path.isfile(os.path.join(args.model_name_or_path, "optimizer.pt")) and os.path.isfile(
        os.path.join(args.model_name_or_path, "scheduler.pt")
    ):
        # Load in optimizer and scheduler states
        optimizer.load_state_dict(torch.load(os.path.join(args.model_name_or_path, "optimizer.pt")))
        scheduler.load_state_dict(torch.load(os.path.join(args.model_name_or_path, "scheduler.pt")))
    if args.fp16:
        try:
            from apex import amp
        except ImportError:
            raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
        model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level)
    # multi-gpu training (should be after apex fp16 initialization)
    if args.n_gpu > 1:
        model = torch.nn.DataParallel(model)
    # Distributed training (should be after apex fp16 initialization)
    if args.local_rank != -1:
        model = torch.nn.parallel.DistributedDataParallel(
            model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True
        )
    # Train!
    logger.info("***** Running training *****")
    logger.info(" Num examples = %d", len(train_dataset))
    logger.info(" Num Epochs = %d", args.num_train_epochs)
    logger.info(" Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size)
    logger.info(
        " Total train batch size (w. parallel, distributed & accumulation) = %d",
        args.train_batch_size
        * args.gradient_accumulation_steps
        * (torch.distributed.get_world_size() if args.local_rank != -1 else 1),
    )
    logger.info(" Gradient Accumulation steps = %d", args.gradient_accumulation_steps)
    logger.info(" Total optimization steps = %d", t_total)
    global_step = 0
    epochs_trained = 0
    steps_trained_in_current_epoch = 0
    # Check if continuing training from a checkpoint
    if os.path.exists(args.model_name_or_path):
        # set global_step to global_step of last saved checkpoint from model path
        try:
            # Checkpoint dirs are named "checkpoint-<global_step>".
            global_step = int(args.model_name_or_path.split("-")[-1].split("/")[0])
        except ValueError:
            global_step = 0
        epochs_trained = global_step // (len(train_dataloader) // args.gradient_accumulation_steps)
        steps_trained_in_current_epoch = global_step % (len(train_dataloader) // args.gradient_accumulation_steps)
        logger.info(" Continuing training from checkpoint, will skip to saved global_step")
        logger.info(" Continuing training from epoch %d", epochs_trained)
        logger.info(" Continuing training from global step %d", global_step)
        logger.info(" Will skip the first %d steps in the first epoch", steps_trained_in_current_epoch)
    tr_loss, logging_loss = 0.0, 0.0
    model.zero_grad()
    train_iterator = trange(
        epochs_trained, int(args.num_train_epochs), desc="Epoch", disable=args.local_rank not in [-1, 0]
    )
    set_seed(args)  # Re-seed here for reproducibility (after all setup randomness)
    for _ in train_iterator:
        epoch_iterator = tqdm(train_dataloader, desc="Iter(loss=X.XXX, lr=X.XXXXXXXX)", disable=args.local_rank not in [-1, 0])
        for step, batch in enumerate(epoch_iterator):
            # Skip past any already trained steps if resuming training
            if steps_trained_in_current_epoch > 0:
                steps_trained_in_current_epoch -= 1
                continue
            model.train()
            batch = tuple(t.to(args.device) for t in batch)
            # batch[3] carries the soft labels loaded from args.logits_file (see main()).
            inputs = {"input_ids": batch[0], "attention_mask": batch[1], "soft_labels": batch[3]}
            if args.model_type != "distilbert":
                inputs["token_type_ids"] = (
                    batch[2] if args.model_type in ["bert", "xlnet"] else None
                )  # XLM and RoBERTa don't use segment_ids
            outputs = model(**inputs)
            loss = outputs[0]  # model outputs are always tuple in pytorch-transformers (see doc)
            if args.n_gpu > 1:
                loss = loss.mean()  # mean() to average on multi-gpu parallel training
            if args.gradient_accumulation_steps > 1:
                loss = loss / args.gradient_accumulation_steps
            if args.fp16:
                with amp.scale_loss(loss, optimizer) as scaled_loss:
                    scaled_loss.backward()
            else:
                loss.backward()
            tr_loss += loss.item()
            if (step + 1) % args.gradient_accumulation_steps == 0:
                # Show the latest micro-batch loss and current LR in the progress bar.
                epoch_iterator.set_description('Iter (loss=%5.3f) lr=%9.7f' % (loss.item(), scheduler.get_lr()[0]))
                if args.fp16:
                    torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm)
                else:
                    torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
                optimizer.step()
                scheduler.step()  # Update learning rate schedule
                model.zero_grad()
                global_step += 1
                if args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0:
                    # Log metrics
                    if (
                        args.local_rank == -1 and args.evaluate_during_training
                    ):  # Only evaluate when single GPU otherwise metrics may not average well
                        results, _ = evaluate(args, model, tokenizer, labels, pad_token_label_id, mode="dev")
                        for key, value in results.items():
                            tb_writer.add_scalar("eval_{}".format(key), value, global_step)
                    tb_writer.add_scalar("lr", scheduler.get_lr()[0], global_step)
                    tb_writer.add_scalar("loss", (tr_loss - logging_loss) / args.logging_steps, global_step)
                    logging_loss = tr_loss
                if args.local_rank in [-1, 0] and args.save_steps > 0 and global_step % args.save_steps == 0:
                    # Save model checkpoint
                    output_dir = os.path.join(args.output_dir, "checkpoint-{}".format(global_step))
                    if not os.path.exists(output_dir):
                        os.makedirs(output_dir)
                    model_to_save = (
                        model.module if hasattr(model, "module") else model
                    )  # Take care of distributed/parallel training
                    model_to_save.save_pretrained(output_dir)
                    tokenizer.save_pretrained(output_dir)
                    torch.save(args, os.path.join(output_dir, "training_args.bin"))
                    logger.info("Saving model checkpoint to %s", output_dir)
                    torch.save(optimizer.state_dict(), os.path.join(output_dir, "optimizer.pt"))
                    torch.save(scheduler.state_dict(), os.path.join(output_dir, "scheduler.pt"))
                    logger.info("Saving optimizer and scheduler states to %s", output_dir)
            if args.max_steps > 0 and global_step > args.max_steps:
                epoch_iterator.close()
                break
        if args.max_steps > 0 and global_step > args.max_steps:
            train_iterator.close()
            break
    if args.local_rank in [-1, 0]:
        tb_writer.close()
    # NOTE(review): raises ZeroDivisionError if no optimizer step ever ran
    # (e.g. an empty dataloader) — confirm this cannot happen in practice.
    return global_step, tr_loss / global_step
def evaluate(args, model, tokenizer, labels, pad_token_label_id, mode, prefix=""):
    """Evaluate ``model`` on the ``mode`` split and report seqeval metrics.

    Args:
        args: parsed command-line namespace.
        model: token-classification model returning (loss, logits, ...).
        tokenizer: tokenizer used to featurize the evaluation file.
        labels: list of label strings; index position is the label id.
        pad_token_label_id: label id marking padded/sub-word positions to skip.
        mode: dataset split name (e.g. "dev" or "test").
        prefix: tag included in log lines (e.g. checkpoint step).

    Returns:
        Tuple of (results dict with loss/precision/recall/f1,
        list of predicted label-string sequences, one per example).
    """
    eval_dataset = load_and_cache_examples(args, tokenizer, labels, pad_token_label_id, mode=mode)
    args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)
    # Note that DistributedSampler samples randomly
    eval_sampler = SequentialSampler(eval_dataset) if args.local_rank == -1 else DistributedSampler(eval_dataset)
    eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size)
    # multi-gpu evaluate
    if args.n_gpu > 1:
        model = torch.nn.DataParallel(model)
    # Eval!
    logger.info("***** Running evaluation %s *****", prefix)
    logger.info(" Num examples = %d", len(eval_dataset))
    logger.info(" Batch size = %d", args.eval_batch_size)
    eval_loss = 0.0
    nb_eval_steps = 0
    preds = None  # accumulated logits across batches
    out_label_ids = None  # accumulated gold label ids across batches
    model.eval()
    for batch in tqdm(eval_dataloader, desc="Evaluating"):
        batch = tuple(t.to(args.device) for t in batch)
        with torch.no_grad():
            # Hard label ids here (batch[3]), unlike train() which feeds soft labels.
            inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
            if args.model_type != "distilbert":
                inputs["token_type_ids"] = (
                    batch[2] if args.model_type in ["bert", "xlnet"] else None
                )  # XLM and RoBERTa don't use segment_ids
            outputs = model(**inputs)
            tmp_eval_loss, logits = outputs[:2]
            if args.n_gpu > 1:
                tmp_eval_loss = tmp_eval_loss.mean()  # mean() to average on multi-gpu parallel evaluating
            eval_loss += tmp_eval_loss.item()
        nb_eval_steps += 1
        if preds is None:
            preds = logits.detach().cpu().numpy()
            out_label_ids = inputs["labels"].detach().cpu().numpy()
        else:
            preds = np.append(preds, logits.detach().cpu().numpy(), axis=0)
            out_label_ids = np.append(out_label_ids, inputs["labels"].detach().cpu().numpy(), axis=0)
    eval_loss = eval_loss / nb_eval_steps
    preds = np.argmax(preds, axis=2)  # logits -> predicted label ids per token
    label_map = {i: label for i, label in enumerate(labels)}
    # Rebuild per-example label-string sequences, dropping positions whose gold
    # label is the padding id (sub-word continuations and padding).
    out_label_list = [[] for _ in range(out_label_ids.shape[0])]
    preds_list = [[] for _ in range(out_label_ids.shape[0])]
    for i in range(out_label_ids.shape[0]):
        for j in range(out_label_ids.shape[1]):
            if out_label_ids[i, j] != pad_token_label_id:
                out_label_list[i].append(label_map[out_label_ids[i][j]])
                preds_list[i].append(label_map[preds[i][j]])
    # Debug output for the first example only.
    print("preds:", preds_list[0])
    print("labels:", out_label_list[0])
    results = {
        "loss": eval_loss,
        "precision": precision_score(out_label_list, preds_list),
        "recall": recall_score(out_label_list, preds_list),
        "f1": f1_score(out_label_list, preds_list),
    }
    logger.info("***** Eval results %s *****", prefix)
    for key in sorted(results.keys()):
        logger.info(" %s = %s", key, str(results[key]))
    return results, preds_list
def load_and_cache_examples(args, tokenizer, labels, pad_token_label_id, mode):
    """Build a ``TensorDataset`` of NER features read from ``args.eval_file``.

    Args:
        args: Parsed command-line namespace (uses ``local_rank``, ``data_dir``,
            ``eval_file``, ``max_seq_length``, ``model_type``).
        tokenizer: Pretrained tokenizer providing special tokens and pad ids.
        labels: List of NER label strings.
        pad_token_label_id: Label id assigned to padding positions (ignored by
            the loss).
        mode: Dataset split name passed to ``read_examples_from_file``
            (e.g. "train", "dev", "test").

    Returns:
        torch.utils.data.TensorDataset of
        (input_ids, input_mask, segment_ids, label_ids).
    """
    # BUG FIX: the original tested ``not evaluate`` where ``evaluate`` referred
    # to the module-level *function* (always truthy), so the distributed
    # barriers below could never fire.  Derive the intended flag from ``mode``.
    evaluate = mode != "train"
    if args.local_rank not in [-1, 0] and not evaluate:
        # In distributed training, let only the first process build the
        # features; the other ranks wait here until it is done.
        torch.distributed.barrier()
    logger.info("Creating features from dataset file at %s", args.data_dir)
    examples = read_examples_from_file(args.eval_file, mode)
    features = convert_examples_to_features(
        examples,
        labels,
        args.max_seq_length,
        tokenizer,
        cls_token_at_end=bool(args.model_type in ["xlnet"]),  # xlnet: CLS at the end
        cls_token=tokenizer.cls_token,
        cls_token_segment_id=2 if args.model_type in ["xlnet"] else 0,
        sep_token=tokenizer.sep_token,
        sep_token_extra=bool(args.model_type in ["roberta"]),  # roberta: extra separator
        pad_on_left=bool(args.model_type in ["xlnet"]),  # xlnet: left padding
        pad_token=tokenizer.pad_token_id,
        pad_token_segment_id=tokenizer.pad_token_type_id,
        pad_token_label_id=pad_token_label_id,
    )
    if args.local_rank == 0 and not evaluate:
        # First process is done; release the waiting ranks.
        torch.distributed.barrier()
    # Convert the feature lists to tensors and bundle them into a dataset.
    all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
    all_input_mask = torch.tensor([f.input_mask for f in features], dtype=torch.long)
    all_segment_ids = torch.tensor([f.segment_ids for f in features], dtype=torch.long)
    all_label_ids = torch.tensor([f.label_ids for f in features], dtype=torch.long)
    dataset = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids)
    return dataset
def main():
    """CLI entry point: parse arguments and run the requested combination of
    training, evaluation and prediction for token classification (NER)."""
    parser = argparse.ArgumentParser()

    # --- data / model selection -----------------------------------------
    parser.add_argument(
        "--data_dir",
        default=None,
        type=str,
        help="The input data dir. Should contain the training files for the CoNLL-2003 NER task.",
    )
    parser.add_argument(
        "--model_type",
        default=None,
        type=str,
        required=True,
        help="Model type selected in the list: " + ", ".join(MODEL_TYPES),
    )
    parser.add_argument(
        "--model_name_or_path",
        default=None,
        type=str,
        required=True,
        help="Path to pre-trained model or shortcut name selected in the list: " + ", ".join(ALL_MODELS),
    )
    parser.add_argument(
        "--output_dir",
        default=None,
        type=str,
        required=True,
        help="The output directory where the model predictions and checkpoints will be written.",
    )
    parser.add_argument(
        "--labels",
        default="",
        type=str,
        help="Path to a file containing all labels. If not specified, CoNLL-2003 labels are used.",
    )
    parser.add_argument(
        "--config_name", default="", type=str, help="Pretrained config name or path if not the same as model_name"
    )
    parser.add_argument(
        "--tokenizer_name",
        default="",
        type=str,
        help="Pretrained tokenizer name or path if not the same as model_name",
    )
    parser.add_argument(
        "--cache_dir",
        default="",
        type=str,
        help="Where do you want to store the pre-trained models downloaded from s3",
    )
    parser.add_argument(
        "--max_seq_length",
        default=128,
        type=int,
        help="The maximum total input sequence length after tokenization. Sequences longer "
        "than this will be truncated, sequences shorter will be padded.",
    )
    # --- which phases to run --------------------------------------------
    parser.add_argument("--do_train", action="store_true", help="Whether to run training.")
    parser.add_argument("--do_eval", action="store_true", help="Whether to run eval on the dev set.")
    parser.add_argument("--do_predict", action="store_true", help="Whether to run predictions on the test set.")
    parser.add_argument(
        "--evaluate_during_training",
        action="store_true",
        help="Whether to run evaluation during training at each logging step.",
    )
    # --- tokenizer behaviour --------------------------------------------
    parser.add_argument(
        "--do_lower_case", action="store_true", help="Set this flag if you are using an uncased model."
    )
    parser.add_argument(
        "--keep_accents", action="store_const", const=True, help="Set this flag if model is trained with accents."
    )
    parser.add_argument(
        "--strip_accents", action="store_const", const=True, help="Set this flag if model is trained without accents."
    )
    parser.add_argument("--use_fast", action="store_const", const=True, help="Set this flag to use fast tokenization.")
    # --- optimisation hyper-parameters ----------------------------------
    parser.add_argument("--per_gpu_train_batch_size", default=8, type=int, help="Batch size per GPU/CPU for training.")
    parser.add_argument(
        "--per_gpu_eval_batch_size", default=8, type=int, help="Batch size per GPU/CPU for evaluation."
    )
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="Number of updates steps to accumulate before performing a backward/update pass.",
    )
    parser.add_argument("--learning_rate", default=5e-5, type=float, help="The initial learning rate for Adam.")
    parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight decay if we apply some.")
    parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.")
    parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.")
    parser.add_argument(
        "--num_train_epochs", default=3.0, type=float, help="Total number of training epochs to perform."
    )
    parser.add_argument(
        "--max_steps",
        default=-1,
        type=int,
        help="If > 0: set total number of training steps to perform. Override num_train_epochs.",
    )
    parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.")
    # --- logging / checkpointing / misc ---------------------------------
    parser.add_argument("--logging_steps", type=int, default=500, help="Log every X updates steps.")
    parser.add_argument("--save_steps", type=int, default=500, help="Save checkpoint every X updates steps.")
    parser.add_argument("--logits_file", type=str, default="")  # pickled pre-computed training tensors
    parser.add_argument("--eval_file", type=str, default="")
    parser.add_argument("--warmup_ratio", type=float, default=0.1)
    parser.add_argument(
        "--eval_all_checkpoints",
        action="store_true",
        help="Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number",
    )
    parser.add_argument("--no_cuda", action="store_true", help="Avoid using CUDA when available")
    parser.add_argument(
        "--overwrite_output_dir", action="store_true", help="Overwrite the content of the output directory"
    )
    parser.add_argument(
        "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets"
    )
    parser.add_argument("--seed", type=int, default=42, help="random seed for initialization")
    parser.add_argument(
        "--fp16",
        action="store_true",
        help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit",
    )
    parser.add_argument(
        "--fp16_opt_level",
        type=str,
        default="O1",
        help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
        "See details at https://nvidia.github.io/apex/amp.html",
    )
    parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank")
    parser.add_argument("--server_ip", type=str, default="", help="For distant debugging.")
    parser.add_argument("--server_port", type=str, default="", help="For distant debugging.")
    args = parser.parse_args()

    # Refuse to clobber an existing, non-empty output directory unless
    # explicitly allowed.
    if (
        os.path.exists(args.output_dir)
        and os.listdir(args.output_dir)
        and args.do_train
        and not args.overwrite_output_dir
    ):
        raise ValueError(
            "Output directory ({}) already exists and is not empty. Use --overwrite_output_dir to overcome.".format(
                args.output_dir
            )
        )

    # Optional remote-debugger attach (ptvsd).
    if args.server_ip and args.server_port:
        print("Waiting for debugger attach")
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
        ptvsd.wait_for_attach()

    # Setup CUDA, GPU & distributed training.
    if args.local_rank == -1 or args.no_cuda:
        device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
        args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()
    else:
        # Each distributed process owns exactly one GPU.
        torch.cuda.set_device(args.local_rank)
        device = torch.device("cuda", args.local_rank)
        torch.distributed.init_process_group(backend="nccl")
        args.n_gpu = 1
    args.device = device

    # Setup logging: verbose only on the main process.
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN,
    )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        args.local_rank,
        device,
        args.n_gpu,
        bool(args.local_rank != -1),
        args.fp16,
    )

    set_seed(args)

    # Prepare the NER labels; padding positions use CrossEntropyLoss's
    # ignore_index so they never contribute to the loss.
    labels = get_labels(args.labels)
    num_labels = len(labels)
    pad_token_label_id = CrossEntropyLoss().ignore_index

    # Make sure only the first process downloads model & vocab.
    if args.local_rank not in [-1, 0]:
        torch.distributed.barrier()

    args.model_type = args.model_type.lower()
    config = AutoConfig.from_pretrained(
        args.config_name if args.config_name else args.model_name_or_path,
        num_labels=num_labels,
        id2label={str(i): label for i, label in enumerate(labels)},
        label2id={label: i for i, label in enumerate(labels)},
        cache_dir=args.cache_dir if args.cache_dir else None,
    )
    tokenizer_args = {k: v for k, v in vars(args).items() if v is not None and k in TOKENIZER_ARGS}
    logger.info("Tokenizer arguments: %s", tokenizer_args)
    tokenizer = AutoTokenizer.from_pretrained(
        args.tokenizer_name if args.tokenizer_name else args.model_name_or_path,
        cache_dir=args.cache_dir if args.cache_dir else None,
        **tokenizer_args,
    )
    model = AutoModelForTokenClassification.from_pretrained(
        args.model_name_or_path,
        from_tf=bool(".ckpt" in args.model_name_or_path),
        config=config,
        cache_dir=args.cache_dir if args.cache_dir else None,
    )

    # First process is done downloading; release the other ranks.
    if args.local_rank == 0:
        torch.distributed.barrier()

    model.to(args.device)

    logger.info("Training/evaluation parameters %s", args)

    # Training: the train set is a pickled tuple of pre-computed
    # (input_ids, input_mask, segment_ids, ner_logits) arrays.
    if args.do_train:
        with open(args.logits_file, "rb") as f:
            datasets = pickle.load(f)
        all_input_ids = torch.tensor(datasets[0], dtype=torch.long)
        all_input_mask = torch.tensor(datasets[1], dtype=torch.long)
        all_segment_ids = torch.tensor(datasets[2], dtype=torch.long)
        all_ner_logits = torch.tensor(datasets[3], dtype=torch.float)
        train_dataset = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_ner_logits)
        global_step, tr_loss = train(args, train_dataset, model, tokenizer, labels, pad_token_label_id)
        logger.info(" global_step = %s, average loss = %s", global_step, tr_loss)

    # Save the final model and tokenizer (main process only).
    if args.do_train and (args.local_rank == -1 or torch.distributed.get_rank() == 0):
        if not os.path.exists(args.output_dir) and args.local_rank in [-1, 0]:
            os.makedirs(args.output_dir)
        logger.info("Saving model checkpoint to %s", args.output_dir)
        model_to_save = (
            model.module if hasattr(model, "module") else model
        )  # unwrap (Distributed)DataParallel before saving
        model_to_save.save_pretrained(args.output_dir)
        tokenizer.save_pretrained(args.output_dir)
        torch.save(args, os.path.join(args.output_dir, "training_args.bin"))

    # Evaluation on the dev set (optionally on every saved checkpoint).
    results = {}
    if args.do_eval and args.local_rank in [-1, 0]:
        tokenizer = AutoTokenizer.from_pretrained(args.output_dir, **tokenizer_args)
        checkpoints = [args.output_dir]
        if args.eval_all_checkpoints:
            checkpoints = list(
                os.path.dirname(c) for c in sorted(glob.glob(args.output_dir + "/**/" + WEIGHTS_NAME, recursive=True))
            )
            logging.getLogger("pytorch_transformers.modeling_utils").setLevel(logging.WARN)
        logger.info("Evaluate the following checkpoints: %s", checkpoints)
        for checkpoint in checkpoints:
            # Prefix per-checkpoint results with the training step number.
            global_step = checkpoint.split("-")[-1] if len(checkpoints) > 1 else ""
            model = AutoModelForTokenClassification.from_pretrained(checkpoint)
            model.to(args.device)
            result, _ = evaluate(args, model, tokenizer, labels, pad_token_label_id, mode="dev", prefix=global_step)
            if global_step:
                result = {"{}_{}".format(global_step, k): v for k, v in result.items()}
            results.update(result)
        output_eval_file = os.path.join(args.output_dir, "eval_results.txt")
        with open(output_eval_file, "w") as writer:
            for key in sorted(results.keys()):
                writer.write("{} = {}\n".format(key, str(results[key])))

    # Prediction on the test set: write metrics plus one predicted label
    # per token, aligned with the original CoNLL test file.
    if args.do_predict and args.local_rank in [-1, 0]:
        tokenizer = AutoTokenizer.from_pretrained(args.output_dir, **tokenizer_args)
        model = AutoModelForTokenClassification.from_pretrained(args.output_dir)
        model.to(args.device)
        result, predictions = evaluate(args, model, tokenizer, labels, pad_token_label_id, mode="test")
        output_test_results_file = os.path.join(args.output_dir, "test_results.txt")
        with open(output_test_results_file, "w") as writer:
            for key in sorted(result.keys()):
                writer.write("{} = {}\n".format(key, str(result[key])))
        output_test_predictions_file = os.path.join(args.output_dir, "test_predictions.txt")
        with open(output_test_predictions_file, "w") as writer:
            with open(os.path.join(args.data_dir, "test.txt"), "r") as f:
                example_id = 0
                for line in f:
                    if line.startswith("-DOCSTART-") or line == "" or line == "\n":
                        # Sentence boundary / document marker: copy through
                        # and advance to the next example once it is drained.
                        writer.write(line)
                        if not predictions[example_id]:
                            example_id += 1
                    elif predictions[example_id]:
                        output_line = line.split()[0] + " " + predictions[example_id].pop(0) + "\n"
                        writer.write(output_line)
                    else:
                        # Tokens truncated away by max_seq_length have no label.
                        logger.warning("Maximum sequence length exceeded: No prediction for '%s'.", line.split()[0])

    return results


if __name__ == "__main__":
    main()
| true | true |
f717830652b40b5662e7d5601be70f7d7dfd33fa | 629 | py | Python | code/LTI/Demos/Tex_matplotlib.py | chipmuenk/acoustics | c85ac95a10c09d7fa15d63b2bdb24acab89fec60 | [
"MIT"
] | null | null | null | code/LTI/Demos/Tex_matplotlib.py | chipmuenk/acoustics | c85ac95a10c09d7fa15d63b2bdb24acab89fec60 | [
"MIT"
] | null | null | null | code/LTI/Demos/Tex_matplotlib.py | chipmuenk/acoustics | c85ac95a10c09d7fa15d63b2bdb24acab89fec60 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Mon May 14 14:15:52 2012
Plot mit TeX-Formatierung der Labels
(LaTeX muss auf dem Rechner installiert sein)
"""
import numpy as np
from matplotlib import rc
import matplotlib.pyplot as plt
rc('text', usetex=True)
plt.figure(1)
ax = plt.axes([0.1, 0.1, 0.8, 0.7])
t = np.arange(0.0, 1.0+0.01, 0.01)
s = np.cos(2*2*np.pi*t)+2
plt.plot(t, s)
plt.xlabel(r'\textbf{Time (s)}')
plt.ylabel(r'\textit{Voltage} (mV)',fontsize=16)
plt.title(r"\TeX\ is Number $\displaystyle\sum_{n=1}^\infty\frac{-e^{i\pi}}{2^n}$!",
fontsize=16, color='r')
plt.grid(True)
plt.savefig('tex_demo')
plt.show() | 23.296296 | 84 | 0.659777 |
import numpy as np
from matplotlib import rc
import matplotlib.pyplot as plt
rc('text', usetex=True)
plt.figure(1)
ax = plt.axes([0.1, 0.1, 0.8, 0.7])
t = np.arange(0.0, 1.0+0.01, 0.01)
s = np.cos(2*2*np.pi*t)+2
plt.plot(t, s)
plt.xlabel(r'\textbf{Time (s)}')
plt.ylabel(r'\textit{Voltage} (mV)',fontsize=16)
plt.title(r"\TeX\ is Number $\displaystyle\sum_{n=1}^\infty\frac{-e^{i\pi}}{2^n}$!",
fontsize=16, color='r')
plt.grid(True)
plt.savefig('tex_demo')
plt.show() | true | true |
f717831ab43bf5e60f20fa256ffdf13e2b588a99 | 6,871 | py | Python | env/Lib/site-packages/pyttsx/drivers/dummy.py | TrinhAnBinh/covid_vir_assistant_ver_0.0.2 | b4471f4894c1bc203980f06b811f63e8e8f6b3ab | [
"MIT"
] | 160 | 2016-10-04T22:45:36.000Z | 2022-02-10T06:41:56.000Z | env/Lib/site-packages/pyttsx/drivers/dummy.py | TrinhAnBinh/covid_vir_assistant_ver_0.0.2 | b4471f4894c1bc203980f06b811f63e8e8f6b3ab | [
"MIT"
] | 27 | 2016-10-04T02:45:18.000Z | 2022-03-09T15:15:54.000Z | env/Lib/site-packages/pyttsx/drivers/dummy.py | TrinhAnBinh/covid_vir_assistant_ver_0.0.2 | b4471f4894c1bc203980f06b811f63e8e8f6b3ab | [
"MIT"
] | 58 | 2016-10-06T16:53:43.000Z | 2021-10-21T22:17:35.000Z | '''
Dummy driver that produces no output but gives all expected callbacks. Useful
for testing and as a model for real drivers.
Copyright (c) 2009, 2013 Peter Parente
Permission to use, copy, modify, and distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
'''
from ..voice import Voice
import time
def buildDriver(proxy):
    '''
    Factory hook used by the driver proxy: returns a fresh dummy driver
    bound to the given proxy.

    @param proxy: Proxy creating the driver
    @type proxy: L{driver.DriverProxy}
    '''
    instance = DummyDriver(proxy)
    return instance
class DummyDriver(object):
    '''
    No-op speech engine used for testing and as a reference implementation.

    It produces no audio but fires every notification a real driver would,
    and keeps its configuration (rate, volume, voice, voices) in a plain
    dict so property access can be exercised without a real TTS backend.

    @ivar _proxy: Driver proxy that manages this instance
    @type _proxy: L{driver.DriverProxy}
    @ivar _config: Dummy configuration
    @type _config: dict
    @ivar _looping: True while inside the dummy event loop
    @type _looping: bool
    '''

    def __init__(self, proxy):
        '''
        Constructs the driver.

        @param proxy: Proxy creating the driver
        @type proxy: L{driver.DriverProxy}
        '''
        self._proxy = proxy
        self._looping = False
        # Pretend the engine ships with a few voices so voice selection
        # works as if a real TTS implementation were present.
        available = [
            Voice('dummy.voice1', 'John Doe', ['en-US', 'en-GB'], 'male', 'adult'),
            Voice('dummy.voice2', 'Jane Doe', ['en-US', 'en-GB'], 'female', 'adult'),
            Voice('dummy.voice3', 'Jimmy Doe', ['en-US', 'en-GB'], 'male', 10)
        ]
        self._config = {
            'rate': 200,
            'volume': 1.0,
            'voice': available[0],
            'voices': available,
        }

    def destroy(self):
        '''Called when the proxy tears the driver down; nothing to clean up.'''
        pass

    def startLoop(self):
        '''
        Runs a blocking loop, signalling readiness to the proxy exactly once
        on the first pass.  Must not have been started already without an
        intervening L{endLoop}.
        '''
        self._looping = True
        announced = False
        while self._looping:
            if not announced:
                self._proxy.setBusy(False)
                announced = True
            time.sleep(0.5)

    def endLoop(self):
        '''Asks a running L{startLoop} to exit on its next pass.'''
        self._looping = False

    def iterate(self):
        '''Single iteration for an external run loop; marks the driver idle.'''
        self._proxy.setBusy(False)
        yield

    def say(self, text):
        '''
        Pretends to speak the given text.  Emits started-utterance, then one
        started-word notification per space-separated word (carrying the
        word's character offset and length in the original text), then
        finished-utterance, bracketing it all with busy True/False on the
        proxy.

        @param text: Unicode text to speak
        @type text: unicode
        '''
        self._proxy.setBusy(True)
        self._proxy.notify('started-utterance')
        offset = 0
        for token in text.split(' '):
            self._proxy.notify('started-word', location=offset,
                               length=len(token))
            try:
                # Advance to the character just past the next space.
                offset = text.index(' ', offset + 1) + 1
            except Exception:
                pass  # no further space: keep the last offset
        self._proxy.notify('finished-utterance', completed=True)
        self._proxy.setBusy(False)

    def stop(self):
        '''Interrupts current output; nothing to do for the dummy engine.'''
        pass

    def getProperty(self, name):
        '''
        Returns one of the supported property values: voices, voice, rate,
        or volume.

        @param name: Property name
        @type name: str
        @raise KeyError: When the property name is unknown
        '''
        if name not in self._config:
            raise KeyError('unknown property %s' % name)
        return self._config[name]

    def setProperty(self, name, value):
        '''
        Sets one of the supported property values: voice (by voice id),
        rate, or volume.

        @param name: Property name
        @type name: str
        @param value: Property value
        @type value: object
        @raise KeyError: When the property name is unknown
        '''
        if name == 'voice':
            matches = [v for v in self._config['voices'] if v.id == value]
            self._config['voice'] = matches[0]
        elif name in ('rate', 'volume'):
            self._config[name] = value
        else:
            raise KeyError('unknown property %s' % name)
| 35.601036 | 85 | 0.625382 | from ..voice import Voice
import time
def buildDriver(proxy):
return DummyDriver(proxy)
class DummyDriver(object):
def __init__(self, proxy):
self._proxy = proxy
self._looping = False
voices = [
Voice('dummy.voice1', 'John Doe', ['en-US', 'en-GB'], 'male', 'adult'),
Voice('dummy.voice2', 'Jane Doe', ['en-US', 'en-GB'], 'female', 'adult'),
Voice('dummy.voice3', 'Jimmy Doe', ['en-US', 'en-GB'], 'male', 10)
]
self._config = {
'rate' : 200,
'volume' : 1.0,
'voice' : voices[0],
'voices' : voices
}
def destroy(self):
pass
def startLoop(self):
first = True
self._looping = True
while self._looping:
if first:
self._proxy.setBusy(False)
first = False
time.sleep(0.5)
def endLoop(self):
self._looping = False
def iterate(self):
self._proxy.setBusy(False)
yield
def say(self, text):
self._proxy.setBusy(True)
self._proxy.notify('started-utterance')
i = 0
for word in text.split(' '):
self._proxy.notify('started-word', location=i, length=len(word))
try:
i = text.index(' ', i+1)+1
except Exception:
pass
self._proxy.notify('finished-utterance', completed=True)
self._proxy.setBusy(False)
def stop(self):
pass
def getProperty(self, name):
try:
return self._config[name]
except KeyError:
raise KeyError('unknown property %s' % name)
def setProperty(self, name, value):
if name == 'voice':
v = [v for v in self._config['voices'] if v.id == value]
self._config['voice'] = v[0]
elif name == 'rate':
self._config['rate'] = value
elif name == 'volume':
self._config['volume'] = value
else:
raise KeyError('unknown property %s' % name)
| true | true |
f71785323c845db207650f7917b7bc72e98e1b96 | 26,982 | py | Python | manila/tests/share/drivers/quobyte/test_quobyte.py | deiter/manila | ba94d20e823d2edad7e9bd01546cf1642b17d212 | [
"Apache-2.0"
] | 1 | 2019-05-06T10:33:38.000Z | 2019-05-06T10:33:38.000Z | manila/tests/share/drivers/quobyte/test_quobyte.py | deiter/manila | ba94d20e823d2edad7e9bd01546cf1642b17d212 | [
"Apache-2.0"
] | 4 | 2019-05-06T11:45:17.000Z | 2019-05-09T14:23:28.000Z | manila/tests/share/drivers/quobyte/test_quobyte.py | deiter/manila | ba94d20e823d2edad7e9bd01546cf1642b17d212 | [
"Apache-2.0"
] | 3 | 2019-05-03T12:32:47.000Z | 2021-01-30T20:26:19.000Z | # Copyright (c) 2015 Quobyte, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslo_config import cfg
import six
from manila import context
from manila import exception
from manila.share import configuration as config
from manila.share import driver
from manila.share.drivers.quobyte import jsonrpc
from manila.share.drivers.quobyte import quobyte
from manila import test
from manila.tests import fake_share
CONF = cfg.CONF
def fake_rpc_handler(name, *args):
    """Return canned Quobyte JSON-RPC responses keyed by method name.

    Mirrors the subset of RPC methods the driver under test invokes; any
    unrecognised method name yields a sentinel string.
    """
    canned = {
        'resolveVolumeName': None,
        'createVolume': {'volume_uuid': 'voluuid'},
        'exportVolume': {'nfs_server_ip': 'fake_location',
                         'nfs_export_path': '/fake_share'},
        'getConfiguration': {
            "tenant_configuration": [
                {
                    "domain_name": "fake_domain_name",
                    "volume_access": [
                        {"volume_uuid": "fake_id_1",
                         "restrict_to_network": "10.0.0.1",
                         "read_only": False},
                        {"volume_uuid": "fake_id_1",
                         "restrict_to_network": "10.0.0.2",
                         "read_only": False},
                        {"volume_uuid": "fake_id_2",
                         "restrict_to_network": "10.0.0.3",
                         "read_only": False},
                    ],
                },
                {
                    "domain_name": "fake_domain_name_2",
                    "volume_access": [
                        {"volume_uuid": "fake_id_3",
                         "restrict_to_network": "10.0.0.4",
                         "read_only": False},
                        {"volume_uuid": "fake_id_3",
                         "restrict_to_network": "10.0.0.5",
                         "read_only": True},
                        {"volume_uuid": "fake_id_4",
                         "restrict_to_network": "10.0.0.6",
                         "read_only": False},
                    ],
                },
            ]
        },
    }
    if name in canned:
        return canned[name]
    return "Unknown fake rpc handler call"
def create_fake_access(access_adr,
                       access_id='fake_access_id',
                       access_type='ip',
                       access_level='rw'):
    """Build a minimal share-access rule dict for use in tests."""
    rule = dict(access_id=access_id,
                access_type=access_type,
                access_to=access_adr,
                access_level=access_level)
    return rule
class QuobyteShareDriverTestCase(test.TestCase):
"""Tests QuobyteShareDriver."""
def setUp(self):
super(QuobyteShareDriverTestCase, self).setUp()
self._context = context.get_admin_context()
CONF.set_default('driver_handles_share_servers', False)
self.fake_conf = config.Configuration(None)
self._driver = quobyte.QuobyteShareDriver(configuration=self.fake_conf)
self._driver.rpc = mock.Mock()
self.share = fake_share.fake_share(share_proto='NFS')
self.access = fake_share.fake_access()
@mock.patch('manila.share.drivers.quobyte.jsonrpc.JsonRpc', mock.Mock())
def test_do_setup_success(self):
self._driver.rpc.call = mock.Mock(return_value=None)
self._driver.do_setup(self._context)
self._driver.rpc.call.assert_called_with('getInformation', {})
@mock.patch('manila.share.drivers.quobyte.jsonrpc.JsonRpc.__init__',
mock.Mock(return_value=None))
@mock.patch.object(jsonrpc.JsonRpc, 'call',
side_effect=exception.QBRpcException)
def test_do_setup_failure(self, mock_call):
self.assertRaises(exception.QBException,
self._driver.do_setup, self._context)
def test_create_share_new_volume(self):
self._driver.rpc.call = mock.Mock(wraps=fake_rpc_handler)
result = self._driver.create_share(self._context, self.share)
self.assertEqual(self.share['export_location'], result)
self._driver.rpc.call.assert_has_calls([
mock.call('createVolume', dict(
name=self.share['name'],
tenant_domain=self.share['project_id'],
root_user_id=self.fake_conf.quobyte_default_volume_user,
root_group_id=self.fake_conf.quobyte_default_volume_group,
configuration_name=self.fake_conf.quobyte_volume_configuration
)),
mock.call('exportVolume',
dict(protocol='NFS', volume_uuid='voluuid'))])
def test_create_share_existing_volume(self):
self._driver.rpc.call = mock.Mock(wraps=fake_rpc_handler)
self._driver.create_share(self._context, self.share)
self._driver.rpc.call.assert_called_with(
'exportVolume', dict(protocol='NFS', volume_uuid='voluuid'))
def test_create_share_wrong_protocol(self):
share = {'share_proto': 'WRONG_PROTOCOL'}
self.assertRaises(exception.QBException,
self._driver.create_share,
context=None,
share=share)
def test_delete_share_existing_volume(self):
def rpc_handler(name, *args):
if name == 'resolveVolumeName':
return {'volume_uuid': 'voluuid'}
elif name == 'exportVolume':
return {}
self._driver.configuration.quobyte_delete_shares = True
self._driver.rpc.call = mock.Mock(wraps=rpc_handler)
self._driver.delete_share(self._context, self.share)
self._driver.rpc.call.assert_has_calls([
mock.call('resolveVolumeName',
{'volume_name': 'fakename',
'tenant_domain': 'fake_project_uuid'}),
mock.call('deleteVolume', {'volume_uuid': 'voluuid'}),
mock.call('exportVolume', {'volume_uuid': 'voluuid',
'remove_export': True})])
def test_delete_share_existing_volume_disabled(self):
def rpc_handler(name, *args):
if name == 'resolveVolumeName':
return {'volume_uuid': 'voluuid'}
elif name == 'exportVolume':
return {}
CONF.set_default('quobyte_delete_shares', False)
self._driver.rpc.call = mock.Mock(wraps=rpc_handler)
self._driver.delete_share(self._context, self.share)
self._driver.rpc.call.assert_called_with(
'exportVolume', {'volume_uuid': 'voluuid',
'remove_export': True})
@mock.patch.object(quobyte.LOG, 'warning')
def test_delete_share_nonexisting_volume(self, mock_warning):
def rpc_handler(name, *args):
if name == 'resolveVolumeName':
return None
self._driver.rpc.call = mock.Mock(wraps=rpc_handler)
self._driver.delete_share(self._context, self.share)
mock_warning.assert_called_with(
'No volume found for share fake_project_uuid/fakename')
def test_allow_access(self):
def rpc_handler(name, *args):
if name == 'resolveVolumeName':
return {'volume_uuid': 'voluuid'}
elif name == 'exportVolume':
return {'nfs_server_ip': '10.10.1.1',
'nfs_export_path': '/voluuid'}
self._driver.rpc.call = mock.Mock(wraps=rpc_handler)
self._driver._allow_access(self._context, self.share, self.access)
self._driver.rpc.call.assert_called_with(
'exportVolume', {'volume_uuid': 'voluuid',
'read_only': False,
'add_allow_ip': '10.0.0.1'})
def test_allow_ro_access(self):
def rpc_handler(name, *args):
if name == 'resolveVolumeName':
return {'volume_uuid': 'voluuid'}
elif name == 'exportVolume':
return {'nfs_server_ip': '10.10.1.1',
'nfs_export_path': '/voluuid'}
self._driver.rpc.call = mock.Mock(wraps=rpc_handler)
ro_access = fake_share.fake_access(access_level='ro')
self._driver._allow_access(self._context, self.share, ro_access)
self._driver.rpc.call.assert_called_with(
'exportVolume', {'volume_uuid': 'voluuid',
'read_only': True,
'add_allow_ip': '10.0.0.1'})
def test_allow_access_nonip(self):
self._driver.rpc.call = mock.Mock(wraps=fake_rpc_handler)
self.access = fake_share.fake_access(**{"access_type":
"non_existant_access_type"})
self.assertRaises(exception.InvalidShareAccess,
self._driver._allow_access,
self._context, self.share, self.access)
def test_deny_access(self):
def rpc_handler(name, *args):
if name == 'resolveVolumeName':
return {'volume_uuid': 'voluuid'}
elif name == 'exportVolume':
return {'nfs_server_ip': '10.10.1.1',
'nfs_export_path': '/voluuid'}
self._driver.rpc.call = mock.Mock(wraps=rpc_handler)
self._driver._deny_access(self._context, self.share, self.access)
self._driver.rpc.call.assert_called_with(
'exportVolume',
{'volume_uuid': 'voluuid', 'remove_allow_ip': '10.0.0.1'})
@mock.patch.object(quobyte.LOG, 'debug')
def test_deny_access_nonip(self, mock_debug):
self._driver.rpc.call = mock.Mock(wraps=fake_rpc_handler)
self.access = fake_share.fake_access(
access_type="non_existant_access_type")
self._driver._deny_access(self._context, self.share, self.access)
mock_debug.assert_called_with(
'Quobyte driver only supports ip access control. '
'Ignoring deny access call for %s , %s',
'fakename', 'fake_project_uuid')
def test_resolve_volume_name(self):
self._driver.rpc.call = mock.Mock(
return_value={'volume_uuid': 'fake_uuid'})
self._driver._resolve_volume_name('fake_vol_name', 'fake_domain_name')
self._driver.rpc.call.assert_called_with(
'resolveVolumeName',
{'volume_name': 'fake_vol_name',
'tenant_domain': 'fake_domain_name'})
def test_resolve_volume_name_NOENT(self):
self._driver.rpc.call = mock.Mock(
return_value=None)
self.assertIsNone(
self._driver._resolve_volume_name('fake_vol_name',
'fake_domain_name'))
def test_resolve_volume_name_other_error(self):
self._driver.rpc.call = mock.Mock(
side_effect=exception.QBRpcException(
result='fubar',
qbcode=666))
self.assertRaises(exception.QBRpcException,
self._driver._resolve_volume_name,
volume_name='fake_vol_name',
tenant_domain='fake_domain_name')
@mock.patch.object(driver.ShareDriver, '_update_share_stats')
def test_update_share_stats(self, mock_uss):
self._driver._get_capacities = mock.Mock(return_value=[42, 23])
self._driver._update_share_stats()
mock_uss.assert_called_once_with(
dict(storage_protocol='NFS',
vendor_name='Quobyte',
share_backend_name=self._driver.backend_name,
driver_version=self._driver.DRIVER_VERSION,
total_capacity_gb=42,
free_capacity_gb=23,
reserved_percentage=0))
def test_get_capacities_gb(self):
capval = 42115548133
useval = 19695128917
self._driver.rpc.call = mock.Mock(
return_value={'total_logical_capacity': six.text_type(capval),
'total_logical_usage': six.text_type(useval)})
self.assertEqual((39.223160718, 20.880642548),
self._driver._get_capacities())
    @mock.patch.object(quobyte.QuobyteShareDriver,
                       "_resolve_volume_name",
                       return_value="fake_uuid")
    def test_ensure_share(self, mock_qb_resolve_volname):
        """ensure_share re-exports an existing volume and returns its location."""
        self._driver.rpc.call = mock.Mock(wraps=fake_rpc_handler)
        result = self._driver.ensure_share(self._context, self.share, None)
        self.assertEqual(self.share["export_location"], result)
        (mock_qb_resolve_volname.
         assert_called_once_with(self.share['name'],
                                 self.share['project_id']))
        self._driver.rpc.call.assert_has_calls([
            mock.call('exportVolume', dict(
                volume_uuid="fake_uuid",
                protocol='NFS'
            ))])
    @mock.patch.object(quobyte.QuobyteShareDriver,
                       "_resolve_volume_name",
                       return_value=None)
    def test_ensure_deleted_share(self, mock_qb_resolve_volname):
        """ensure_share raises ShareResourceNotFound when the volume is gone."""
        self._driver.rpc.call = mock.Mock(wraps=fake_rpc_handler)
        self.assertRaises(exception.ShareResourceNotFound,
                          self._driver.ensure_share,
                          self._context, self.share, None)
        (mock_qb_resolve_volname.
         assert_called_once_with(self.share['name'],
                                 self.share['project_id']))
    @mock.patch.object(quobyte.QuobyteShareDriver, "_resize_share")
    def test_extend_share(self, mock_qsd_resize_share):
        """extend_share delegates to _resize_share with the new size."""
        self._driver.extend_share(ext_share=self.share,
                                  ext_size=2,
                                  share_server=None)
        mock_qsd_resize_share.assert_called_once_with(share=self.share,
                                                      new_size=2)
    def test_resize_share(self):
        """_resize_share applies a volume quota through the setQuota RPC."""
        self._driver.rpc.call = mock.Mock(wraps=fake_rpc_handler)
        self._driver._resize_share(share=self.share, new_size=7)
        # Consumer/limit type codes are the Quobyte API's numeric enums --
        # presumably 3 = volume consumer, 5 = logical-disk-space limit; confirm
        # against the Quobyte API docs.
        self._driver.rpc.call.assert_has_calls([
            mock.call('setQuota',
                      {"consumer": {"type": 3,
                                    "identifier": self.share["name"]},
                       "limits": {"type": 5, "value": 7}})])
    @mock.patch.object(quobyte.QuobyteShareDriver,
                       "_resolve_volume_name",
                       return_value="fake_id_3")
    def test_fetch_existing_access(self, mock_qb_resolve_volname):
        """_fetch_existing_access returns the backend ACLs of the resolved volume.

        In fake_rpc_handler's getConfiguration reply, volume "fake_id_3"
        carries exactly the 10.0.0.4 and 10.0.0.5 entries.
        """
        self._driver.rpc.call = mock.Mock(wraps=fake_rpc_handler)
        old_access_1 = create_fake_access(access_id="old_1",
                                          access_adr="10.0.0.4")
        old_access_2 = create_fake_access(access_id="old_2",
                                          access_adr="10.0.0.5")
        exist_list = self._driver._fetch_existing_access(context=self._context,
                                                         share=self.share)
        # Compare by address (access_to) only.
        self.assertEqual([old_access_1['access_to'],
                          old_access_2['access_to']],
                         [e.get('access_to') for e in exist_list])
        (mock_qb_resolve_volname.
         assert_called_once_with(self.share['name'],
                                 self.share['project_id']))
    @mock.patch.object(quobyte.QuobyteShareDriver, "_resize_share")
    def test_shrink_share(self, mock_qsd_resize_share):
        """shrink_share delegates to _resize_share with the smaller size."""
        self._driver.shrink_share(shrink_share=self.share,
                                  shrink_size=3,
                                  share_server=None)
        mock_qsd_resize_share.assert_called_once_with(share=self.share,
                                                      new_size=3)
    def test_subtract_access_lists(self):
        """_subtract_access_lists(min, sub) keeps entries of min absent from sub."""
        # NOTE(review): "rw"/"ro" are passed as access_type here although they
        # look like access levels; the *_level and *_type variants below cover
        # those fields separately -- confirm this mix is intentional.
        access_1 = create_fake_access(access_id="new_1",
                                      access_adr="10.0.0.5",
                                      access_type="rw",)
        access_2 = create_fake_access(access_id="old_1",
                                      access_adr="10.0.0.1",
                                      access_type="rw")
        access_3 = create_fake_access(access_id="old_2",
                                      access_adr="10.0.0.3",
                                      access_type="ro")
        access_4 = create_fake_access(access_id="new_2",
                                      access_adr="10.0.0.6",
                                      access_type="rw")
        access_5 = create_fake_access(access_id="old_3",
                                      access_adr="10.0.0.4",
                                      access_type="rw")
        min_list = [access_1, access_2, access_3, access_4]
        sub_list = [access_5, access_3, access_2]
        self.assertEqual([access_1, access_4],
                         self._driver._subtract_access_lists(min_list,
                                                             sub_list))
    def test_subtract_access_lists_level(self):
        """Same address with a different access_level does not cancel an entry.

        access_5 shares access_3's address (10.0.0.3) but is "ro" instead of
        "rw", so access_3 must survive the subtraction.
        """
        access_1 = create_fake_access(access_id="new_1",
                                      access_adr="10.0.0.5",
                                      access_level="rw")
        access_2 = create_fake_access(access_id="old_1",
                                      access_adr="10.0.0.1",
                                      access_level="rw")
        access_3 = create_fake_access(access_id="old_2",
                                      access_adr="10.0.0.3",
                                      access_level="rw")
        access_4 = create_fake_access(access_id="new_2",
                                      access_adr="10.0.0.6",
                                      access_level="rw")
        access_5 = create_fake_access(access_id="old_2_ro",
                                      access_adr="10.0.0.3",
                                      access_level="ro")
        min_list = [access_1, access_2, access_3, access_4]
        sub_list = [access_5, access_2]
        self.assertEqual([access_1, access_3, access_4],
                         self._driver._subtract_access_lists(min_list,
                                                             sub_list))
    def test_subtract_access_lists_type(self):
        """Same address with a different access_type does not cancel an entry.

        access_5 shares access_3's address (10.0.0.3) but has type "other"
        instead of "ip", so access_3 must survive the subtraction.
        """
        access_1 = create_fake_access(access_id="new_1",
                                      access_adr="10.0.0.5",
                                      access_type="ip")
        access_2 = create_fake_access(access_id="old_1",
                                      access_adr="10.0.0.1",
                                      access_type="ip")
        access_3 = create_fake_access(access_id="old_2",
                                      access_adr="10.0.0.3",
                                      access_type="ip")
        access_4 = create_fake_access(access_id="new_2",
                                      access_adr="10.0.0.6",
                                      access_type="ip")
        access_5 = create_fake_access(access_id="old_2_ro",
                                      access_adr="10.0.0.3",
                                      access_type="other")
        min_list = [access_1, access_2, access_3, access_4]
        sub_list = [access_5, access_2]
        self.assertEqual([access_1, access_3, access_4],
                         self._driver._subtract_access_lists(min_list,
                                                             sub_list))
    @mock.patch.object(quobyte.QuobyteShareDriver, "_allow_access")
    @mock.patch.object(quobyte.QuobyteShareDriver, "_deny_access")
    def test_update_access_add_delete(self, qb_deny_mock, qb_allow_mock):
        """Explicit add/delete rule lists map to _allow_access/_deny_access."""
        access_1 = create_fake_access(access_id="new_1",
                                      access_adr="10.0.0.5",
                                      access_level="rw")
        access_2 = create_fake_access(access_id="old_1",
                                      access_adr="10.0.0.1",
                                      access_level="rw")
        access_3 = create_fake_access(access_id="old_2",
                                      access_adr="10.0.0.3",
                                      access_level="rw")
        self._driver.update_access(self._context,
                                   self.share,
                                   access_rules=None,
                                   add_rules=[access_1],
                                   delete_rules=[access_2, access_3])
        qb_allow_mock.assert_called_once_with(self._context,
                                              self.share, access_1)
        deny_calls = [mock.call(self._context, self.share, access_2),
                      mock.call(self._context, self.share, access_3)]
        qb_deny_mock.assert_has_calls(deny_calls)
    @mock.patch.object(quobyte.LOG, "warning")
    def test_update_access_no_rules(self, qb_log_mock):
        """update_access with empty rule sets only logs a warning."""
        self._driver.update_access(context=None, share=None, access_rules=[],
                                   add_rules=[], delete_rules=[])
        qb_log_mock.assert_has_calls([mock.ANY])
    @mock.patch.object(quobyte.QuobyteShareDriver, "_subtract_access_lists")
    @mock.patch.object(quobyte.QuobyteShareDriver, "_fetch_existing_access")
    @mock.patch.object(quobyte.QuobyteShareDriver, "_allow_access")
    def test_update_access_recovery_additionals(self,
                                                qb_allow_mock,
                                                qb_exist_mock,
                                                qb_subtr_mock):
        """Recovery mode (no add/delete lists) allows rules missing on backend.

        _subtract_access_lists is stubbed so the first call (additions)
        yields two rules and the second call (removals) yields none.
        """
        new_access_1 = create_fake_access(access_id="new_1",
                                          access_adr="10.0.0.2")
        old_access = create_fake_access(access_id="fake_access_id",
                                        access_adr="10.0.0.1")
        new_access_2 = create_fake_access(access_id="new_2",
                                          access_adr="10.0.0.3")
        add_access_rules = [new_access_1,
                            old_access,
                            new_access_2]
        qb_exist_mock.return_value = [old_access]
        qb_subtr_mock.side_effect = [[new_access_1, new_access_2], []]
        self._driver.update_access(self._context, self.share,
                                   access_rules=add_access_rules, add_rules=[],
                                   delete_rules=[])
        assert_calls = [mock.call(self._context, self.share, new_access_1),
                        mock.call(self._context, self.share, new_access_2)]
        qb_allow_mock.assert_has_calls(assert_calls, any_order=True)
        qb_exist_mock.assert_called_once_with(self._context, self.share)
    @mock.patch.object(quobyte.QuobyteShareDriver, "_subtract_access_lists")
    @mock.patch.object(quobyte.QuobyteShareDriver, "_fetch_existing_access")
    @mock.patch.object(quobyte.QuobyteShareDriver, "_deny_access")
    def test_update_access_recovery_superfluous(self,
                                                qb_deny_mock,
                                                qb_exist_mock,
                                                qb_subtr_mock):
        """Recovery mode denies backend rules Manila no longer knows about.

        _subtract_access_lists is stubbed to report nothing to add and one
        stale entry to remove; only _deny_access must fire.
        """
        old_access_1 = create_fake_access(access_id="old_1",
                                          access_adr="10.0.0.1")
        missing_access_1 = create_fake_access(access_id="mis_1",
                                              access_adr="10.0.0.2")
        old_access_2 = create_fake_access(access_id="old_2",
                                          access_adr="10.0.0.3")
        qb_exist_mock.side_effect = [[old_access_1, old_access_2]]
        qb_subtr_mock.side_effect = [[], [missing_access_1]]
        old_access_rules = [old_access_1, old_access_2]
        self._driver.update_access(self._context, self.share,
                                   access_rules=old_access_rules, add_rules=[],
                                   delete_rules=[])
        qb_deny_mock.assert_called_once_with(self._context,
                                             self.share,
                                             (missing_access_1))
        qb_exist_mock.assert_called_once_with(self._context, self.share)
    @mock.patch.object(quobyte.QuobyteShareDriver, "_subtract_access_lists")
    @mock.patch.object(quobyte.QuobyteShareDriver, "_fetch_existing_access")
    @mock.patch.object(quobyte.QuobyteShareDriver, "_deny_access")
    @mock.patch.object(quobyte.QuobyteShareDriver, "_allow_access")
    def test_update_access_recovery_add_superfluous(self,
                                                    qb_allow_mock,
                                                    qb_deny_mock,
                                                    qb_exist_mock,
                                                    qb_subtr_mock):
        """Recovery mode can both allow and deny within one update_access call.

        The stubbed subtraction reports two rules to add and two stale
        backend entries to remove.
        """
        new_access_1 = create_fake_access(access_id="new_1",
                                          access_adr="10.0.0.5")
        old_access_1 = create_fake_access(access_id="old_1",
                                          access_adr="10.0.0.1")
        old_access_2 = create_fake_access(access_id="old_2",
                                          access_adr="10.0.0.3")
        old_access_3 = create_fake_access(access_id="old_3",
                                          access_adr="10.0.0.4")
        miss_access_1 = create_fake_access(access_id="old_3",
                                           access_adr="10.0.0.4")
        new_access_2 = create_fake_access(access_id="new_2",
                                          access_adr="10.0.0.3",
                                          access_level="ro")
        new_access_rules = [new_access_1, old_access_1, old_access_2,
                            old_access_3, new_access_2]
        qb_exist_mock.return_value = [old_access_1, old_access_2,
                                      old_access_3, miss_access_1]
        qb_subtr_mock.side_effect = [[new_access_1, new_access_2],
                                     [miss_access_1, old_access_2]]
        self._driver.update_access(self._context, self.share,
                                   new_access_rules, add_rules=[],
                                   delete_rules=[])
        a_calls = [mock.call(self._context, self.share, new_access_1),
                   mock.call(self._context, self.share, new_access_2)]
        qb_allow_mock.assert_has_calls(a_calls)
        b_calls = [mock.call(self._context, self.share, miss_access_1),
                   mock.call(self._context, self.share, old_access_2)]
        qb_deny_mock.assert_has_calls(b_calls)
        qb_exist_mock.assert_called_once_with(self._context, self.share)
| 44.746269 | 79 | 0.552998 |
import mock
from oslo_config import cfg
import six
from manila import context
from manila import exception
from manila.share import configuration as config
from manila.share import driver
from manila.share.drivers.quobyte import jsonrpc
from manila.share.drivers.quobyte import quobyte
from manila import test
from manila.tests import fake_share
CONF = cfg.CONF
def fake_rpc_handler(name, *args):
    """Return canned JSON-RPC replies, standing in for a Quobyte backend.

    Covers the four RPC methods these tests exercise; any other method
    name yields a placeholder string.
    """
    config_reply = {
        "tenant_configuration": [
            {"domain_name": "fake_domain_name",
             "volume_access": [
                 {"volume_uuid": "fake_id_1",
                  "restrict_to_network": "10.0.0.1",
                  "read_only": False},
                 {"volume_uuid": "fake_id_1",
                  "restrict_to_network": "10.0.0.2",
                  "read_only": False},
                 {"volume_uuid": "fake_id_2",
                  "restrict_to_network": "10.0.0.3",
                  "read_only": False},
             ]},
            {"domain_name": "fake_domain_name_2",
             "volume_access": [
                 {"volume_uuid": "fake_id_3",
                  "restrict_to_network": "10.0.0.4",
                  "read_only": False},
                 {"volume_uuid": "fake_id_3",
                  "restrict_to_network": "10.0.0.5",
                  "read_only": True},
                 {"volume_uuid": "fake_id_4",
                  "restrict_to_network": "10.0.0.6",
                  "read_only": False},
             ]},
        ]
    }
    replies = {
        'resolveVolumeName': None,
        'createVolume': {'volume_uuid': 'voluuid'},
        'exportVolume': {'nfs_server_ip': 'fake_location',
                         'nfs_export_path': '/fake_share'},
        'getConfiguration': config_reply,
    }
    return replies.get(name, "Unknown fake rpc handler call")
def create_fake_access(access_adr,
                       access_id='fake_access_id',
                       access_type='ip',
                       access_level='rw'):
    """Build a Manila-style access-rule dict for the given address."""
    rule = dict(access_id=access_id,
                access_type=access_type,
                access_to=access_adr,
                access_level=access_level)
    return rule
class QuobyteShareDriverTestCase(test.TestCase):
def setUp(self):
super(QuobyteShareDriverTestCase, self).setUp()
self._context = context.get_admin_context()
CONF.set_default('driver_handles_share_servers', False)
self.fake_conf = config.Configuration(None)
self._driver = quobyte.QuobyteShareDriver(configuration=self.fake_conf)
self._driver.rpc = mock.Mock()
self.share = fake_share.fake_share(share_proto='NFS')
self.access = fake_share.fake_access()
@mock.patch('manila.share.drivers.quobyte.jsonrpc.JsonRpc', mock.Mock())
def test_do_setup_success(self):
self._driver.rpc.call = mock.Mock(return_value=None)
self._driver.do_setup(self._context)
self._driver.rpc.call.assert_called_with('getInformation', {})
@mock.patch('manila.share.drivers.quobyte.jsonrpc.JsonRpc.__init__',
mock.Mock(return_value=None))
@mock.patch.object(jsonrpc.JsonRpc, 'call',
side_effect=exception.QBRpcException)
def test_do_setup_failure(self, mock_call):
self.assertRaises(exception.QBException,
self._driver.do_setup, self._context)
def test_create_share_new_volume(self):
self._driver.rpc.call = mock.Mock(wraps=fake_rpc_handler)
result = self._driver.create_share(self._context, self.share)
self.assertEqual(self.share['export_location'], result)
self._driver.rpc.call.assert_has_calls([
mock.call('createVolume', dict(
name=self.share['name'],
tenant_domain=self.share['project_id'],
root_user_id=self.fake_conf.quobyte_default_volume_user,
root_group_id=self.fake_conf.quobyte_default_volume_group,
configuration_name=self.fake_conf.quobyte_volume_configuration
)),
mock.call('exportVolume',
dict(protocol='NFS', volume_uuid='voluuid'))])
def test_create_share_existing_volume(self):
self._driver.rpc.call = mock.Mock(wraps=fake_rpc_handler)
self._driver.create_share(self._context, self.share)
self._driver.rpc.call.assert_called_with(
'exportVolume', dict(protocol='NFS', volume_uuid='voluuid'))
def test_create_share_wrong_protocol(self):
share = {'share_proto': 'WRONG_PROTOCOL'}
self.assertRaises(exception.QBException,
self._driver.create_share,
context=None,
share=share)
def test_delete_share_existing_volume(self):
def rpc_handler(name, *args):
if name == 'resolveVolumeName':
return {'volume_uuid': 'voluuid'}
elif name == 'exportVolume':
return {}
self._driver.configuration.quobyte_delete_shares = True
self._driver.rpc.call = mock.Mock(wraps=rpc_handler)
self._driver.delete_share(self._context, self.share)
self._driver.rpc.call.assert_has_calls([
mock.call('resolveVolumeName',
{'volume_name': 'fakename',
'tenant_domain': 'fake_project_uuid'}),
mock.call('deleteVolume', {'volume_uuid': 'voluuid'}),
mock.call('exportVolume', {'volume_uuid': 'voluuid',
'remove_export': True})])
def test_delete_share_existing_volume_disabled(self):
def rpc_handler(name, *args):
if name == 'resolveVolumeName':
return {'volume_uuid': 'voluuid'}
elif name == 'exportVolume':
return {}
CONF.set_default('quobyte_delete_shares', False)
self._driver.rpc.call = mock.Mock(wraps=rpc_handler)
self._driver.delete_share(self._context, self.share)
self._driver.rpc.call.assert_called_with(
'exportVolume', {'volume_uuid': 'voluuid',
'remove_export': True})
@mock.patch.object(quobyte.LOG, 'warning')
def test_delete_share_nonexisting_volume(self, mock_warning):
def rpc_handler(name, *args):
if name == 'resolveVolumeName':
return None
self._driver.rpc.call = mock.Mock(wraps=rpc_handler)
self._driver.delete_share(self._context, self.share)
mock_warning.assert_called_with(
'No volume found for share fake_project_uuid/fakename')
def test_allow_access(self):
def rpc_handler(name, *args):
if name == 'resolveVolumeName':
return {'volume_uuid': 'voluuid'}
elif name == 'exportVolume':
return {'nfs_server_ip': '10.10.1.1',
'nfs_export_path': '/voluuid'}
self._driver.rpc.call = mock.Mock(wraps=rpc_handler)
self._driver._allow_access(self._context, self.share, self.access)
self._driver.rpc.call.assert_called_with(
'exportVolume', {'volume_uuid': 'voluuid',
'read_only': False,
'add_allow_ip': '10.0.0.1'})
def test_allow_ro_access(self):
def rpc_handler(name, *args):
if name == 'resolveVolumeName':
return {'volume_uuid': 'voluuid'}
elif name == 'exportVolume':
return {'nfs_server_ip': '10.10.1.1',
'nfs_export_path': '/voluuid'}
self._driver.rpc.call = mock.Mock(wraps=rpc_handler)
ro_access = fake_share.fake_access(access_level='ro')
self._driver._allow_access(self._context, self.share, ro_access)
self._driver.rpc.call.assert_called_with(
'exportVolume', {'volume_uuid': 'voluuid',
'read_only': True,
'add_allow_ip': '10.0.0.1'})
def test_allow_access_nonip(self):
self._driver.rpc.call = mock.Mock(wraps=fake_rpc_handler)
self.access = fake_share.fake_access(**{"access_type":
"non_existant_access_type"})
self.assertRaises(exception.InvalidShareAccess,
self._driver._allow_access,
self._context, self.share, self.access)
def test_deny_access(self):
def rpc_handler(name, *args):
if name == 'resolveVolumeName':
return {'volume_uuid': 'voluuid'}
elif name == 'exportVolume':
return {'nfs_server_ip': '10.10.1.1',
'nfs_export_path': '/voluuid'}
self._driver.rpc.call = mock.Mock(wraps=rpc_handler)
self._driver._deny_access(self._context, self.share, self.access)
self._driver.rpc.call.assert_called_with(
'exportVolume',
{'volume_uuid': 'voluuid', 'remove_allow_ip': '10.0.0.1'})
@mock.patch.object(quobyte.LOG, 'debug')
def test_deny_access_nonip(self, mock_debug):
self._driver.rpc.call = mock.Mock(wraps=fake_rpc_handler)
self.access = fake_share.fake_access(
access_type="non_existant_access_type")
self._driver._deny_access(self._context, self.share, self.access)
mock_debug.assert_called_with(
'Quobyte driver only supports ip access control. '
'Ignoring deny access call for %s , %s',
'fakename', 'fake_project_uuid')
def test_resolve_volume_name(self):
self._driver.rpc.call = mock.Mock(
return_value={'volume_uuid': 'fake_uuid'})
self._driver._resolve_volume_name('fake_vol_name', 'fake_domain_name')
self._driver.rpc.call.assert_called_with(
'resolveVolumeName',
{'volume_name': 'fake_vol_name',
'tenant_domain': 'fake_domain_name'})
def test_resolve_volume_name_NOENT(self):
self._driver.rpc.call = mock.Mock(
return_value=None)
self.assertIsNone(
self._driver._resolve_volume_name('fake_vol_name',
'fake_domain_name'))
def test_resolve_volume_name_other_error(self):
self._driver.rpc.call = mock.Mock(
side_effect=exception.QBRpcException(
result='fubar',
qbcode=666))
self.assertRaises(exception.QBRpcException,
self._driver._resolve_volume_name,
volume_name='fake_vol_name',
tenant_domain='fake_domain_name')
@mock.patch.object(driver.ShareDriver, '_update_share_stats')
def test_update_share_stats(self, mock_uss):
self._driver._get_capacities = mock.Mock(return_value=[42, 23])
self._driver._update_share_stats()
mock_uss.assert_called_once_with(
dict(storage_protocol='NFS',
vendor_name='Quobyte',
share_backend_name=self._driver.backend_name,
driver_version=self._driver.DRIVER_VERSION,
total_capacity_gb=42,
free_capacity_gb=23,
reserved_percentage=0))
def test_get_capacities_gb(self):
capval = 42115548133
useval = 19695128917
self._driver.rpc.call = mock.Mock(
return_value={'total_logical_capacity': six.text_type(capval),
'total_logical_usage': six.text_type(useval)})
self.assertEqual((39.223160718, 20.880642548),
self._driver._get_capacities())
@mock.patch.object(quobyte.QuobyteShareDriver,
"_resolve_volume_name",
return_value="fake_uuid")
def test_ensure_share(self, mock_qb_resolve_volname):
self._driver.rpc.call = mock.Mock(wraps=fake_rpc_handler)
result = self._driver.ensure_share(self._context, self.share, None)
self.assertEqual(self.share["export_location"], result)
(mock_qb_resolve_volname.
assert_called_once_with(self.share['name'],
self.share['project_id']))
self._driver.rpc.call.assert_has_calls([
mock.call('exportVolume', dict(
volume_uuid="fake_uuid",
protocol='NFS'
))])
@mock.patch.object(quobyte.QuobyteShareDriver,
"_resolve_volume_name",
return_value=None)
def test_ensure_deleted_share(self, mock_qb_resolve_volname):
self._driver.rpc.call = mock.Mock(wraps=fake_rpc_handler)
self.assertRaises(exception.ShareResourceNotFound,
self._driver.ensure_share,
self._context, self.share, None)
(mock_qb_resolve_volname.
assert_called_once_with(self.share['name'],
self.share['project_id']))
@mock.patch.object(quobyte.QuobyteShareDriver, "_resize_share")
def test_extend_share(self, mock_qsd_resize_share):
self._driver.extend_share(ext_share=self.share,
ext_size=2,
share_server=None)
mock_qsd_resize_share.assert_called_once_with(share=self.share,
new_size=2)
def test_resize_share(self):
self._driver.rpc.call = mock.Mock(wraps=fake_rpc_handler)
self._driver._resize_share(share=self.share, new_size=7)
self._driver.rpc.call.assert_has_calls([
mock.call('setQuota',
{"consumer": {"type": 3,
"identifier": self.share["name"]},
"limits": {"type": 5, "value": 7}})])
@mock.patch.object(quobyte.QuobyteShareDriver,
"_resolve_volume_name",
return_value="fake_id_3")
def test_fetch_existing_access(self, mock_qb_resolve_volname):
self._driver.rpc.call = mock.Mock(wraps=fake_rpc_handler)
old_access_1 = create_fake_access(access_id="old_1",
access_adr="10.0.0.4")
old_access_2 = create_fake_access(access_id="old_2",
access_adr="10.0.0.5")
exist_list = self._driver._fetch_existing_access(context=self._context,
share=self.share)
self.assertEqual([old_access_1['access_to'],
old_access_2['access_to']],
[e.get('access_to') for e in exist_list])
(mock_qb_resolve_volname.
assert_called_once_with(self.share['name'],
self.share['project_id']))
@mock.patch.object(quobyte.QuobyteShareDriver, "_resize_share")
def test_shrink_share(self, mock_qsd_resize_share):
self._driver.shrink_share(shrink_share=self.share,
shrink_size=3,
share_server=None)
mock_qsd_resize_share.assert_called_once_with(share=self.share,
new_size=3)
def test_subtract_access_lists(self):
access_1 = create_fake_access(access_id="new_1",
access_adr="10.0.0.5",
access_type="rw",)
access_2 = create_fake_access(access_id="old_1",
access_adr="10.0.0.1",
access_type="rw")
access_3 = create_fake_access(access_id="old_2",
access_adr="10.0.0.3",
access_type="ro")
access_4 = create_fake_access(access_id="new_2",
access_adr="10.0.0.6",
access_type="rw")
access_5 = create_fake_access(access_id="old_3",
access_adr="10.0.0.4",
access_type="rw")
min_list = [access_1, access_2, access_3, access_4]
sub_list = [access_5, access_3, access_2]
self.assertEqual([access_1, access_4],
self._driver._subtract_access_lists(min_list,
sub_list))
def test_subtract_access_lists_level(self):
access_1 = create_fake_access(access_id="new_1",
access_adr="10.0.0.5",
access_level="rw")
access_2 = create_fake_access(access_id="old_1",
access_adr="10.0.0.1",
access_level="rw")
access_3 = create_fake_access(access_id="old_2",
access_adr="10.0.0.3",
access_level="rw")
access_4 = create_fake_access(access_id="new_2",
access_adr="10.0.0.6",
access_level="rw")
access_5 = create_fake_access(access_id="old_2_ro",
access_adr="10.0.0.3",
access_level="ro")
min_list = [access_1, access_2, access_3, access_4]
sub_list = [access_5, access_2]
self.assertEqual([access_1, access_3, access_4],
self._driver._subtract_access_lists(min_list,
sub_list))
def test_subtract_access_lists_type(self):
access_1 = create_fake_access(access_id="new_1",
access_adr="10.0.0.5",
access_type="ip")
access_2 = create_fake_access(access_id="old_1",
access_adr="10.0.0.1",
access_type="ip")
access_3 = create_fake_access(access_id="old_2",
access_adr="10.0.0.3",
access_type="ip")
access_4 = create_fake_access(access_id="new_2",
access_adr="10.0.0.6",
access_type="ip")
access_5 = create_fake_access(access_id="old_2_ro",
access_adr="10.0.0.3",
access_type="other")
min_list = [access_1, access_2, access_3, access_4]
sub_list = [access_5, access_2]
self.assertEqual([access_1, access_3, access_4],
self._driver._subtract_access_lists(min_list,
sub_list))
@mock.patch.object(quobyte.QuobyteShareDriver, "_allow_access")
@mock.patch.object(quobyte.QuobyteShareDriver, "_deny_access")
def test_update_access_add_delete(self, qb_deny_mock, qb_allow_mock):
access_1 = create_fake_access(access_id="new_1",
access_adr="10.0.0.5",
access_level="rw")
access_2 = create_fake_access(access_id="old_1",
access_adr="10.0.0.1",
access_level="rw")
access_3 = create_fake_access(access_id="old_2",
access_adr="10.0.0.3",
access_level="rw")
self._driver.update_access(self._context,
self.share,
access_rules=None,
add_rules=[access_1],
delete_rules=[access_2, access_3])
qb_allow_mock.assert_called_once_with(self._context,
self.share, access_1)
deny_calls = [mock.call(self._context, self.share, access_2),
mock.call(self._context, self.share, access_3)]
qb_deny_mock.assert_has_calls(deny_calls)
@mock.patch.object(quobyte.LOG, "warning")
def test_update_access_no_rules(self, qb_log_mock):
self._driver.update_access(context=None, share=None, access_rules=[],
add_rules=[], delete_rules=[])
qb_log_mock.assert_has_calls([mock.ANY])
@mock.patch.object(quobyte.QuobyteShareDriver, "_subtract_access_lists")
@mock.patch.object(quobyte.QuobyteShareDriver, "_fetch_existing_access")
@mock.patch.object(quobyte.QuobyteShareDriver, "_allow_access")
def test_update_access_recovery_additionals(self,
qb_allow_mock,
qb_exist_mock,
qb_subtr_mock):
new_access_1 = create_fake_access(access_id="new_1",
access_adr="10.0.0.2")
old_access = create_fake_access(access_id="fake_access_id",
access_adr="10.0.0.1")
new_access_2 = create_fake_access(access_id="new_2",
access_adr="10.0.0.3")
add_access_rules = [new_access_1,
old_access,
new_access_2]
qb_exist_mock.return_value = [old_access]
qb_subtr_mock.side_effect = [[new_access_1, new_access_2], []]
self._driver.update_access(self._context, self.share,
access_rules=add_access_rules, add_rules=[],
delete_rules=[])
assert_calls = [mock.call(self._context, self.share, new_access_1),
mock.call(self._context, self.share, new_access_2)]
qb_allow_mock.assert_has_calls(assert_calls, any_order=True)
qb_exist_mock.assert_called_once_with(self._context, self.share)
@mock.patch.object(quobyte.QuobyteShareDriver, "_subtract_access_lists")
@mock.patch.object(quobyte.QuobyteShareDriver, "_fetch_existing_access")
@mock.patch.object(quobyte.QuobyteShareDriver, "_deny_access")
def test_update_access_recovery_superfluous(self,
qb_deny_mock,
qb_exist_mock,
qb_subtr_mock):
old_access_1 = create_fake_access(access_id="old_1",
access_adr="10.0.0.1")
missing_access_1 = create_fake_access(access_id="mis_1",
access_adr="10.0.0.2")
old_access_2 = create_fake_access(access_id="old_2",
access_adr="10.0.0.3")
qb_exist_mock.side_effect = [[old_access_1, old_access_2]]
qb_subtr_mock.side_effect = [[], [missing_access_1]]
old_access_rules = [old_access_1, old_access_2]
self._driver.update_access(self._context, self.share,
access_rules=old_access_rules, add_rules=[],
delete_rules=[])
qb_deny_mock.assert_called_once_with(self._context,
self.share,
(missing_access_1))
qb_exist_mock.assert_called_once_with(self._context, self.share)
@mock.patch.object(quobyte.QuobyteShareDriver, "_subtract_access_lists")
@mock.patch.object(quobyte.QuobyteShareDriver, "_fetch_existing_access")
@mock.patch.object(quobyte.QuobyteShareDriver, "_deny_access")
@mock.patch.object(quobyte.QuobyteShareDriver, "_allow_access")
def test_update_access_recovery_add_superfluous(self,
qb_allow_mock,
qb_deny_mock,
qb_exist_mock,
qb_subtr_mock):
new_access_1 = create_fake_access(access_id="new_1",
access_adr="10.0.0.5")
old_access_1 = create_fake_access(access_id="old_1",
access_adr="10.0.0.1")
old_access_2 = create_fake_access(access_id="old_2",
access_adr="10.0.0.3")
old_access_3 = create_fake_access(access_id="old_3",
access_adr="10.0.0.4")
miss_access_1 = create_fake_access(access_id="old_3",
access_adr="10.0.0.4")
new_access_2 = create_fake_access(access_id="new_2",
access_adr="10.0.0.3",
access_level="ro")
new_access_rules = [new_access_1, old_access_1, old_access_2,
old_access_3, new_access_2]
qb_exist_mock.return_value = [old_access_1, old_access_2,
old_access_3, miss_access_1]
qb_subtr_mock.side_effect = [[new_access_1, new_access_2],
[miss_access_1, old_access_2]]
self._driver.update_access(self._context, self.share,
new_access_rules, add_rules=[],
delete_rules=[])
a_calls = [mock.call(self._context, self.share, new_access_1),
mock.call(self._context, self.share, new_access_2)]
qb_allow_mock.assert_has_calls(a_calls)
b_calls = [mock.call(self._context, self.share, miss_access_1),
mock.call(self._context, self.share, old_access_2)]
qb_deny_mock.assert_has_calls(b_calls)
qb_exist_mock.assert_called_once_with(self._context, self.share)
| true | true |
f7178564a143b7e9c9ec5547d04715becbaafd35 | 1,183 | py | Python | scripts/pyqtgraph-develop/examples/MultiPlotWidget.py | kuldeepaman/tf-pose | 8050912c52a7b4f3c8a2656f267d47ba21d093f6 | [
"Apache-2.0"
] | null | null | null | scripts/pyqtgraph-develop/examples/MultiPlotWidget.py | kuldeepaman/tf-pose | 8050912c52a7b4f3c8a2656f267d47ba21d093f6 | [
"Apache-2.0"
] | null | null | null | scripts/pyqtgraph-develop/examples/MultiPlotWidget.py | kuldeepaman/tf-pose | 8050912c52a7b4f3c8a2656f267d47ba21d093f6 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python
# -*- coding: utf-8 -*-
## Add path to library (just for examples; you do not need this)
import initExample
from scipy import random
from numpy import linspace
from pyqtgraph.Qt import QtGui, QtCore
import pyqtgraph as pg
from pyqtgraph import MultiPlotWidget
try:
from pyqtgraph.metaarray import *
except:
print("MultiPlot is only used with MetaArray for now (and you do not have the metaarray package)")
exit()
app = QtGui.QApplication([])
mw = QtGui.QMainWindow()
mw.resize(800,800)
pw = MultiPlotWidget()
mw.setCentralWidget(pw)
mw.show()
data = random.normal(size=(3, 1000)) * np.array([[0.1], [1e-5], [1]])
ma = MetaArray(data, info=[
{'name': 'Signal', 'cols': [
{'name': 'Col1', 'units': 'V'},
{'name': 'Col2', 'units': 'A'},
{'name': 'Col3'},
]},
{'name': 'Time', 'values': linspace(0., 1., 1000), 'units': 's'}
])
pw.plot(ma)
## Start Qt event loop unless running in interactive mode.
if __name__ == '__main__':
import sys
if (sys.flags.interactive != 1) or not hasattr(QtCore, 'PYQT_VERSION'):
QtGui.QApplication.instance().exec_()
| 28.166667 | 103 | 0.618766 |
port linspace
from pyqtgraph.Qt import QtGui, QtCore
import pyqtgraph as pg
from pyqtgraph import MultiPlotWidget
try:
from pyqtgraph.metaarray import *
except:
print("MultiPlot is only used with MetaArray for now (and you do not have the metaarray package)")
exit()
app = QtGui.QApplication([])
mw = QtGui.QMainWindow()
mw.resize(800,800)
pw = MultiPlotWidget()
mw.setCentralWidget(pw)
mw.show()
data = random.normal(size=(3, 1000)) * np.array([[0.1], [1e-5], [1]])
ma = MetaArray(data, info=[
{'name': 'Signal', 'cols': [
{'name': 'Col1', 'units': 'V'},
{'name': 'Col2', 'units': 'A'},
{'name': 'Col3'},
]},
{'name': 'Time', 'values': linspace(0., 1., 1000), 'units': 's'}
])
pw.plot(ma)
lags.interactive != 1) or not hasattr(QtCore, 'PYQT_VERSION'):
QtGui.QApplication.instance().exec_()
| true | true |
f71786f5f39627c1fac7f1b3ad0367fe2a1feb53 | 1,888 | py | Python | silk-src/src/pysilk/python/_netsa_silk.py | mjschultz/netsa-pkg | 07bf4ff29a73ebc0f58e4aa27d3ad6b1dee7fc83 | [
"Apache-2.0"
] | 3 | 2018-06-01T06:55:14.000Z | 2021-11-14T22:51:04.000Z | silk-src/src/pysilk/python/_netsa_silk.py | mjschultz/netsa-pkg | 07bf4ff29a73ebc0f58e4aa27d3ad6b1dee7fc83 | [
"Apache-2.0"
] | 3 | 2017-07-02T17:03:34.000Z | 2021-09-09T17:05:31.000Z | silk-src/src/pysilk/python/_netsa_silk.py | mjschultz/netsa-pkg | 07bf4ff29a73ebc0f58e4aa27d3ad6b1dee7fc83 | [
"Apache-2.0"
] | 4 | 2017-08-14T15:42:31.000Z | 2022-01-24T16:24:27.000Z | #######################################################################
# Copyright (C) 2011-2020 by Carnegie Mellon University.
#
# @OPENSOURCE_LICENSE_START@
# See license information in ../../../LICENSE.txt
# @OPENSOURCE_LICENSE_END@
#
#######################################################################
#######################################################################
# $SiLK: _netsa_silk.py ef14e54179be 2020-04-14 21:57:45Z mthomas $
#######################################################################
"""
The netsa_silk module contains a shared API for working with common
Internet data in both netsa-python and PySiLK. If netsa-python is
installed but PySiLK is not, the less efficient but more portable
pure-Python version of this functionality that is included in
netsa-python is used. If PySiLK is installed, then the
high-performance C version of this functionality that is included in
PySiLK is used.
"""
# This module provides the symbols exported by PySiLK for the
# netsa_silk API. It exists to rename PySiLK symbols that have a
# different name from the netsa_silk symbols, and to constrain the set
# of PySiLK symbols that are exported. If a new symbol is added (to
# provide a new feature), it need only be added here and it will
# automatically be exported by netsa_silk.
from silk import (
ipv6_enabled as has_IPv6Addr,
IPAddr, IPv4Addr, IPv6Addr,
IPSet as ip_set,
IPWildcard,
TCPFlags,
TCP_FIN, TCP_SYN, TCP_RST, TCP_PSH, TCP_ACK, TCP_URG, TCP_ECE, TCP_CWR,
silk_version
)
# Version of the netsa_silk API contract exported by this module.
__version__ = "1.0"
# Human-readable identifier of the backing implementation (SiLK + its version).
__impl_version__ = " ".join(["SiLK", silk_version()])
# Public API surface: whitespace-separated names, split into the list that
# `from netsa_silk import *` will export.
__all__ = """
has_IPv6Addr
IPAddr IPv4Addr IPv6Addr
ip_set
IPWildcard
TCPFlags
TCP_FIN TCP_SYN TCP_RST TCP_PSH TCP_ACK TCP_URG TCP_ECE TCP_CWR
__version__
__impl_version__
""".split()
| 31.466667 | 75 | 0.632945 | true | true | |
f717870bb59f4f607b6c5181895c66f109cd7bcf | 523 | py | Python | juriscraper/opinions/united_states/state/calctapp_1st.py | EvandoBlanco/juriscraper | 3d16af258620d4ba1b4827f66ef69e8a2c5a0484 | [
"BSD-2-Clause"
] | 228 | 2015-01-23T04:41:39.000Z | 2022-03-30T09:52:20.000Z | juriscraper/opinions/united_states/state/calctapp_1st.py | EvandoBlanco/juriscraper | 3d16af258620d4ba1b4827f66ef69e8a2c5a0484 | [
"BSD-2-Clause"
] | 331 | 2015-01-05T18:53:40.000Z | 2022-03-29T23:43:30.000Z | juriscraper/opinions/united_states/state/calctapp_1st.py | EvandoBlanco/juriscraper | 3d16af258620d4ba1b4827f66ef69e8a2c5a0484 | [
"BSD-2-Clause"
] | 84 | 2015-01-03T01:19:21.000Z | 2022-03-01T08:09:32.000Z | # Scraper for California's First District Court of Appeal
# CourtID: calctapp_1st
# Court Short Name: Cal. Ct. App.
from juriscraper.opinions.united_states.state import cal
class Site(cal.Site):
def __init__(self, *args, **kwargs):
super(Site, self).__init__(*args, **kwargs)
self.court_id = self.__module__
self.court_code = "A"
self.division = "1st App. Dist."
self.url = self.build_url()
def _get_divisions(self):
return [self.division] * len(self.case_names)
| 29.055556 | 57 | 0.667304 |
# CourtID: calctapp_1st
# Court Short Name: Cal. Ct. App.
from juriscraper.opinions.united_states.state import cal
class Site(cal.Site):
def __init__(self, *args, **kwargs):
super(Site, self).__init__(*args, **kwargs)
self.court_id = self.__module__
self.court_code = "A"
self.division = "1st App. Dist."
self.url = self.build_url()
def _get_divisions(self):
return [self.division] * len(self.case_names)
| true | true |
f71787192e7a235fa2e02fbfdcda490a79b3300e | 12,258 | py | Python | source/deepsecurity/models/anti_malware_computer_extension.py | felipecosta09/cloudone-workload-controltower-lifecycle | 7927c84d164058b034fc872701b5ee117641f4d1 | [
"Apache-2.0"
] | 1 | 2021-10-30T16:40:09.000Z | 2021-10-30T16:40:09.000Z | source/deepsecurity/models/anti_malware_computer_extension.py | felipecosta09/cloudone-workload-controltower-lifecycle | 7927c84d164058b034fc872701b5ee117641f4d1 | [
"Apache-2.0"
] | 1 | 2021-07-28T20:19:03.000Z | 2021-07-28T20:19:03.000Z | source/deepsecurity/models/anti_malware_computer_extension.py | felipecosta09/cloudone-workload-controltower-lifecycle | 7927c84d164058b034fc872701b5ee117641f4d1 | [
"Apache-2.0"
] | 1 | 2021-10-30T16:40:02.000Z | 2021-10-30T16:40:02.000Z | # coding: utf-8
"""
Trend Micro Deep Security API
Copyright 2018 - 2020 Trend Micro Incorporated.<br/>Get protected, stay secured, and keep informed with Trend Micro Deep Security's new RESTful API. Access system data and manage security configurations to automate your security workflows and integrate Deep Security into your CI/CD pipeline. # noqa: E501
OpenAPI spec version: 12.5.841
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from deepsecurity.models.computer_module_status import ComputerModuleStatus # noqa: F401,E501
class AntiMalwareComputerExtension(object):
    """Anti-Malware module settings of a computer (Swagger-generated model).

    Holds the module state, the IDs of the scan configurations/schedules
    assigned to the computer, and the timestamps of the last manual and
    scheduled scans.

    Changes from the generated original: the unnecessary ``six`` indirection
    in :meth:`to_dict` is replaced with plain ``dict.items()`` (equivalent on
    Python 3), and the dead ``issubclass(AntiMalwareComputerExtension, dict)``
    branch — always False, since this class derives from ``object`` — is
    removed. The public interface is unchanged.
    """

    # Attribute name -> swagger type name, used by the API client for
    # (de)serialization.
    swagger_types = {
        'state': 'str',
        'module_status': 'ComputerModuleStatus',
        'real_time_scan_configuration_id': 'int',
        'real_time_scan_schedule_id': 'int',
        'manual_scan_configuration_id': 'int',
        'scheduled_scan_configuration_id': 'int',
        'last_manual_scan': 'int',
        'last_scheduled_scan': 'int'
    }

    # Attribute name -> JSON key in the API payload.
    attribute_map = {
        'state': 'state',
        'module_status': 'moduleStatus',
        'real_time_scan_configuration_id': 'realTimeScanConfigurationID',
        'real_time_scan_schedule_id': 'realTimeScanScheduleID',
        'manual_scan_configuration_id': 'manualScanConfigurationID',
        'scheduled_scan_configuration_id': 'scheduledScanConfigurationID',
        'last_manual_scan': 'lastManualScan',
        'last_scheduled_scan': 'lastScheduledScan'
    }

    def __init__(self, state=None, module_status=None, real_time_scan_configuration_id=None, real_time_scan_schedule_id=None, manual_scan_configuration_id=None, scheduled_scan_configuration_id=None, last_manual_scan=None, last_scheduled_scan=None):
        """AntiMalwareComputerExtension - a model defined in Swagger.

        All arguments are optional; ``None`` leaves the corresponding
        attribute unset. Non-None values go through the property setters
        (so ``state`` is validated on construction).
        """
        self._state = None
        self._module_status = None
        self._real_time_scan_configuration_id = None
        self._real_time_scan_schedule_id = None
        self._manual_scan_configuration_id = None
        self._scheduled_scan_configuration_id = None
        self._last_manual_scan = None
        self._last_scheduled_scan = None
        self.discriminator = None
        if state is not None:
            self.state = state
        if module_status is not None:
            self.module_status = module_status
        if real_time_scan_configuration_id is not None:
            self.real_time_scan_configuration_id = real_time_scan_configuration_id
        if real_time_scan_schedule_id is not None:
            self.real_time_scan_schedule_id = real_time_scan_schedule_id
        if manual_scan_configuration_id is not None:
            self.manual_scan_configuration_id = manual_scan_configuration_id
        if scheduled_scan_configuration_id is not None:
            self.scheduled_scan_configuration_id = scheduled_scan_configuration_id
        if last_manual_scan is not None:
            self.last_manual_scan = last_manual_scan
        if last_scheduled_scan is not None:
            self.last_scheduled_scan = last_scheduled_scan

    @property
    def state(self):
        """Module state; one of ``"inherited"``, ``"on"``, ``"off"``."""
        return self._state

    @state.setter
    def state(self, state):
        """Set the module state.

        :raises ValueError: if *state* is not an allowed value.
        """
        allowed_values = ["inherited", "on", "off"]
        if state not in allowed_values:
            raise ValueError(
                "Invalid value for `state` ({0}), must be one of {1}"
                .format(state, allowed_values)
            )
        self._state = state

    @property
    def module_status(self):
        """The :class:`ComputerModuleStatus` of the Anti-Malware module."""
        return self._module_status

    @module_status.setter
    def module_status(self, module_status):
        self._module_status = module_status

    @property
    def real_time_scan_configuration_id(self):
        """ID of the Real Time Scan Configuration (int)."""
        return self._real_time_scan_configuration_id

    @real_time_scan_configuration_id.setter
    def real_time_scan_configuration_id(self, real_time_scan_configuration_id):
        self._real_time_scan_configuration_id = real_time_scan_configuration_id

    @property
    def real_time_scan_schedule_id(self):
        """ID of the Real Time Schedule (int)."""
        return self._real_time_scan_schedule_id

    @real_time_scan_schedule_id.setter
    def real_time_scan_schedule_id(self, real_time_scan_schedule_id):
        self._real_time_scan_schedule_id = real_time_scan_schedule_id

    @property
    def manual_scan_configuration_id(self):
        """ID of the Manual Scan Configuration (int)."""
        return self._manual_scan_configuration_id

    @manual_scan_configuration_id.setter
    def manual_scan_configuration_id(self, manual_scan_configuration_id):
        self._manual_scan_configuration_id = manual_scan_configuration_id

    @property
    def scheduled_scan_configuration_id(self):
        """ID of the Scheduled Scan Configuration (int)."""
        return self._scheduled_scan_configuration_id

    @scheduled_scan_configuration_id.setter
    def scheduled_scan_configuration_id(self, scheduled_scan_configuration_id):
        self._scheduled_scan_configuration_id = scheduled_scan_configuration_id

    @property
    def last_manual_scan(self):
        """Timestamp of the last manual malware scan, in ms since epoch."""
        return self._last_manual_scan

    @last_manual_scan.setter
    def last_manual_scan(self, last_manual_scan):
        self._last_manual_scan = last_manual_scan

    @property
    def last_scheduled_scan(self):
        """Timestamp of the last scheduled malware scan, in ms since epoch."""
        return self._last_scheduled_scan

    @last_scheduled_scan.setter
    def last_scheduled_scan(self, last_scheduled_scan):
        self._last_scheduled_scan = last_scheduled_scan

    def to_dict(self):
        """Return the model properties as a dict, recursing into nested models."""
        result = {}
        for attr in self.swagger_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [
                    item.to_dict() if hasattr(item, "to_dict") else item
                    for item in value
                ]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {
                    key: val.to_dict() if hasattr(val, "to_dict") else val
                    for key, val in value.items()
                }
            else:
                result[attr] = value
        return result

    def to_str(self):
        """Return the string representation of the model."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`."""
        return self.to_str()

    def __eq__(self, other):
        """Return True if both objects are of this type and attribute-equal."""
        if not isinstance(other, AntiMalwareComputerExtension):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Return True if both objects are not equal."""
        return not self == other
| 38.186916 | 311 | 0.660548 |
import pprint
import re
import six
from deepsecurity.models.computer_module_status import ComputerModuleStatus
class AntiMalwareComputerExtension(object):
swagger_types = {
'state': 'str',
'module_status': 'ComputerModuleStatus',
'real_time_scan_configuration_id': 'int',
'real_time_scan_schedule_id': 'int',
'manual_scan_configuration_id': 'int',
'scheduled_scan_configuration_id': 'int',
'last_manual_scan': 'int',
'last_scheduled_scan': 'int'
}
attribute_map = {
'state': 'state',
'module_status': 'moduleStatus',
'real_time_scan_configuration_id': 'realTimeScanConfigurationID',
'real_time_scan_schedule_id': 'realTimeScanScheduleID',
'manual_scan_configuration_id': 'manualScanConfigurationID',
'scheduled_scan_configuration_id': 'scheduledScanConfigurationID',
'last_manual_scan': 'lastManualScan',
'last_scheduled_scan': 'lastScheduledScan'
}
def __init__(self, state=None, module_status=None, real_time_scan_configuration_id=None, real_time_scan_schedule_id=None, manual_scan_configuration_id=None, scheduled_scan_configuration_id=None, last_manual_scan=None, last_scheduled_scan=None):
self._state = None
self._module_status = None
self._real_time_scan_configuration_id = None
self._real_time_scan_schedule_id = None
self._manual_scan_configuration_id = None
self._scheduled_scan_configuration_id = None
self._last_manual_scan = None
self._last_scheduled_scan = None
self.discriminator = None
if state is not None:
self.state = state
if module_status is not None:
self.module_status = module_status
if real_time_scan_configuration_id is not None:
self.real_time_scan_configuration_id = real_time_scan_configuration_id
if real_time_scan_schedule_id is not None:
self.real_time_scan_schedule_id = real_time_scan_schedule_id
if manual_scan_configuration_id is not None:
self.manual_scan_configuration_id = manual_scan_configuration_id
if scheduled_scan_configuration_id is not None:
self.scheduled_scan_configuration_id = scheduled_scan_configuration_id
if last_manual_scan is not None:
self.last_manual_scan = last_manual_scan
if last_scheduled_scan is not None:
self.last_scheduled_scan = last_scheduled_scan
@property
def state(self):
return self._state
@state.setter
def state(self, state):
allowed_values = ["inherited", "on", "off"]
if state not in allowed_values:
raise ValueError(
"Invalid value for `state` ({0}), must be one of {1}"
.format(state, allowed_values)
)
self._state = state
@property
def module_status(self):
return self._module_status
@module_status.setter
def module_status(self, module_status):
self._module_status = module_status
@property
def real_time_scan_configuration_id(self):
return self._real_time_scan_configuration_id
@real_time_scan_configuration_id.setter
def real_time_scan_configuration_id(self, real_time_scan_configuration_id):
self._real_time_scan_configuration_id = real_time_scan_configuration_id
@property
def real_time_scan_schedule_id(self):
return self._real_time_scan_schedule_id
@real_time_scan_schedule_id.setter
def real_time_scan_schedule_id(self, real_time_scan_schedule_id):
self._real_time_scan_schedule_id = real_time_scan_schedule_id
@property
def manual_scan_configuration_id(self):
return self._manual_scan_configuration_id
@manual_scan_configuration_id.setter
def manual_scan_configuration_id(self, manual_scan_configuration_id):
self._manual_scan_configuration_id = manual_scan_configuration_id
@property
def scheduled_scan_configuration_id(self):
return self._scheduled_scan_configuration_id
@scheduled_scan_configuration_id.setter
def scheduled_scan_configuration_id(self, scheduled_scan_configuration_id):
self._scheduled_scan_configuration_id = scheduled_scan_configuration_id
@property
def last_manual_scan(self):
return self._last_manual_scan
@last_manual_scan.setter
def last_manual_scan(self, last_manual_scan):
self._last_manual_scan = last_manual_scan
@property
def last_scheduled_scan(self):
return self._last_scheduled_scan
@last_scheduled_scan.setter
def last_scheduled_scan(self, last_scheduled_scan):
self._last_scheduled_scan = last_scheduled_scan
def to_dict(self):
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(AntiMalwareComputerExtension, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
return pprint.pformat(self.to_dict())
def __repr__(self):
return self.to_str()
def __eq__(self, other):
if not isinstance(other, AntiMalwareComputerExtension):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
| true | true |
f71787b41422d7fb8b33c9454081d2bd60c5fe8f | 1,150 | py | Python | cirq/contrib/svg/svg_test.py | lilies/Cirq | 519b8b70ba4d2d92d1c034c398161ebdbd23e2e7 | [
"Apache-2.0"
] | 3 | 2020-09-26T03:56:28.000Z | 2020-09-27T13:21:04.000Z | cirq/contrib/svg/svg_test.py | lilies/Cirq | 519b8b70ba4d2d92d1c034c398161ebdbd23e2e7 | [
"Apache-2.0"
] | 1 | 2020-08-11T15:45:17.000Z | 2020-08-11T15:45:17.000Z | cirq/contrib/svg/svg_test.py | lilies/Cirq | 519b8b70ba4d2d92d1c034c398161ebdbd23e2e7 | [
"Apache-2.0"
] | 1 | 2020-04-14T15:29:29.000Z | 2020-04-14T15:29:29.000Z | import pytest
import numpy as np
import cirq
from cirq.contrib.svg import circuit_to_svg
def test_svg():
    """Smoke test: a circuit mixing several gate types renders to SVG."""
    q0, q1, q2 = cirq.LineQubit.range(3)
    operations = [
        cirq.CNOT(q0, q1),
        cirq.CZ(q1, q2),
        cirq.SWAP(q0, q2),
        cirq.PhasedXPowGate(exponent=0.123, phase_exponent=0.456).on(q2),
        cirq.Z(q0),
        cirq.measure(q0, q1, q2, key='z'),
        cirq.MatrixGate(np.eye(2)).on(q0),
    ]
    rendered = circuit_to_svg(cirq.Circuit(operations))
    # Just check we got a well-delimited SVG document back.
    assert '<svg' in rendered
    assert '</svg>' in rendered
def test_svg_noise():
    """The SVG of a noisy circuit labels the depolarizing channel."""
    qubit = cirq.LineQubit(0)
    model = cirq.ConstantQubitNoiseModel(cirq.DepolarizingChannel(p=1e-3))
    clean = cirq.Circuit(cirq.X(qubit))
    noisy = cirq.Circuit(model.noisy_moments(clean.moments, [qubit]))
    assert '>D(0.001)</text>' in circuit_to_svg(noisy)
def test_validation():
    """circuit_to_svg rejects empty circuits and empty trailing moments."""
    with pytest.raises(ValueError):
        circuit_to_svg(cirq.Circuit())
    qubit = cirq.LineQubit(0)
    has_empty_moment = cirq.Circuit(
        [cirq.Moment([cirq.X(qubit)]), cirq.Moment([])])
    with pytest.raises(ValueError):
        circuit_to_svg(has_empty_moment)
| 26.744186 | 80 | 0.6 | import pytest
import numpy as np
import cirq
from cirq.contrib.svg import circuit_to_svg
def test_svg():
a, b, c = cirq.LineQubit.range(3)
svg_text = circuit_to_svg(
cirq.Circuit(
cirq.CNOT(a, b),
cirq.CZ(b, c),
cirq.SWAP(a, c),
cirq.PhasedXPowGate(exponent=0.123, phase_exponent=0.456).on(c),
cirq.Z(a),
cirq.measure(a, b, c, key='z'),
cirq.MatrixGate(np.eye(2)).on(a),
))
assert '<svg' in svg_text
assert '</svg>' in svg_text
def test_svg_noise():
noise_model = cirq.ConstantQubitNoiseModel(cirq.DepolarizingChannel(p=1e-3))
q = cirq.LineQubit(0)
circuit = cirq.Circuit(cirq.X(q))
circuit = cirq.Circuit(noise_model.noisy_moments(circuit.moments, [q]))
svg = circuit_to_svg(circuit)
assert '>D(0.001)</text>' in svg
def test_validation():
with pytest.raises(ValueError):
circuit_to_svg(cirq.Circuit())
q0 = cirq.LineQubit(0)
with pytest.raises(ValueError):
circuit_to_svg(
cirq.Circuit([cirq.Moment([cirq.X(q0)]),
cirq.Moment([])]))
| true | true |
f71788ff490545bb580c7e52b9c71e363cbb8d15 | 6,165 | py | Python | recipes/libtool/all/test_package/conanfile.py | dpronin/conan-center-index | 5c6e41a618097d04e731c9831118a51dcb39ab3f | [
"MIT"
] | 1 | 2021-11-11T03:07:13.000Z | 2021-11-11T03:07:13.000Z | recipes/libtool/all/test_package/conanfile.py | dpronin/conan-center-index | 5c6e41a618097d04e731c9831118a51dcb39ab3f | [
"MIT"
] | 1 | 2021-11-22T13:54:48.000Z | 2021-11-22T14:09:45.000Z | recipes/libtool/all/test_package/conanfile.py | dpronin/conan-center-index | 5c6e41a618097d04e731c9831118a51dcb39ab3f | [
"MIT"
] | null | null | null | from conans import AutoToolsBuildEnvironment, CMake, ConanFile, tools
from contextlib import contextmanager
import glob
import os
import shutil
class TestPackageConan(ConanFile):
    # Exercises the packaged libtool three ways: a CMake build against ltdl,
    # a full autotools flow driven by the packaged autoreconf, and linking a
    # CMake-built static library into a libtool-built shared library.
    settings = "os", "compiler", "build_type", "arch"
    generators = "cmake"
    test_type = "explicit"
    short_paths = True
    @property
    def _settings_build(self):
        # Build-machine settings; falls back to host settings when no
        # two-profile (build/host) context is active.
        return getattr(self, "settings_build", self.settings)
    def requirements(self):
        self.requires(self.tested_reference_str)
    def build_requirements(self):
        # libtool is also needed as a build tool (libtoolize, m4 macros).
        self.build_requires(self.tested_reference_str)
        if self._settings_build.os == "Windows" and not tools.get_env("CONAN_BASH_PATH"):
            self.build_requires("msys2/cci.latest")
    @contextmanager
    def _build_context(self):
        # For MSVC, route CC/CXX/AR through automake's compile/ar-lib wrapper
        # scripts so autotools can drive cl.exe/lib.exe; no-op otherwise.
        if self.settings.compiler == "Visual Studio":
            with tools.vcvars(self.settings):
                with tools.environment_append({
                    "CC": "{} cl -nologo".format(tools.unix_path(self.deps_user_info["automake"].compile)),
                    "CXX": "{} cl -nologo".format(tools.unix_path(self.deps_user_info["automake"].compile)),
                    "AR": "{} lib".format(tools.unix_path(self.deps_user_info["automake"].ar_lib)),
                    "LD": "link",
                }):
                    yield
        else:
            yield
    @property
    def _package_folder(self):
        # Staging prefix used by the autotools `make install` below.
        return os.path.join(self.build_folder, "package")
    def _build_autotools(self):
        """ Test autotools integration """
        # Copy autotools directory to build folder
        shutil.copytree(os.path.join(self.source_folder, "autotools"), os.path.join(self.build_folder, "autotools"))
        with tools.chdir("autotools"):
            # Regenerate configure/Makefile.in with the packaged autoreconf.
            self.run("{} --install --verbose -Wall".format(os.environ["AUTORECONF"]), win_bash=tools.os_info.is_windows)
        tools.mkdir(self._package_folder)
        conf_args = [
            "--prefix={}".format(tools.unix_path(self._package_folder)),
            "--enable-shared", "--enable-static",
        ]
        # Configure/build out of tree so the copied sources stay pristine.
        os.mkdir("bin_autotools")
        with tools.chdir("bin_autotools"):
            with self._build_context():
                autotools = AutoToolsBuildEnvironment(self, win_bash=tools.os_info.is_windows)
                autotools.libs = []
                autotools.configure(args=conf_args, configure_dir=os.path.join(self.build_folder, "autotools"))
                autotools.make(args=["V=1"])
                autotools.install()
    def _test_autotools(self):
        # Verify the install layout, then run the built executable natively.
        assert os.path.isdir(os.path.join(self._package_folder, "bin"))
        assert os.path.isfile(os.path.join(self._package_folder, "include", "lib.h"))
        assert os.path.isdir(os.path.join(self._package_folder, "lib"))
        if not tools.cross_building(self):
            self.run(os.path.join(self._package_folder, "bin", "test_package"), run_environment=True)
    def _build_ltdl(self):
        """ Build library using ltdl library """
        cmake = CMake(self)
        cmake.configure(source_folder="ltdl")
        cmake.build()
    def _test_ltdl(self):
        """ Test library using ltdl library"""
        # Platform-specific shared-library extension.
        lib_suffix = {
            "Linux": "so",
            "FreeBSD": "so",
            "Macos": "dylib",
            "Windows": "dll",
        }[str(self.settings.os)]
        if not tools.cross_building(self):
            bin_path = os.path.join("bin", "test_package")
            # On Windows the DLL is emitted next to the executable (bin/);
            # elsewhere it lands under lib/.
            libdir = "bin" if self.settings.os == "Windows" else "lib"
            lib_path = os.path.join(libdir, "liba.{}".format(lib_suffix))
            # The library path is passed as an argument; presumably the test
            # program loads it via ltdl at runtime — see the ltdl/ sources.
            self.run("{} {}".format(bin_path, lib_path), run_environment=True)
    def _build_static_lib_in_shared(self):
        """ Build shared library using libtool (while linking to a static library) """
        # Copy static-in-shared directory to build folder
        autotools_folder = os.path.join(self.build_folder, "sis")
        shutil.copytree(os.path.join(self.source_folder, "sis"), autotools_folder)
        install_prefix = os.path.join(autotools_folder, "prefix")
        # Build static library using CMake
        cmake = CMake(self)
        cmake.definitions["CMAKE_INSTALL_PREFIX"] = install_prefix
        cmake.configure(source_folder=autotools_folder, build_folder=os.path.join(autotools_folder, "cmake_build"))
        cmake.build()
        cmake.install()
        # Copy autotools directory to build folder
        with tools.chdir(autotools_folder):
            self.run("{} -ifv -Wall".format(os.environ["AUTORECONF"]), win_bash=tools.os_info.is_windows)
        with tools.chdir(autotools_folder):
            conf_args = [
                "--enable-shared",
                "--disable-static",
                "--prefix={}".format(tools.unix_path(os.path.join(install_prefix))),
            ]
            with self._build_context():
                autotools = AutoToolsBuildEnvironment(self, win_bash=tools.os_info.is_windows)
                autotools.libs = []
                # Let libtool find the CMake-installed static library.
                autotools.link_flags.append("-L{}".format(tools.unix_path(os.path.join(install_prefix, "lib"))))
                autotools.configure(args=conf_args, configure_dir=autotools_folder)
                autotools.make(args=["V=1"])
                autotools.install()
    def _test_static_lib_in_shared(self):
        """ Test existence of shared library """
        install_prefix = os.path.join(self.build_folder, "sis", "prefix")
        with tools.chdir(install_prefix):
            if self.settings.os == "Windows":
                assert len(list(glob.glob(os.path.join("bin", "*.dll")))) > 0
            elif tools.is_apple_os(self.settings.os):
                assert len(list(glob.glob(os.path.join("lib", "*.dylib")))) > 0
            else:
                assert len(list(glob.glob(os.path.join("lib", "*.so")))) > 0
    def build(self):
        self._build_ltdl()
        # The autotools flows run the results, so skip them when cross-building.
        if not tools.cross_building(self):
            self._build_autotools()
            self._build_static_lib_in_shared()
    def test(self):
        self._test_ltdl()
        if not tools.cross_building(self):
            self._test_autotools()
            self._test_static_lib_in_shared()
| 40.559211 | 120 | 0.610868 | from conans import AutoToolsBuildEnvironment, CMake, ConanFile, tools
from contextlib import contextmanager
import glob
import os
import shutil
class TestPackageConan(ConanFile):
settings = "os", "compiler", "build_type", "arch"
generators = "cmake"
test_type = "explicit"
short_paths = True
@property
def _settings_build(self):
return getattr(self, "settings_build", self.settings)
def requirements(self):
self.requires(self.tested_reference_str)
def build_requirements(self):
self.build_requires(self.tested_reference_str)
if self._settings_build.os == "Windows" and not tools.get_env("CONAN_BASH_PATH"):
self.build_requires("msys2/cci.latest")
@contextmanager
def _build_context(self):
if self.settings.compiler == "Visual Studio":
with tools.vcvars(self.settings):
with tools.environment_append({
"CC": "{} cl -nologo".format(tools.unix_path(self.deps_user_info["automake"].compile)),
"CXX": "{} cl -nologo".format(tools.unix_path(self.deps_user_info["automake"].compile)),
"AR": "{} lib".format(tools.unix_path(self.deps_user_info["automake"].ar_lib)),
"LD": "link",
}):
yield
else:
yield
@property
def _package_folder(self):
return os.path.join(self.build_folder, "package")
def _build_autotools(self):
shutil.copytree(os.path.join(self.source_folder, "autotools"), os.path.join(self.build_folder, "autotools"))
with tools.chdir("autotools"):
self.run("{} --install --verbose -Wall".format(os.environ["AUTORECONF"]), win_bash=tools.os_info.is_windows)
tools.mkdir(self._package_folder)
conf_args = [
"--prefix={}".format(tools.unix_path(self._package_folder)),
"--enable-shared", "--enable-static",
]
os.mkdir("bin_autotools")
with tools.chdir("bin_autotools"):
with self._build_context():
autotools = AutoToolsBuildEnvironment(self, win_bash=tools.os_info.is_windows)
autotools.libs = []
autotools.configure(args=conf_args, configure_dir=os.path.join(self.build_folder, "autotools"))
autotools.make(args=["V=1"])
autotools.install()
def _test_autotools(self):
assert os.path.isdir(os.path.join(self._package_folder, "bin"))
assert os.path.isfile(os.path.join(self._package_folder, "include", "lib.h"))
assert os.path.isdir(os.path.join(self._package_folder, "lib"))
if not tools.cross_building(self):
self.run(os.path.join(self._package_folder, "bin", "test_package"), run_environment=True)
def _build_ltdl(self):
cmake = CMake(self)
cmake.configure(source_folder="ltdl")
cmake.build()
def _test_ltdl(self):
lib_suffix = {
"Linux": "so",
"FreeBSD": "so",
"Macos": "dylib",
"Windows": "dll",
}[str(self.settings.os)]
if not tools.cross_building(self):
bin_path = os.path.join("bin", "test_package")
libdir = "bin" if self.settings.os == "Windows" else "lib"
lib_path = os.path.join(libdir, "liba.{}".format(lib_suffix))
self.run("{} {}".format(bin_path, lib_path), run_environment=True)
def _build_static_lib_in_shared(self):
autotools_folder = os.path.join(self.build_folder, "sis")
shutil.copytree(os.path.join(self.source_folder, "sis"), autotools_folder)
install_prefix = os.path.join(autotools_folder, "prefix")
cmake = CMake(self)
cmake.definitions["CMAKE_INSTALL_PREFIX"] = install_prefix
cmake.configure(source_folder=autotools_folder, build_folder=os.path.join(autotools_folder, "cmake_build"))
cmake.build()
cmake.install()
with tools.chdir(autotools_folder):
self.run("{} -ifv -Wall".format(os.environ["AUTORECONF"]), win_bash=tools.os_info.is_windows)
with tools.chdir(autotools_folder):
conf_args = [
"--enable-shared",
"--disable-static",
"--prefix={}".format(tools.unix_path(os.path.join(install_prefix))),
]
with self._build_context():
autotools = AutoToolsBuildEnvironment(self, win_bash=tools.os_info.is_windows)
autotools.libs = []
autotools.link_flags.append("-L{}".format(tools.unix_path(os.path.join(install_prefix, "lib"))))
autotools.configure(args=conf_args, configure_dir=autotools_folder)
autotools.make(args=["V=1"])
autotools.install()
def _test_static_lib_in_shared(self):
install_prefix = os.path.join(self.build_folder, "sis", "prefix")
with tools.chdir(install_prefix):
if self.settings.os == "Windows":
assert len(list(glob.glob(os.path.join("bin", "*.dll")))) > 0
elif tools.is_apple_os(self.settings.os):
assert len(list(glob.glob(os.path.join("lib", "*.dylib")))) > 0
else:
assert len(list(glob.glob(os.path.join("lib", "*.so")))) > 0
def build(self):
self._build_ltdl()
if not tools.cross_building(self):
self._build_autotools()
self._build_static_lib_in_shared()
def test(self):
self._test_ltdl()
if not tools.cross_building(self):
self._test_autotools()
self._test_static_lib_in_shared()
| true | true |
f71789c0f44a5dca7010d4a66926c97673dff301 | 4,401 | py | Python | models/backbone.py | playerkk/HoiTransformer | b710216d6b338863ebe9d40a96765ab52780cefa | [
"Apache-2.0"
] | 107 | 2021-03-03T13:31:32.000Z | 2022-03-31T10:59:45.000Z | models/backbone.py | playerkk/HoiTransformer | b710216d6b338863ebe9d40a96765ab52780cefa | [
"Apache-2.0"
] | 37 | 2021-03-10T11:36:49.000Z | 2022-02-22T03:58:12.000Z | models/backbone.py | playerkk/HoiTransformer | b710216d6b338863ebe9d40a96765ab52780cefa | [
"Apache-2.0"
] | 19 | 2021-03-17T13:21:03.000Z | 2022-02-09T09:48:58.000Z | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""
Backbone modules.
"""
import torch
import torch.nn.functional as F
import torchvision
from torch import nn
from torchvision.models._utils import IntermediateLayerGetter
from typing import Dict, List
from util.misc import NestedTensor, is_main_process
from .position_encoding import build_position_encoding
class FrozenBatchNorm2d(torch.nn.Module):
    """BatchNorm2d whose batch statistics and affine parameters are fixed.

    Adapted from torchvision.misc.ops, with an epsilon added before the
    reciprocal square root; without it, models other than
    torchvision.models.resnet[18,34,50,101] can produce NaNs.
    """

    def __init__(self, n):
        super(FrozenBatchNorm2d, self).__init__()
        # Buffers, not Parameters: excluded from gradient updates but still
        # saved to / restored from state dicts.
        self.register_buffer("weight", torch.ones(n))
        self.register_buffer("bias", torch.zeros(n))
        self.register_buffer("running_mean", torch.zeros(n))
        self.register_buffer("running_var", torch.ones(n))

    def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict,
                              missing_keys, unexpected_keys, error_msgs):
        # Checkpoints written by a regular BatchNorm2d carry a
        # num_batches_tracked entry that this frozen variant has no buffer
        # for; drop it so loading does not report an unexpected key.
        tracked_key = prefix + 'num_batches_tracked'
        state_dict.pop(tracked_key, None)
        super(FrozenBatchNorm2d, self)._load_from_state_dict(
            state_dict, prefix, local_metadata, strict,
            missing_keys, unexpected_keys, error_msgs)

    def forward(self, x):
        # Reshape the per-channel buffers to (1, C, 1, 1) up front so the
        # arithmetic below broadcasts over N, H, W (and stays fuser-friendly).
        gamma = self.weight.reshape(1, -1, 1, 1)
        beta = self.bias.reshape(1, -1, 1, 1)
        var = self.running_var.reshape(1, -1, 1, 1)
        mean = self.running_mean.reshape(1, -1, 1, 1)
        eps = 1e-5
        # y = gamma * (x - mean) / sqrt(var + eps) + beta, folded into a
        # single scale/shift per channel.
        mult = gamma * (var + eps).rsqrt()
        shift = beta - mean * mult
        return x * mult + shift
class BackboneBase(nn.Module):
    """Adapter that runs a CNN backbone over a NestedTensor and returns, per
    selected layer, a NestedTensor of (feature map, downsampled padding mask).
    """

    def __init__(self, backbone: nn.Module, train_backbone: bool, num_channels: int, return_interm_layers: bool):
        super().__init__()
        # Freeze every parameter except those of layer2/3/4 — and freeze those
        # as well when the backbone is not being trained.
        for param_name, param in backbone.named_parameters():
            in_tunable_stage = ('layer2' in param_name or 'layer3' in param_name
                                or 'layer4' in param_name)
            if not (train_backbone and in_tunable_stage):
                param.requires_grad_(False)
        if return_interm_layers:
            # Expose the output of every residual stage, keyed "0".."3".
            layer_map = {"layer1": "0", "layer2": "1", "layer3": "2", "layer4": "3"}
        else:
            # Only the final stage's output is needed.
            layer_map = {'layer4': "0"}
        self.body = IntermediateLayerGetter(backbone, return_layers=layer_map)
        self.num_channels = num_channels

    def forward(self, tensor_list: NestedTensor):
        feature_maps = self.body(tensor_list.tensors)
        out: Dict[str, NestedTensor] = {}
        for layer_name, feat in feature_maps.items():
            pad_mask = tensor_list.mask
            assert pad_mask is not None
            # Resize the boolean padding mask to this feature map's spatial size.
            resized = F.interpolate(pad_mask[None].float(), size=feat.shape[-2:])
            out[layer_name] = NestedTensor(feat, resized.to(torch.bool)[0])
        return out
class Backbone(BackboneBase):
    """torchvision ResNet backbone with frozen BatchNorm layers."""

    def __init__(self, name: str,
                 train_backbone: bool,
                 return_interm_layers: bool,
                 dilation: bool):
        resnet_ctor = getattr(torchvision.models, name)
        # Pretrained weights are fetched only on the main process (presumably
        # to avoid concurrent downloads in distributed runs — see
        # is_main_process); BatchNorm is swapped for the frozen variant.
        net = resnet_ctor(replace_stride_with_dilation=[False, False, dilation],
                          pretrained=is_main_process(),
                          norm_layer=FrozenBatchNorm2d)
        # resnet18/34 end in 512 channels; the deeper variants end in 2048.
        out_channels = 512 if name in ('resnet18', 'resnet34') else 2048
        super().__init__(net, train_backbone, out_channels, return_interm_layers)
class Joiner(nn.Sequential):
    """Two-stage pipeline: self[0] is the backbone, self[1] the position
    encoder. forward returns the backbone feature maps together with one
    position encoding per map, cast to the feature dtype.
    """

    def __init__(self, backbone, position_embedding):
        super().__init__(backbone, position_embedding)

    def forward(self, tensor_list: NestedTensor):
        feature_dict = self[0](tensor_list)
        features: List[NestedTensor] = []
        encodings = []
        for _name, feature in feature_dict.items():
            features.append(feature)
            # Position encoding for this map, matched to the feature dtype.
            encodings.append(self[1](feature).to(feature.tensors.dtype))
        return features, encodings
def build_backbone(args):
    """Construct the image backbone joined with its positional encoding.

    Args:
        args: parsed command-line arguments; reads ``backbone`` and
            ``lr_backbone`` here (plus whatever build_position_encoding uses).

    Returns:
        A Joiner whose forward yields (feature maps, position encodings),
        with ``num_channels`` copied from the underlying backbone.
    """
    pos_encoder = build_position_encoding(args)
    # The CNN is trained only when it is given a positive learning rate.
    wants_backbone_training = args.lr_backbone > 0
    # Intermediate layers (args.masks) are disabled.
    # NOTE(review): dilation is hard-coded to False — confirm that ignoring
    # any args.dilation setting is intentional.
    cnn = Backbone(args.backbone, wants_backbone_training, False, False)
    model = Joiner(cnn, pos_encoder)
    model.num_channels = cnn.num_channels
    return model
| 37.29661 | 113 | 0.656669 |
import torch
import torch.nn.functional as F
import torchvision
from torch import nn
from torchvision.models._utils import IntermediateLayerGetter
from typing import Dict, List
from util.misc import NestedTensor, is_main_process
from .position_encoding import build_position_encoding
class FrozenBatchNorm2d(torch.nn.Module):
    """BatchNorm2d where the batch statistics and the affine parameters are fixed.

    An explicit eps is added to the running variance before rsqrt for
    numerical stability.
    """

    def __init__(self, n, eps=1e-5):
        super(FrozenBatchNorm2d, self).__init__()
        # Buffers (not parameters): saved/loaded with the state dict but
        # never touched by the optimizer.
        self.register_buffer("weight", torch.ones(n))
        self.register_buffer("bias", torch.zeros(n))
        self.register_buffer("running_mean", torch.zeros(n))
        self.register_buffer("running_var", torch.ones(n))

    def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict,
                              missing_keys, unexpected_keys, error_msgs):
        # Regular BatchNorm checkpoints carry a `num_batches_tracked` entry
        # that this frozen variant has no buffer for; drop it before loading.
        num_batches_tracked_key = prefix + 'num_batches_tracked'
        if num_batches_tracked_key in state_dict:
            del state_dict[num_batches_tracked_key]
        super(FrozenBatchNorm2d, self)._load_from_state_dict(
            state_dict, prefix, local_metadata, strict,
            missing_keys, unexpected_keys, error_msgs)

    def forward(self, x):
        # Reshape the per-channel buffers to (1, C, 1, 1) up front so they
        # broadcast over (N, C, H, W).
        w = self.weight.reshape(1, -1, 1, 1)
        b = self.bias.reshape(1, -1, 1, 1)
        rv = self.running_var.reshape(1, -1, 1, 1)
        rm = self.running_mean.reshape(1, -1, 1, 1)
        eps = 1e-5
        # Fold normalization + affine into a single scale-and-shift.
        scale = w * (rv + eps).rsqrt()
        bias = b - rm * scale
        return x * scale + bias
class BackboneBase(nn.Module):
    """Wraps a CNN backbone so it maps a NestedTensor to per-layer
    NestedTensor feature maps (features + downsampled padding mask).
    """

    def __init__(self, backbone: nn.Module, train_backbone: bool, num_channels: int, return_interm_layers: bool):
        super().__init__()
        # Freeze all parameters unless the backbone is trained, and even then
        # keep everything outside layer2/3/4 frozen.
        for name, parameter in backbone.named_parameters():
            if not train_backbone or 'layer2' not in name and 'layer3' not in name and 'layer4' not in name:
                parameter.requires_grad_(False)
        if return_interm_layers:
            # Expose the output of every residual stage, keyed "0".."3".
            return_layers = {"layer1": "0", "layer2": "1", "layer3": "2", "layer4": "3"}
        else:
            # Only the final stage's output is needed.
            return_layers = {'layer4': "0"}
        self.body = IntermediateLayerGetter(backbone, return_layers=return_layers)
        # Channel count of the returned feature maps (supplied by the caller).
        self.num_channels = num_channels

    def forward(self, tensor_list: NestedTensor):
        xs = self.body(tensor_list.tensors)
        out: Dict[str, NestedTensor] = {}
        for name, x in xs.items():
            m = tensor_list.mask
            assert m is not None
            # Downsample the padding mask to this feature map's spatial size.
            mask = F.interpolate(m[None].float(), size=x.shape[-2:]).to(torch.bool)[0]
            out[name] = NestedTensor(x, mask)
        return out
class Backbone(BackboneBase):
    """ResNet backbone with frozen BatchNorm."""

    def __init__(self, name: str,
                 train_backbone: bool,
                 return_interm_layers: bool,
                 dilation: bool):
        # Pretrained weights are fetched only on the main process (presumably
        # to avoid concurrent downloads in distributed runs — TODO confirm
        # against is_main_process); BatchNorm is swapped for the frozen variant.
        backbone = getattr(torchvision.models, name)(
            replace_stride_with_dilation=[False, False, dilation],
            pretrained=is_main_process(), norm_layer=FrozenBatchNorm2d)
        # resnet18/34 end in 512 channels; the deeper variants end in 2048.
        num_channels = 512 if name in ('resnet18', 'resnet34') else 2048
        super().__init__(backbone, train_backbone, num_channels, return_interm_layers)
class Joiner(nn.Sequential):
    """Pairs a backbone (self[0]) with a position-encoding module (self[1])."""

    def __init__(self, backbone, position_embedding):
        super().__init__(backbone, position_embedding)

    def forward(self, tensor_list: NestedTensor):
        # Backbone features, keyed by layer name.
        xs = self[0](tensor_list)
        out: List[NestedTensor] = []
        pos = []
        for name, x in xs.items():
            out.append(x)
            # Position encoding for this map, cast to the feature dtype.
            pos.append(self[1](x).to(x.tensors.dtype))
        return out, pos
def build_backbone(args):
    """Assemble the backbone + positional-encoding pipeline from parsed args."""
    position_embedding = build_position_encoding(args)
    # The backbone is trained only when given a positive learning rate.
    train_backbone = args.lr_backbone > 0
    return_interm_layers = False  # intermediate feature maps are not used here
    # NOTE(review): dilation is hard-coded to False — confirm that ignoring
    # any args.dilation setting is intentional.
    backbone = Backbone(args.backbone, train_backbone, return_interm_layers, False)
    model = Joiner(backbone, position_embedding)
    model.num_channels = backbone.num_channels  # exposed for downstream projections
    return model
| true | true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.