CombinedText stringlengths 4 3.42M |
|---|
#
# Copyright 2018 Analytics Zoo Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import shutil
from unittest import TestCase
import tensorflow as tf
from bigdl.optim.optimizer import SeveralIteration
from zoo.orca.learn.tf.estimator import Estimator
from zoo.common.nncontext import *
from zoo.orca.learn.tf.utils import convert_predict_to_dataframe
class TestEstimatorForKeras(TestCase):
    """Tests for zoo.orca.learn.tf.estimator.Estimator wrapping tf.keras models.

    Covers fit/evaluate/predict over XShards, Spark DataFrames and
    tf.data.Dataset inputs, plus checkpointing, TensorBoard summaries and
    Keras-model save/load round-trips.
    """

    def setup_method(self, method):
        # Resolve the shared test-resource directory relative to this file.
        self.resource_path = os.path.join(os.path.split(__file__)[0], "../../../resources")

    def create_model(self):
        """Build and compile a small two-input softmax classifier."""
        user = tf.keras.layers.Input(shape=[1])
        item = tf.keras.layers.Input(shape=[1])
        feat = tf.keras.layers.concatenate([user, item], axis=1)
        predictions = tf.keras.layers.Dense(2, activation='softmax')(feat)
        model = tf.keras.models.Model(inputs=[user, item], outputs=predictions)
        model.compile(optimizer='rmsprop',
                      loss='sparse_categorical_crossentropy',
                      metrics=['accuracy'])
        return model

    def create_model_with_clip(self):
        """Same network as create_model, but the RMSprop optimizer is
        configured explicitly with gradient clipping (clipnorm/clipvalue)."""
        user = tf.keras.layers.Input(shape=[1])
        item = tf.keras.layers.Input(shape=[1])
        feat = tf.keras.layers.concatenate([user, item], axis=1)
        predictions = tf.keras.layers.Dense(2, activation='softmax')(feat)
        model = tf.keras.models.Model(inputs=[user, item], outputs=predictions)
        optimizer = tf.keras.optimizers.RMSprop(
            learning_rate=0.001,
            rho=0.9,
            momentum=0.0,
            epsilon=1e-07,
            centered=False,
            name="RMSprop",
            clipnorm=1.2,
            clipvalue=0.2
        )
        model.compile(optimizer=optimizer,
                      loss='sparse_categorical_crossentropy',
                      metrics=['accuracy'])
        return model

    def test_estimator_keras_xshards(self):
        """fit/evaluate/predict on XShards built from a pandas CSV."""
        import zoo.orca.data.pandas
        tf.reset_default_graph()
        model = self.create_model()
        file_path = os.path.join(self.resource_path, "orca/learn/ncf.csv")
        data_shard = zoo.orca.data.pandas.read_csv(file_path)

        def transform(df):
            # Shape each shard into the {"x": (user, item), "y": label} dict
            # the estimator expects.
            result = {
                "x": (df['user'].to_numpy().reshape([-1, 1]),
                      df['item'].to_numpy().reshape([-1, 1])),
                "y": df['label'].to_numpy()
            }
            return result

        data_shard = data_shard.transform_shard(transform)
        est = Estimator.from_keras(keras_model=model)
        est.fit(data=data_shard,
                batch_size=8,
                epochs=10,
                validation_data=data_shard)
        eval_result = est.evaluate(data_shard)
        print(eval_result)
        # Re-read the CSV and keep features only for prediction.
        data_shard = zoo.orca.data.pandas.read_csv(file_path)

        def transform(df):
            result = {
                "x": (df['user'].to_numpy().reshape([-1, 1]),
                      df['item'].to_numpy().reshape([-1, 1])),
            }
            return result

        data_shard = data_shard.transform_shard(transform)
        predictions = est.predict(data_shard).collect()
        # Softmax over 2 classes -> prediction has 2 columns.
        assert predictions[0]['prediction'].shape[1] == 2

    def test_estimator_keras_xshards_options(self):
        """Exercise fit() variants: no validation, fresh estimator,
        custom session config, and a model_dir that receives checkpoints."""
        import zoo.orca.data.pandas
        tf.reset_default_graph()
        model = self.create_model()
        file_path = os.path.join(self.resource_path, "orca/learn/ncf.csv")
        data_shard = zoo.orca.data.pandas.read_csv(file_path)

        def transform(df):
            result = {
                "x": (df['user'].to_numpy().reshape([-1, 1]),
                      df['item'].to_numpy().reshape([-1, 1])),
                "y": df['label'].to_numpy()
            }
            return result

        data_shard = data_shard.transform_shard(transform)
        est = Estimator.from_keras(keras_model=model)
        # train with no validation
        est.fit(data=data_shard,
                batch_size=8,
                epochs=10)
        # train with different optimizer
        est = Estimator.from_keras(keras_model=model)
        est.fit(data=data_shard,
                batch_size=8,
                epochs=10
                )
        # train with session config
        tf_session_config = tf.ConfigProto(inter_op_parallelism_threads=1,
                                           intra_op_parallelism_threads=1)
        est = Estimator.from_keras(keras_model=model)
        est.fit(data=data_shard,
                batch_size=8,
                epochs=10,
                session_config=tf_session_config
                )
        # train with model dir
        temp = tempfile.mkdtemp()
        model_dir = os.path.join(temp, "model")
        est = Estimator.from_keras(keras_model=model, model_dir=model_dir)
        est.fit(data=data_shard,
                batch_size=8,
                epochs=10,
                validation_data=data_shard)
        # model_dir must have received at least one artifact during training.
        assert len(os.listdir(model_dir)) > 0
        shutil.rmtree(temp)

    def test_estimator_keras_xshards_clip(self):
        """Training with a clipping-enabled optimizer should run cleanly."""
        import zoo.orca.data.pandas
        tf.reset_default_graph()
        model = self.create_model_with_clip()
        file_path = os.path.join(self.resource_path, "orca/learn/ncf.csv")
        data_shard = zoo.orca.data.pandas.read_csv(file_path)

        def transform(df):
            result = {
                "x": (df['user'].to_numpy().reshape([-1, 1]),
                      df['item'].to_numpy().reshape([-1, 1])),
                "y": df['label'].to_numpy()
            }
            return result

        data_shard = data_shard.transform_shard(transform)
        est = Estimator.from_keras(keras_model=model)
        est.fit(data=data_shard,
                batch_size=8,
                epochs=10,
                validation_data=data_shard)

    def test_estimator_keras_xshards_checkpoint(self):
        """Train with periodic checkpoints, then resume from the latest
        orca checkpoint in a fresh graph and continue training."""
        import zoo.orca.data.pandas
        tf.reset_default_graph()
        model = self.create_model()
        file_path = os.path.join(self.resource_path, "orca/learn/ncf.csv")
        data_shard = zoo.orca.data.pandas.read_csv(file_path)

        def transform(df):
            result = {
                "x": (df['user'].to_numpy().reshape([-1, 1]),
                      df['item'].to_numpy().reshape([-1, 1])),
                "y": df['label'].to_numpy()
            }
            return result

        data_shard = data_shard.transform_shard(transform)
        temp = tempfile.mkdtemp()
        model_dir = os.path.join(temp, "test_model")
        est = Estimator.from_keras(keras_model=model, model_dir=model_dir)
        est.fit(data=data_shard,
                batch_size=8,
                epochs=6,
                validation_data=data_shard,
                checkpoint_trigger=SeveralIteration(4))
        eval_result = est.evaluate(data_shard)
        print(eval_result)
        tf.reset_default_graph()
        model = self.create_model()
        est = Estimator.from_keras(keras_model=model, model_dir=model_dir)
        est.load_latest_orca_checkpoint(model_dir)
        est.fit(data=data_shard,
                batch_size=8,
                epochs=10,
                validation_data=data_shard,
                checkpoint_trigger=SeveralIteration(4))
        eval_result = est.evaluate(data_shard)
        print(eval_result)
        shutil.rmtree(temp)

    def test_estimator_keras_dataframe(self):
        """fit/evaluate/predict directly on a Spark DataFrame with
        feature_cols/labels_cols selection."""
        tf.reset_default_graph()
        model = self.create_model()
        sc = init_nncontext()
        sqlcontext = SQLContext(sc)
        file_path = os.path.join(self.resource_path, "orca/learn/ncf.csv")
        df = sqlcontext.read.csv(file_path, header=True, inferSchema=True)
        from pyspark.sql.functions import array
        # Wrap scalar columns into arrays so they match the model's inputs.
        df = df.withColumn('user', array('user')) \
            .withColumn('item', array('item'))
        est = Estimator.from_keras(keras_model=model)
        est.fit(data=df,
                batch_size=8,
                epochs=4,
                feature_cols=['user', 'item'],
                labels_cols=['label'],
                validation_data=df)
        eval_result = est.evaluate(df, feature_cols=['user', 'item'], labels_cols=['label'])
        assert 'acc Top1Accuracy' in eval_result
        prediction_df = est.predict(df, batch_size=4, feature_cols=['user', 'item'])
        assert 'prediction' in prediction_df.columns
        predictions = prediction_df.collect()
        # ncf.csv is expected to contain 10 rows -- TODO confirm fixture size.
        assert len(predictions) == 10

    def test_estimator_keras_dataframe_no_fit(self):
        """evaluate/predict must work on a freshly built (untrained) model."""
        tf.reset_default_graph()
        model = self.create_model()
        sc = init_nncontext()
        sqlcontext = SQLContext(sc)
        file_path = os.path.join(self.resource_path, "orca/learn/ncf.csv")
        df = sqlcontext.read.csv(file_path, header=True, inferSchema=True)
        from pyspark.sql.functions import array
        df = df.withColumn('user', array('user')) \
            .withColumn('item', array('item'))
        est = Estimator.from_keras(keras_model=model)
        eval_result = est.evaluate(df, feature_cols=['user', 'item'], labels_cols=['label'])
        assert 'acc Top1Accuracy' in eval_result
        prediction_df = est.predict(df, batch_size=4, feature_cols=['user', 'item'])
        assert 'prediction' in prediction_df.columns
        predictions = prediction_df.collect()
        assert len(predictions) == 10

    def test_estimator_keras_tf_dataset(self):
        """fit/evaluate on a tf.data.Dataset of random (user, item, label)."""
        tf.reset_default_graph()
        model = self.create_model()
        dataset = tf.data.Dataset.from_tensor_slices((np.random.randint(0, 200, size=(100, 1)),
                                                      np.random.randint(0, 50, size=(100, 1)),
                                                      np.ones(shape=(100,), dtype=np.int32)))
        # Regroup each element into ((user, item), label) pairs.
        dataset = dataset.map(lambda user, item, label: [(user, item), label])
        est = Estimator.from_keras(keras_model=model)
        est.fit(data=dataset,
                batch_size=8,
                epochs=10,
                validation_data=dataset)
        eval_result = est.evaluate(dataset)
        assert 'acc Top1Accuracy' in eval_result

    def test_estimator_keras_tensorboard(self):
        """Train/validation summaries are collected via model_dir, and via
        set_tensorboard() when no model_dir is given."""
        import zoo.orca.data.pandas
        tf.reset_default_graph()
        model = self.create_model()
        file_path = os.path.join(self.resource_path, "orca/learn/ncf.csv")
        data_shard = zoo.orca.data.pandas.read_csv(file_path)

        def transform(df):
            result = {
                "x": (df['user'].to_numpy().reshape([-1, 1]),
                      df['item'].to_numpy().reshape([-1, 1])),
                "y": df['label'].to_numpy()
            }
            return result

        data_shard = data_shard.transform_shard(transform)
        temp = tempfile.mkdtemp()
        model_dir = os.path.join(temp, "test_model")
        est = Estimator.from_keras(keras_model=model, model_dir=model_dir)
        # Before any training there are no summaries to read.
        assert est.get_train_summary("Loss") is None
        assert est.get_validation_summary("Top1Accuracy") is None
        est.fit(data=data_shard,
                batch_size=8,
                epochs=10,
                validation_data=data_shard)
        train_loss = est.get_train_summary("Loss")
        assert len(train_loss) > 0
        val_scores = est.get_validation_summary("Top1Accuracy")
        assert len(val_scores) > 0
        tf.reset_default_graph()
        # no model dir
        model = self.create_model()
        est = Estimator.from_keras(keras_model=model)
        log_dir = os.path.join(temp, "log")
        est.set_tensorboard(log_dir, "test")
        est.fit(data=data_shard,
                batch_size=8,
                epochs=10,
                validation_data=data_shard)
        # set_tensorboard writes under <log_dir>/<app_name>/{train,validation}.
        assert os.path.exists(os.path.join(log_dir, "test/train"))
        assert os.path.exists(os.path.join(log_dir, "test/validation"))
        train_loss = est.get_train_summary("Loss")
        val_scores = est.get_validation_summary("Loss")
        assert len(train_loss) > 0
        assert len(val_scores) > 0
        shutil.rmtree(temp)

    def test_convert_predict_list_of_array(self):
        """convert_predict_to_dataframe handles both list-of-array and
        scalar-array predictions per row."""
        tf.reset_default_graph()
        sc = init_nncontext()
        sqlcontext = SQLContext(sc)
        rdd = sc.parallelize([(1, 2, 3), (4, 5, 6), (7, 8, 9)])
        df = rdd.toDF(["feature", "label", "c"])
        predict_rdd = df.rdd.map(lambda row: [np.array([1, 2]), np.array(0)])
        resultDF = convert_predict_to_dataframe(df, predict_rdd)
        resultDF.printSchema()
        print(resultDF.collect()[0])
        predict_rdd = df.rdd.map(lambda row: np.array(1))
        resultDF = convert_predict_to_dataframe(df, predict_rdd)
        resultDF.printSchema()
        print(resultDF.collect()[0])

    def test_estimator_keras_save_load(self):
        """Train, save as HDF5, reload the Keras model in a fresh graph and
        predict with a new estimator built from it."""
        import zoo.orca.data.pandas
        tf.reset_default_graph()
        model = self.create_model()
        file_path = os.path.join(self.resource_path, "orca/learn/ncf.csv")
        data_shard = zoo.orca.data.pandas.read_csv(file_path)

        def transform(df):
            result = {
                "x": (df['user'].to_numpy().reshape([-1, 1]),
                      df['item'].to_numpy().reshape([-1, 1])),
                "y": df['label'].to_numpy()
            }
            return result

        data_shard = data_shard.transform_shard(transform)
        est = Estimator.from_keras(keras_model=model)
        est.fit(data=data_shard,
                batch_size=8,
                epochs=10,
                validation_data=data_shard)
        eval_result = est.evaluate(data_shard)
        print(eval_result)
        temp = tempfile.mkdtemp()
        model_path = os.path.join(temp, 'test.h5')
        est.save_keras_model(model_path)
        tf.reset_default_graph()
        from tensorflow.python.keras import models
        from zoo.common.utils import load_from_file

        def load_func(file_path):
            return models.load_model(file_path)

        model = load_from_file(load_func, model_path)
        est = Estimator.from_keras(keras_model=model)
        data_shard = zoo.orca.data.pandas.read_csv(file_path)

        def transform(df):
            result = {
                "x": (df['user'].to_numpy().reshape([-1, 1]),
                      df['item'].to_numpy().reshape([-1, 1])),
            }
            return result

        data_shard = data_shard.transform_shard(transform)
        predictions = est.predict(data_shard).collect()
        assert predictions[0]['prediction'].shape[1] == 2
        shutil.rmtree(temp)
if __name__ == "__main__":
    # Allow running this test module directly (outside the test runner).
    import pytest
    pytest.main([__file__])
Add Keras optimizer learning-rate schedule test (#3056)
* add learning-rate schedule test
* fix style
#
# Copyright 2018 Analytics Zoo Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import shutil
from unittest import TestCase
import tensorflow as tf
from bigdl.optim.optimizer import SeveralIteration
from zoo.orca.learn.tf.estimator import Estimator
from zoo.common.nncontext import *
from zoo.orca.learn.tf.utils import convert_predict_to_dataframe
class TestEstimatorForKeras(TestCase):
    """Tests for zoo.orca.learn.tf.estimator.Estimator wrapping tf.keras models.

    Covers fit/evaluate/predict over XShards, Spark DataFrames and
    tf.data.Dataset inputs, plus checkpointing, TensorBoard summaries,
    Keras-model save/load, and learning-rate schedules.
    """

    def setup_method(self, method):
        # Resolve the shared test-resource directory relative to this file.
        self.resource_path = os.path.join(os.path.split(__file__)[0], "../../../resources")

    def create_model(self):
        """Build and compile a small two-input softmax classifier."""
        user = tf.keras.layers.Input(shape=[1])
        item = tf.keras.layers.Input(shape=[1])
        feat = tf.keras.layers.concatenate([user, item], axis=1)
        predictions = tf.keras.layers.Dense(2, activation='softmax')(feat)
        model = tf.keras.models.Model(inputs=[user, item], outputs=predictions)
        model.compile(optimizer='rmsprop',
                      loss='sparse_categorical_crossentropy',
                      metrics=['accuracy'])
        return model

    def create_model_lr_schedule(self, init_lr, decay_steps, decay_rate):
        """Build a linear model (all-ones kernel, no bias) trained with SGD
        under an ExponentialDecay learning-rate schedule.

        The loss is mean(pred - label), so with all-ones inputs the gradient
        w.r.t. each weight is exactly 1 -- weight deltas equal the current
        learning rate, which lets tests assert the schedule's effect.
        """
        x = tf.keras.layers.Input(shape=[8])
        predictions = tf.keras.layers.Dense(1, use_bias=False,
                                            kernel_initializer=tf.ones_initializer())(x)
        model = tf.keras.models.Model(inputs=[x], outputs=predictions)
        schedule = tf.keras.optimizers.schedules.ExponentialDecay(init_lr, decay_steps, decay_rate)
        optimizer = tf.keras.optimizers.SGD(schedule)
        model.compile(optimizer=optimizer,
                      loss=lambda label, pred: tf.reduce_mean(pred - label))
        return model

    def create_model_with_clip(self):
        """Same network as create_model, but the RMSprop optimizer is
        configured explicitly with gradient clipping (clipnorm/clipvalue)."""
        user = tf.keras.layers.Input(shape=[1])
        item = tf.keras.layers.Input(shape=[1])
        feat = tf.keras.layers.concatenate([user, item], axis=1)
        predictions = tf.keras.layers.Dense(2, activation='softmax')(feat)
        model = tf.keras.models.Model(inputs=[user, item], outputs=predictions)
        optimizer = tf.keras.optimizers.RMSprop(
            learning_rate=0.001,
            rho=0.9,
            momentum=0.0,
            epsilon=1e-07,
            centered=False,
            name="RMSprop",
            clipnorm=1.2,
            clipvalue=0.2
        )
        model.compile(optimizer=optimizer,
                      loss='sparse_categorical_crossentropy',
                      metrics=['accuracy'])
        return model

    def test_estimator_keras_xshards(self):
        """fit/evaluate/predict on XShards built from a pandas CSV."""
        import zoo.orca.data.pandas
        tf.reset_default_graph()
        model = self.create_model()
        file_path = os.path.join(self.resource_path, "orca/learn/ncf.csv")
        data_shard = zoo.orca.data.pandas.read_csv(file_path)

        def transform(df):
            # Shape each shard into the {"x": (user, item), "y": label} dict
            # the estimator expects.
            result = {
                "x": (df['user'].to_numpy().reshape([-1, 1]),
                      df['item'].to_numpy().reshape([-1, 1])),
                "y": df['label'].to_numpy()
            }
            return result

        data_shard = data_shard.transform_shard(transform)
        est = Estimator.from_keras(keras_model=model)
        est.fit(data=data_shard,
                batch_size=8,
                epochs=10,
                validation_data=data_shard)
        eval_result = est.evaluate(data_shard)
        print(eval_result)
        # Re-read the CSV and keep features only for prediction.
        data_shard = zoo.orca.data.pandas.read_csv(file_path)

        def transform(df):
            result = {
                "x": (df['user'].to_numpy().reshape([-1, 1]),
                      df['item'].to_numpy().reshape([-1, 1])),
            }
            return result

        data_shard = data_shard.transform_shard(transform)
        predictions = est.predict(data_shard).collect()
        # Softmax over 2 classes -> prediction has 2 columns.
        assert predictions[0]['prediction'].shape[1] == 2

    def test_estimator_keras_xshards_options(self):
        """Exercise fit() variants: no validation, fresh estimator,
        custom session config, and a model_dir that receives checkpoints."""
        import zoo.orca.data.pandas
        tf.reset_default_graph()
        model = self.create_model()
        file_path = os.path.join(self.resource_path, "orca/learn/ncf.csv")
        data_shard = zoo.orca.data.pandas.read_csv(file_path)

        def transform(df):
            result = {
                "x": (df['user'].to_numpy().reshape([-1, 1]),
                      df['item'].to_numpy().reshape([-1, 1])),
                "y": df['label'].to_numpy()
            }
            return result

        data_shard = data_shard.transform_shard(transform)
        est = Estimator.from_keras(keras_model=model)
        # train with no validation
        est.fit(data=data_shard,
                batch_size=8,
                epochs=10)
        # train with different optimizer
        est = Estimator.from_keras(keras_model=model)
        est.fit(data=data_shard,
                batch_size=8,
                epochs=10
                )
        # train with session config
        tf_session_config = tf.ConfigProto(inter_op_parallelism_threads=1,
                                           intra_op_parallelism_threads=1)
        est = Estimator.from_keras(keras_model=model)
        est.fit(data=data_shard,
                batch_size=8,
                epochs=10,
                session_config=tf_session_config
                )
        # train with model dir
        temp = tempfile.mkdtemp()
        model_dir = os.path.join(temp, "model")
        est = Estimator.from_keras(keras_model=model, model_dir=model_dir)
        est.fit(data=data_shard,
                batch_size=8,
                epochs=10,
                validation_data=data_shard)
        # model_dir must have received at least one artifact during training.
        assert len(os.listdir(model_dir)) > 0
        shutil.rmtree(temp)

    def test_estimator_keras_xshards_clip(self):
        """Training with a clipping-enabled optimizer should run cleanly."""
        import zoo.orca.data.pandas
        tf.reset_default_graph()
        model = self.create_model_with_clip()
        file_path = os.path.join(self.resource_path, "orca/learn/ncf.csv")
        data_shard = zoo.orca.data.pandas.read_csv(file_path)

        def transform(df):
            result = {
                "x": (df['user'].to_numpy().reshape([-1, 1]),
                      df['item'].to_numpy().reshape([-1, 1])),
                "y": df['label'].to_numpy()
            }
            return result

        data_shard = data_shard.transform_shard(transform)
        est = Estimator.from_keras(keras_model=model)
        est.fit(data=data_shard,
                batch_size=8,
                epochs=10,
                validation_data=data_shard)

    def test_estimator_keras_xshards_checkpoint(self):
        """Train with periodic checkpoints, then resume from the latest
        orca checkpoint in a fresh graph and continue training."""
        import zoo.orca.data.pandas
        tf.reset_default_graph()
        model = self.create_model()
        file_path = os.path.join(self.resource_path, "orca/learn/ncf.csv")
        data_shard = zoo.orca.data.pandas.read_csv(file_path)

        def transform(df):
            result = {
                "x": (df['user'].to_numpy().reshape([-1, 1]),
                      df['item'].to_numpy().reshape([-1, 1])),
                "y": df['label'].to_numpy()
            }
            return result

        data_shard = data_shard.transform_shard(transform)
        temp = tempfile.mkdtemp()
        model_dir = os.path.join(temp, "test_model")
        est = Estimator.from_keras(keras_model=model, model_dir=model_dir)
        est.fit(data=data_shard,
                batch_size=8,
                epochs=6,
                validation_data=data_shard,
                checkpoint_trigger=SeveralIteration(4))
        eval_result = est.evaluate(data_shard)
        print(eval_result)
        tf.reset_default_graph()
        model = self.create_model()
        est = Estimator.from_keras(keras_model=model, model_dir=model_dir)
        est.load_latest_orca_checkpoint(model_dir)
        est.fit(data=data_shard,
                batch_size=8,
                epochs=10,
                validation_data=data_shard,
                checkpoint_trigger=SeveralIteration(4))
        eval_result = est.evaluate(data_shard)
        print(eval_result)
        shutil.rmtree(temp)

    def test_estimator_keras_dataframe(self):
        """fit/evaluate/predict directly on a Spark DataFrame with
        feature_cols/labels_cols selection."""
        tf.reset_default_graph()
        model = self.create_model()
        sc = init_nncontext()
        sqlcontext = SQLContext(sc)
        file_path = os.path.join(self.resource_path, "orca/learn/ncf.csv")
        df = sqlcontext.read.csv(file_path, header=True, inferSchema=True)
        from pyspark.sql.functions import array
        # Wrap scalar columns into arrays so they match the model's inputs.
        df = df.withColumn('user', array('user')) \
            .withColumn('item', array('item'))
        est = Estimator.from_keras(keras_model=model)
        est.fit(data=df,
                batch_size=8,
                epochs=4,
                feature_cols=['user', 'item'],
                labels_cols=['label'],
                validation_data=df)
        eval_result = est.evaluate(df, feature_cols=['user', 'item'], labels_cols=['label'])
        assert 'acc Top1Accuracy' in eval_result
        prediction_df = est.predict(df, batch_size=4, feature_cols=['user', 'item'])
        assert 'prediction' in prediction_df.columns
        predictions = prediction_df.collect()
        # ncf.csv is expected to contain 10 rows -- TODO confirm fixture size.
        assert len(predictions) == 10

    def test_estimator_keras_dataframe_no_fit(self):
        """evaluate/predict must work on a freshly built (untrained) model."""
        tf.reset_default_graph()
        model = self.create_model()
        sc = init_nncontext()
        sqlcontext = SQLContext(sc)
        file_path = os.path.join(self.resource_path, "orca/learn/ncf.csv")
        df = sqlcontext.read.csv(file_path, header=True, inferSchema=True)
        from pyspark.sql.functions import array
        df = df.withColumn('user', array('user')) \
            .withColumn('item', array('item'))
        est = Estimator.from_keras(keras_model=model)
        eval_result = est.evaluate(df, feature_cols=['user', 'item'], labels_cols=['label'])
        assert 'acc Top1Accuracy' in eval_result
        prediction_df = est.predict(df, batch_size=4, feature_cols=['user', 'item'])
        assert 'prediction' in prediction_df.columns
        predictions = prediction_df.collect()
        assert len(predictions) == 10

    def test_estimator_keras_tf_dataset(self):
        """fit/evaluate on a tf.data.Dataset of random (user, item, label)."""
        tf.reset_default_graph()
        model = self.create_model()
        dataset = tf.data.Dataset.from_tensor_slices((np.random.randint(0, 200, size=(100, 1)),
                                                      np.random.randint(0, 50, size=(100, 1)),
                                                      np.ones(shape=(100,), dtype=np.int32)))
        # Regroup each element into ((user, item), label) pairs.
        dataset = dataset.map(lambda user, item, label: [(user, item), label])
        est = Estimator.from_keras(keras_model=model)
        est.fit(data=dataset,
                batch_size=8,
                epochs=10,
                validation_data=dataset)
        eval_result = est.evaluate(dataset)
        assert 'acc Top1Accuracy' in eval_result

    def test_estimator_keras_tensorboard(self):
        """Train/validation summaries are collected via model_dir, and via
        set_tensorboard() when no model_dir is given."""
        import zoo.orca.data.pandas
        tf.reset_default_graph()
        model = self.create_model()
        file_path = os.path.join(self.resource_path, "orca/learn/ncf.csv")
        data_shard = zoo.orca.data.pandas.read_csv(file_path)

        def transform(df):
            result = {
                "x": (df['user'].to_numpy().reshape([-1, 1]),
                      df['item'].to_numpy().reshape([-1, 1])),
                "y": df['label'].to_numpy()
            }
            return result

        data_shard = data_shard.transform_shard(transform)
        temp = tempfile.mkdtemp()
        model_dir = os.path.join(temp, "test_model")
        est = Estimator.from_keras(keras_model=model, model_dir=model_dir)
        # Before any training there are no summaries to read.
        assert est.get_train_summary("Loss") is None
        assert est.get_validation_summary("Top1Accuracy") is None
        est.fit(data=data_shard,
                batch_size=8,
                epochs=10,
                validation_data=data_shard)
        train_loss = est.get_train_summary("Loss")
        assert len(train_loss) > 0
        val_scores = est.get_validation_summary("Top1Accuracy")
        assert len(val_scores) > 0
        tf.reset_default_graph()
        # no model dir
        model = self.create_model()
        est = Estimator.from_keras(keras_model=model)
        log_dir = os.path.join(temp, "log")
        est.set_tensorboard(log_dir, "test")
        est.fit(data=data_shard,
                batch_size=8,
                epochs=10,
                validation_data=data_shard)
        # set_tensorboard writes under <log_dir>/<app_name>/{train,validation}.
        assert os.path.exists(os.path.join(log_dir, "test/train"))
        assert os.path.exists(os.path.join(log_dir, "test/validation"))
        train_loss = est.get_train_summary("Loss")
        val_scores = est.get_validation_summary("Loss")
        assert len(train_loss) > 0
        assert len(val_scores) > 0
        shutil.rmtree(temp)

    def test_convert_predict_list_of_array(self):
        """convert_predict_to_dataframe handles both list-of-array and
        scalar-array predictions per row."""
        tf.reset_default_graph()
        sc = init_nncontext()
        sqlcontext = SQLContext(sc)
        rdd = sc.parallelize([(1, 2, 3), (4, 5, 6), (7, 8, 9)])
        df = rdd.toDF(["feature", "label", "c"])
        predict_rdd = df.rdd.map(lambda row: [np.array([1, 2]), np.array(0)])
        resultDF = convert_predict_to_dataframe(df, predict_rdd)
        resultDF.printSchema()
        print(resultDF.collect()[0])
        predict_rdd = df.rdd.map(lambda row: np.array(1))
        resultDF = convert_predict_to_dataframe(df, predict_rdd)
        resultDF.printSchema()
        print(resultDF.collect()[0])

    def test_estimator_keras_save_load(self):
        """Train, save as HDF5, reload the Keras model in a fresh graph and
        predict with a new estimator built from it."""
        import zoo.orca.data.pandas
        tf.reset_default_graph()
        model = self.create_model()
        file_path = os.path.join(self.resource_path, "orca/learn/ncf.csv")
        data_shard = zoo.orca.data.pandas.read_csv(file_path)

        def transform(df):
            result = {
                "x": (df['user'].to_numpy().reshape([-1, 1]),
                      df['item'].to_numpy().reshape([-1, 1])),
                "y": df['label'].to_numpy()
            }
            return result

        data_shard = data_shard.transform_shard(transform)
        est = Estimator.from_keras(keras_model=model)
        est.fit(data=data_shard,
                batch_size=8,
                epochs=10,
                validation_data=data_shard)
        eval_result = est.evaluate(data_shard)
        print(eval_result)
        temp = tempfile.mkdtemp()
        model_path = os.path.join(temp, 'test.h5')
        est.save_keras_model(model_path)
        tf.reset_default_graph()
        from tensorflow.python.keras import models
        from zoo.common.utils import load_from_file

        def load_func(file_path):
            return models.load_model(file_path)

        model = load_from_file(load_func, model_path)
        est = Estimator.from_keras(keras_model=model)
        data_shard = zoo.orca.data.pandas.read_csv(file_path)

        def transform(df):
            result = {
                "x": (df['user'].to_numpy().reshape([-1, 1]),
                      df['item'].to_numpy().reshape([-1, 1])),
            }
            return result

        data_shard = data_shard.transform_shard(transform)
        predictions = est.predict(data_shard).collect()
        assert predictions[0]['prediction'].shape[1] == 2
        shutil.rmtree(temp)

    def test_estimator_keras_learning_rate_schedule(self):
        """One epoch of 16 all-ones samples at batch_size=8 runs exactly two
        optimizer steps; with ExponentialDecay(0.1, 1, 0.1) the per-step
        learning rates are 0.1 then 0.01."""
        tf.reset_default_graph()
        # loss = reduce_sum(w)
        # dloss/dw = 1
        model = self.create_model_lr_schedule(0.1, 1, 0.1)
        dataset = tf.data.Dataset.from_tensor_slices((np.ones((16, 8)),
                                                      np.zeros((16, 1))))
        est = Estimator.from_keras(keras_model=model)
        weights_before = model.get_weights()[0]
        est.fit(data=dataset,
                batch_size=8,
                epochs=1,
                validation_data=dataset)
        sess = tf.keras.backend.get_session()
        # Fixed typo: the local variable was previously spelled "iteartion".
        iteration = sess.run(model.optimizer.iterations)
        weights_after = model.get_weights()[0]
        # Step 1 subtracts lr=0.1, step 2 subtracts decayed lr=0.01.
        first_step = weights_before - 0.1
        second_step = first_step - 0.01
        assert iteration == 2
        assert np.allclose(second_step, weights_after)
if __name__ == "__main__":
    # Allow running this test module directly (outside the test runner).
    import pytest
    pytest.main([__file__])
|
import collections
import collections.abc
import unittest
from datetime import datetime, timedelta

from ..stock import Stock, StockSignal
class StockTest(unittest.TestCase):
    """Basic price-tracking behaviour of Stock."""

    def setUp(self):
        self.stock = Stock("GOOG")

    def test_price_of_a_new_stock_class_should_be_None(self):
        """A stock that has never been updated reports no price."""
        self.assertIsNone(self.stock.price)

    def test_stock_update(self):
        """A single update sets the price; timestamps use `datetime`."""
        self.stock.update(datetime(2014, 2, 12), price=10)
        self.assertEqual(10, self.stock.price)

    def test_negative_price_should_throw_ValueError(self):
        """Negative prices must be rejected with ValueError."""
        with self.assertRaises(ValueError):
            self.stock.update(datetime(2014, 2, 13), price=-1)

    def test_stock_price_should_give_the_latest_price(self):
        """After multiple updates, `price` is the most recent value."""
        for day, value in ((12, 10), (13, 8.4)):
            self.stock.update(datetime(2014, 2, day), price=value)
        self.assertAlmostEqual(8.4, self.stock.price, delta=0.0001)

    def test_price_is_the_latest_even_if_updates_are_made_out_of_order(self):
        """The newest timestamp wins, regardless of call order."""
        self.stock.update(datetime(2014, 2, 13), price=8)
        self.stock.update(datetime(2014, 2, 12), price=10)
        self.assertEqual(8, self.stock.price)
class StockTrendTest(unittest.TestCase):
    """Tests for Stock.is_increasing_trend."""

    def setUp(self):
        self.stock = Stock("GOOG")

    def given_a_series_of_prices(self, prices):
        """Apply `prices` on consecutive days starting 2014-02-10."""
        start = datetime(2014, 2, 10)
        for offset, price in enumerate(prices):
            self.stock.update(start + timedelta(days=offset), price)

    def test_increasing_trend_is_true_if_price_increase_for_3_updates(self):
        """Three strictly rising prices form an increasing trend."""
        self.given_a_series_of_prices([8, 10, 12])
        self.assertTrue(self.stock.is_increasing_trend())

    def test_increasing_trend_is_false_if_price_decreases(self):
        """Any drop breaks the trend."""
        self.given_a_series_of_prices([8, 12, 10])
        self.assertFalse(self.stock.is_increasing_trend())

    def test_increasing_trend_is_false_if_price_equal(self):
        """A flat step also breaks the trend (strict increase required)."""
        self.given_a_series_of_prices([8, 10, 10])
        self.assertFalse(self.stock.is_increasing_trend())
class StockCrossOverSignalTest(unittest.TestCase):
def setUp(self):
self.goog = Stock("GOOG")
def _flatten(self, timestamps):
for timestamp in timestamps:
if not isinstance(timestamp, collections.Iterable):
yield timestamp
else:
for value in self._flatten(timestamp):
yield value
def _generate_timestamp_for_date(self, date, price_list):
if not isinstance(price_list, collections.Iterable):
return date
else:
delta = 1.0/len(price_list)
return [date + i*timedelta(delta) for i in range(len(price_list))]
def _generate_timestamps(self, price_list):
return list(self._flatten([
self._generate_timestamp_for_date(datetime(2014, 2, 13) -
timedelta(i),
price_list[len(price_list)-i-1])
for i in range(len(price_list) - 1, -1, -1)
if price_list[len(price_list) - i - 1] is not None]))
def given_a_series_of_prices(self, price_list):
timestamps = self._generate_timestamps(price_list)
for timestamp, price in zip(timestamps,
list(self._flatten([p
for p in price_list
if p is not None]))):
self.goog.update(timestamp, price)
def test_generate_timestamp_returns_consecutive_dates(self):
price_list = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]
expected = [
datetime(2014, 2, 3), datetime(2014, 2, 4), datetime(2014, 2, 5),
datetime(2014, 2, 6), datetime(2014, 2, 7), datetime(2014, 2, 8),
datetime(2014, 2, 9), datetime(2014, 2, 10), datetime(2014, 2, 11),
datetime(2014, 2, 12), datetime(2014, 2, 13)]
self.assertEqual(expected, self._generate_timestamps(price_list))
def test_generate_timestamp_skips_empty_dates(self):
price_list = [1, 2, 3, None, 5, 6, 7, 8, 9, 10, 11]
expected = [
datetime(2014, 2, 3), datetime(2014, 2, 4), datetime(2014, 2, 5),
datetime(2014, 2, 7), datetime(2014, 2, 8),
datetime(2014, 2, 9), datetime(2014, 2, 10), datetime(2014, 2, 11),
datetime(2014, 2, 12), datetime(2014, 2, 13)]
self.assertEqual(expected, self._generate_timestamps(price_list))
def test_generate_timestamp_handles_multiple_updates_per_date(self):
price_list = [1, 2, 3, [4, 3], 5, 6, 7, 8, 9, 10, 11]
expected = [
datetime(2014, 2, 3), datetime(2014, 2, 4), datetime(2014, 2, 5),
datetime(2014, 2, 6), datetime(2014, 2, 6, 12),
datetime(2014, 2, 7), datetime(2014, 2, 8),
datetime(2014, 2, 9), datetime(2014, 2, 10), datetime(2014, 2, 11),
datetime(2014, 2, 12), datetime(2014, 2, 13)]
self.assertEqual(expected, self._generate_timestamps(price_list))
def test_stock_with_no_data_returns_neutral(self):
date_to_check = datetime(2014, 2, 13)
self.given_a_series_of_prices([])
self.assertEqual(StockSignal.neutral,
self.goog.get_crossover_signal(date_to_check))
def test_stock_with_less_data_returns_neutral(self):
"""Even though the series has a downward crossover, we return neutral
because there are not enough data points"""
date_to_check = datetime(2014, 2, 13)
self.given_a_series_of_prices([
20, 21, 22, 23, 24, 25, 26, 27, 28, 1])
self.assertEqual(StockSignal.neutral,
self.goog.get_crossover_signal(date_to_check))
def test_stock_with_no_crossover_returns_neutral(self):
date_to_check = datetime(2014, 2, 13)
self.given_a_series_of_prices([
1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11])
self.assertEqual(StockSignal.neutral,
self.goog.get_crossover_signal(date_to_check))
def test_with_downward_crossover_returns_sell(self):
date_to_check = datetime(2014, 2, 13)
self.given_a_series_of_prices([
21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 4])
self.assertEqual(StockSignal.sell,
self.goog.get_crossover_signal(date_to_check))
def test_with_upward_crossover_returns_buy(self):
date_to_check = datetime(2014, 2, 13)
self.given_a_series_of_prices([
29, 28, 27, 26, 25, 24, 23, 22, 21, 20, 46])
self.assertEqual(StockSignal.buy,
self.goog.get_crossover_signal(date_to_check))
def test_should_only_look_at_closing_price(self):
date_to_check = datetime(2014, 2, 13)
self.given_a_series_of_prices([
29, [5, 28], [5, 27], 26, 25, 24, 23, 22, 21, 20, [5, 46]])
self.assertEqual(StockSignal.buy,
self.goog.get_crossover_signal(date_to_check))
def test_should_be_neutral_if_not_enough_days_of_data(self):
"""Even though we have 13 updates, they only cover 10 days"""
date_to_check = datetime(2014, 2, 13)
self.given_a_series_of_prices([
[5, 28], [5, 27], 26, 25, 24, 23, 22, 21, 20, [5, 46]])
self.assertEqual(StockSignal.neutral,
self.goog.get_crossover_signal(date_to_check))
def test_should_pick_up_previous_closing_if_no_updates_for_a_day(self):
date_to_check = datetime(2014, 2, 13)
self.given_a_series_of_prices([
29, 28, 27, 26, 25, 24, 23, 22, 21, 20, None, None, 46])
self.assertEqual(StockSignal.buy,
self.goog.get_crossover_signal(date_to_check))
def test_should_have_11_days_worth_of_data(self):
"""Should return signal even if there is less than 11 number of updates
as in the case where some days have no updates but we pick up the
previous closing price to fill in the value"""
date_to_check = datetime(2014, 2, 13)
self.given_a_series_of_prices([
27, 26, 25, 24, 23, 22, 21, 20, None, None, 46])
self.assertEqual(StockSignal.buy,
self.goog.get_crossover_signal(date_to_check))
def test_date_to_check_can_be_beyond_last_update_date(self):
"""We have updates upto 13th, but we are checking signal on 15th.
It should just fill in the values for 14th and 15th since there are
no updates on these days"""
date_to_check = datetime(2014, 2, 15)
self.given_a_series_of_prices([
29, 28, 27, 26, 25, 24, 23, 22, 21, 20, 46])
self.assertEqual(StockSignal.neutral,
self.goog.get_crossover_signal(date_to_check))
nose2 — function-style test case
import unittest
import collections
from datetime import datetime, timedelta
from ..stock import Stock, StockSignal
def test_price_of_a_new_stock_class_should_be_None():
    """A brand-new Stock starts out with no price at all."""
    assert Stock("GOOG").price is None
class StockTest(unittest.TestCase):
    """Unit tests for basic price bookkeeping on `Stock`."""

    def setUp(self):
        self.goog = Stock("GOOG")

    def test_price_of_a_new_stock_class_should_be_None(self):
        self.assertIsNone(self.goog.price)

    def test_stock_update(self):
        """An update should set the price on the stock object.

        We will be using the `datetime` module for the timestamp.
        """
        self.goog.update(datetime(2014, 2, 12), price=10)
        self.assertEqual(10, self.goog.price)

    def test_negative_price_should_throw_ValueError(self):
        with self.assertRaises(ValueError):
            self.goog.update(datetime(2014, 2, 13), -1)

    def test_stock_price_should_give_the_latest_price(self):
        for day, price in ((12, 10), (13, 8.4)):
            self.goog.update(datetime(2014, 2, day), price=price)
        self.assertAlmostEqual(8.4, self.goog.price, delta=0.0001)

    def test_price_is_the_latest_even_if_updates_are_made_out_of_order(self):
        self.goog.update(datetime(2014, 2, 13), price=8)
        self.goog.update(datetime(2014, 2, 12), price=10)
        self.assertEqual(8, self.goog.price)
class StockTrendTest(unittest.TestCase):
    """Tests for `Stock.is_increasing_trend` over three consecutive updates."""

    def setUp(self):
        self.goog = Stock("GOOG")

    def given_a_series_of_prices(self, prices):
        # One update per day starting on 2014-02-10 (at most four days,
        # matching the fixed timestamp list this helper always used).
        base_date = datetime(2014, 2, 10)
        timestamps = [base_date + timedelta(days=offset) for offset in range(4)]
        for when, price in zip(timestamps, prices):
            self.goog.update(when, price)

    def test_increasing_trend_is_true_if_price_increase_for_3_updates(self):
        self.given_a_series_of_prices([8, 10, 12])
        self.assertTrue(self.goog.is_increasing_trend())

    def test_increasing_trend_is_false_if_price_decreases(self):
        self.given_a_series_of_prices([8, 12, 10])
        self.assertFalse(self.goog.is_increasing_trend())

    def test_increasing_trend_is_false_if_price_equal(self):
        self.given_a_series_of_prices([8, 10, 10])
        self.assertFalse(self.goog.is_increasing_trend())
class StockCrossOverSignalTest(unittest.TestCase):
def setUp(self):
self.goog = Stock("GOOG")
def _flatten(self, timestamps):
for timestamp in timestamps:
if not isinstance(timestamp, collections.Iterable):
yield timestamp
else:
for value in self._flatten(timestamp):
yield value
def _generate_timestamp_for_date(self, date, price_list):
if not isinstance(price_list, collections.Iterable):
return date
else:
delta = 1.0/len(price_list)
return [date + i*timedelta(delta) for i in range(len(price_list))]
def _generate_timestamps(self, price_list):
return list(self._flatten([
self._generate_timestamp_for_date(datetime(2014, 2, 13) -
timedelta(i),
price_list[len(price_list)-i-1])
for i in range(len(price_list) - 1, -1, -1)
if price_list[len(price_list) - i - 1] is not None]))
def given_a_series_of_prices(self, price_list):
timestamps = self._generate_timestamps(price_list)
for timestamp, price in zip(timestamps,
list(self._flatten([p
for p in price_list
if p is not None]))):
self.goog.update(timestamp, price)
def test_generate_timestamp_returns_consecutive_dates(self):
price_list = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]
expected = [
datetime(2014, 2, 3), datetime(2014, 2, 4), datetime(2014, 2, 5),
datetime(2014, 2, 6), datetime(2014, 2, 7), datetime(2014, 2, 8),
datetime(2014, 2, 9), datetime(2014, 2, 10), datetime(2014, 2, 11),
datetime(2014, 2, 12), datetime(2014, 2, 13)]
self.assertEqual(expected, self._generate_timestamps(price_list))
def test_generate_timestamp_skips_empty_dates(self):
price_list = [1, 2, 3, None, 5, 6, 7, 8, 9, 10, 11]
expected = [
datetime(2014, 2, 3), datetime(2014, 2, 4), datetime(2014, 2, 5),
datetime(2014, 2, 7), datetime(2014, 2, 8),
datetime(2014, 2, 9), datetime(2014, 2, 10), datetime(2014, 2, 11),
datetime(2014, 2, 12), datetime(2014, 2, 13)]
self.assertEqual(expected, self._generate_timestamps(price_list))
def test_generate_timestamp_handles_multiple_updates_per_date(self):
price_list = [1, 2, 3, [4, 3], 5, 6, 7, 8, 9, 10, 11]
expected = [
datetime(2014, 2, 3), datetime(2014, 2, 4), datetime(2014, 2, 5),
datetime(2014, 2, 6), datetime(2014, 2, 6, 12),
datetime(2014, 2, 7), datetime(2014, 2, 8),
datetime(2014, 2, 9), datetime(2014, 2, 10), datetime(2014, 2, 11),
datetime(2014, 2, 12), datetime(2014, 2, 13)]
self.assertEqual(expected, self._generate_timestamps(price_list))
def test_stock_with_no_data_returns_neutral(self):
date_to_check = datetime(2014, 2, 13)
self.given_a_series_of_prices([])
self.assertEqual(StockSignal.neutral,
self.goog.get_crossover_signal(date_to_check))
def test_stock_with_less_data_returns_neutral(self):
"""Even though the series has a downward crossover, we return neutral
because there are not enough data points"""
date_to_check = datetime(2014, 2, 13)
self.given_a_series_of_prices([
20, 21, 22, 23, 24, 25, 26, 27, 28, 1])
self.assertEqual(StockSignal.neutral,
self.goog.get_crossover_signal(date_to_check))
def test_stock_with_no_crossover_returns_neutral(self):
date_to_check = datetime(2014, 2, 13)
self.given_a_series_of_prices([
1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11])
self.assertEqual(StockSignal.neutral,
self.goog.get_crossover_signal(date_to_check))
def test_with_downward_crossover_returns_sell(self):
date_to_check = datetime(2014, 2, 13)
self.given_a_series_of_prices([
21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 4])
self.assertEqual(StockSignal.sell,
self.goog.get_crossover_signal(date_to_check))
def test_with_upward_crossover_returns_buy(self):
date_to_check = datetime(2014, 2, 13)
self.given_a_series_of_prices([
29, 28, 27, 26, 25, 24, 23, 22, 21, 20, 46])
self.assertEqual(StockSignal.buy,
self.goog.get_crossover_signal(date_to_check))
def test_should_only_look_at_closing_price(self):
date_to_check = datetime(2014, 2, 13)
self.given_a_series_of_prices([
29, [5, 28], [5, 27], 26, 25, 24, 23, 22, 21, 20, [5, 46]])
self.assertEqual(StockSignal.buy,
self.goog.get_crossover_signal(date_to_check))
def test_should_be_neutral_if_not_enough_days_of_data(self):
"""Even though we have 13 updates, they only cover 10 days"""
date_to_check = datetime(2014, 2, 13)
self.given_a_series_of_prices([
[5, 28], [5, 27], 26, 25, 24, 23, 22, 21, 20, [5, 46]])
self.assertEqual(StockSignal.neutral,
self.goog.get_crossover_signal(date_to_check))
def test_should_pick_up_previous_closing_if_no_updates_for_a_day(self):
date_to_check = datetime(2014, 2, 13)
self.given_a_series_of_prices([
29, 28, 27, 26, 25, 24, 23, 22, 21, 20, None, None, 46])
self.assertEqual(StockSignal.buy,
self.goog.get_crossover_signal(date_to_check))
def test_should_have_11_days_worth_of_data(self):
"""Should return signal even if there is less than 11 number of updates
as in the case where some days have no updates but we pick up the
previous closing price to fill in the value"""
date_to_check = datetime(2014, 2, 13)
self.given_a_series_of_prices([
27, 26, 25, 24, 23, 22, 21, 20, None, None, 46])
self.assertEqual(StockSignal.buy,
self.goog.get_crossover_signal(date_to_check))
def test_date_to_check_can_be_beyond_last_update_date(self):
"""We have updates upto 13th, but we are checking signal on 15th.
It should just fill in the values for 14th and 15th since there are
no updates on these days"""
date_to_check = datetime(2014, 2, 15)
self.given_a_series_of_prices([
29, 28, 27, 26, 25, 24, 23, 22, 21, 20, 46])
self.assertEqual(StockSignal.neutral,
self.goog.get_crossover_signal(date_to_check))
|
"""
Django settings for huts project.
Generated by 'django-admin startproject' using Django 1.10.5.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
import dj_database_url
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '1&1@xh%(guq+b#1&jv$e6pa9n6sm_w#9cia1)(+idj1)omok(*'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['*']
CORS_ORIGIN_ALLOW_ALL = True
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'hut',
'rest_framework',
'corsheaders',
]
MIDDLEWARE = [
'corsheaders.middleware.CorsMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
MIDDLEWARE_CLASSES = []
ROOT_URLCONF = 'huts.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'huts.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
DATABASES = {
# 'default': {
# 'ENGINE': 'django.db.backends.sqlite3',
# 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
# }
'default': dj_database_url.config(default='postgres://nxhxalrlqkckbd:1f8179624d9a773c8de38b1303b149283dfd58238fb10d0509cb85be49edcc2a@ec2-54-247-99-159.eu-west-1.compute.amazonaws.com:5432/d9tipol4jem759')
}
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# # Static files (CSS, JavaScript, Images)
# # https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_URL = '/static/'
MEDIA_URL = '/media/'
STATICFILES_DIRS = [
os.path.join(BASE_DIR, "static")
]
STATIC_ROOT = os.path.join(os.path.dirname(__file__), '../static_cdn')
MEDIA_ROOT = os.path.join(os.path.dirname(__file__), '../media_cdn')
db_from_env = dj_database_url.config(conn_max_age=500)
DATABASES['default'].update(db_from_env)
# Honor the 'X-Forwarded-Proto' header for request.is_secure()
# SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
REST_FRAMEWORK = {
'DEFAULT_PAGINATION_CLASS':
'rest_framework.pagination.PageNumberPagination',
'PAGE_SIZE': 2,
}
Update settings.py
"""
Django settings for huts project.
Generated by 'django-admin startproject' using Django 1.10.5.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
import dj_database_url
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '1&1@xh%(guq+b#1&jv$e6pa9n6sm_w#9cia1)(+idj1)omok(*'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
#ALLOWED_HOSTS = ['*']
#CORS_ORIGIN_ALLOW_ALL = True
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'hut',
'rest_framework',
'corsheaders',
]
MIDDLEWARE = [
'corsheaders.middleware.CorsMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
MIDDLEWARE_CLASSES = []
ROOT_URLCONF = 'huts.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'huts.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
DATABASES = {
# 'default': {
# 'ENGINE': 'django.db.backends.sqlite3',
# 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
# }
'default': dj_database_url.config(default='postgres://nxhxalrlqkckbd:1f8179624d9a773c8de38b1303b149283dfd58238fb10d0509cb85be49edcc2a@ec2-54-247-99-159.eu-west-1.compute.amazonaws.com:5432/d9tipol4jem759')
}
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# # Static files (CSS, JavaScript, Images)
# # https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_URL = '/static/'
MEDIA_URL = '/media/'
STATICFILES_DIRS = [
os.path.join(BASE_DIR, "static")
]
STATIC_ROOT = os.path.join(os.path.dirname(__file__), '../static_cdn')
MEDIA_ROOT = os.path.join(os.path.dirname(__file__), '../media_cdn')
db_from_env = dj_database_url.config(conn_max_age=500)
DATABASES['default'].update(db_from_env)
# Honor the 'X-Forwarded-Proto' header for request.is_secure()
# SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
REST_FRAMEWORK = {
'DEFAULT_PAGINATION_CLASS':
'rest_framework.pagination.PageNumberPagination',
'PAGE_SIZE': 2,
}
|
# coding=utf-8
# Copyright 2018 The Gin-Config Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Defines the Gin configuration framework.
Programs frequently have a number of "hyperparameters" that require variation
across different executions of the program. When the number of such parameters
grows even moderately large, or use of some parameter is deeply embedded in the
code, top-level flags become very cumbersome. This module provides an
alternative mechanism for setting such hyperparameters, by allowing injection of
parameter values for any function marked as "configurable".
For detailed documentation, please see the user guide:
https://github.com/google/gin-config/tree/master/docs/index.md
# Making functions and classes configurable
Functions and classes can be marked configurable using the `@configurable`
decorator, which associates a "configurable name" with the function or class (by
default, just the function or class name). Optionally, parameters can be
whitelisted or blacklisted to mark only a subset of the function's parameters as
configurable. Once parameters have been bound (see below) to this function, any
subsequent calls will have those parameters automatically supplied by Gin.
If an argument supplied to a function by its caller (either as a positional
argument or as a keyword argument) corresponds to a parameter configured by Gin,
the caller's value will take precedence.
# A short example
Python code:
@gin.configurable
def mix_cocktail(ingredients):
...
@gin.configurable
def serve_random_cocktail(available_cocktails):
...
@gin.configurable
def drink(cocktail):
...
Gin configuration:
martini/mix_cocktail.ingredients = ['gin', 'vermouth', 'twist of lemon']
gin_and_tonic/mix_cocktail.ingredients = ['gin', 'tonic water']
serve_random_cocktail.available_cocktails = {
'martini': @martini/mix_cocktail,
'gin_and_tonic': @gin_and_tonic/mix_cocktail,
}
drink.cocktail = @serve_random_cocktail()
In the above example, there are three configurable functions: `mix_cocktail`
(with a parameter `ingredients`), `serve_random_cocktail` (with parameter
`available_cocktails`), and `drink` (with parameter `cocktail`).
When `serve_random_cocktail` is called, it will receive a dictionary
containing two scoped *references* to the `mix_cocktail` function (each scope
providing unique parameters, meaning calling the different references will
presumably produce different outputs).
On the other hand, when the `drink` function is called, it will receive the
*output* of calling `serve_random_cocktail` as the value of its `cocktail`
parameter, due to the trailing `()` in `@serve_random_cocktail()`.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import contextlib
import copy
import functools
import inspect
import logging
import os
import pprint
import sys
import threading
import traceback
import enum
from gin import config_parser
from gin import selector_map
from gin import utils
import six
class _ScopeManager(threading.local):
"""Manages currently active config scopes.
This ensures thread safety of config scope management by subclassing
`threading.local`. Scopes are tracked as a stack, where elements in the
stack are lists of the currently active scope names.
"""
def _maybe_init(self):
if not hasattr(self, '_active_scopes'):
self._active_scopes = [[]]
@property
def active_scopes(self):
self._maybe_init()
return self._active_scopes[:]
@property
def current_scope(self):
self._maybe_init()
return self._active_scopes[-1][:] # Slice to get copy.
def enter_scope(self, scope):
"""Enters the given scope, updating the list of active scopes.
Args:
scope: A list of active scope names, ordered from outermost to innermost.
"""
self._maybe_init()
self._active_scopes.append(scope)
def exit_scope(self):
"""Exits the most recently entered scope."""
self._maybe_init()
self._active_scopes.pop()
# Maintains the registry of configurable functions and classes.
_REGISTRY = selector_map.SelectorMap()
# Maps tuples of `(scope, selector)` to associated parameter values. This
# specifies the current global "configuration" set through `bind_parameter` or
# `parse_config`, but doesn't include any functions' default argument values.
_CONFIG = {}
# Keeps a set of module names that were dynamically imported via config files.
_IMPORTED_MODULES = set()
# Maps `(scope, selector)` tuples to all configurable parameter values used
# during program execution (including default argument values).
_OPERATIVE_CONFIG = {}
# Guards mutation of _OPERATIVE_CONFIG across threads.
_OPERATIVE_CONFIG_LOCK = threading.Lock()
# Keeps track of currently active config scopes (thread-local; see
# `_ScopeManager`).
_SCOPE_MANAGER = _ScopeManager()
# Keeps track of hooks to run when the Gin config is finalized.
_FINALIZE_HOOKS = []
# Keeps track of whether the config is locked.
_CONFIG_IS_LOCKED = False
# Keeps track of whether "interactive mode" is enabled, in which case redefining
# a configurable is not an error.
_INTERACTIVE_MODE = False
# Keeps track of constants created via gin.constant, to both prevent duplicate
# definitions and to avoid writing them to the operative config.
_CONSTANTS = selector_map.SelectorMap()
# Keeps track of singletons created via the singleton configurable.
_SINGLETONS = {}
# Keeps track of file readers. These are functions that behave like Python's
# `open` function (can be used as a context manager) and will be used to load
# config files. Each element of this list is a tuple of `(file_reader_fn,
# is_readable_fn)`, where `is_readable_fn` is a predicate (like
# `os.path.isfile` below) reporting whether `file_reader_fn` can handle a
# given path.
_FILE_READERS = [(open, os.path.isfile)]
# Maintains a cache of argspecs for functions.
_ARG_SPEC_CACHE = {}
# List of location prefixes. Similar to PATH var in unix to be used to search
# for files with those prefixes.
_LOCATION_PREFIXES = ['']
# Value to represent required parameters: a unique sentinel compared by
# identity.
REQUIRED = object()
def _find_class_construction_fn(cls):
"""Find the first __init__ or __new__ method in the given class's MRO."""
for base in type.mro(cls):
if '__init__' in base.__dict__:
return base.__init__
if '__new__' in base.__dict__:
return base.__new__
def _ensure_wrappability(fn):
"""Make sure `fn` can be wrapped cleanly by functools.wraps."""
# Handle "builtin_function_or_method", "wrapped_descriptor", and
# "method-wrapper" types.
unwrappable_types = (type(sum), type(object.__init__), type(object.__call__))
if isinstance(fn, unwrappable_types):
# pylint: disable=unnecessary-lambda
wrappable_fn = lambda *args, **kwargs: fn(*args, **kwargs)
wrappable_fn.__name__ = fn.__name__
wrappable_fn.__doc__ = fn.__doc__
wrappable_fn.__module__ = '' # These types have no __module__, sigh.
wrappable_fn.__wrapped__ = fn
return wrappable_fn
# Otherwise we're good to go...
return fn
def _decorate_fn_or_cls(decorator, fn_or_cls, subclass=False):
  """Decorate a function or class with the given decorator.

  When `fn_or_cls` is a function, applies `decorator` to the function and
  returns the (decorated) result.

  When `fn_or_cls` is a class and the `subclass` parameter is `False`, this will
  replace `fn_or_cls.__init__` with the result of applying `decorator` to it.

  When `fn_or_cls` is a class and `subclass` is `True`, this will subclass the
  class, but with `__init__` defined to be the result of applying `decorator` to
  `fn_or_cls.__init__`. The decorated class has metadata (docstring, name, and
  module information) copied over from `fn_or_cls`. The goal is to provide a
  decorated class that behaves as much like the original as possible, without
  modifying it (for example, inspection operations using `isinstance` or
  `issubclass` should behave the same way as on the original class).

  Args:
    decorator: The decorator to use.
    fn_or_cls: The function or class to decorate.
    subclass: Whether to decorate classes by subclassing. This argument is
      ignored if `fn_or_cls` is not a class.

  Returns:
    The decorated function or class.
  """
  if not inspect.isclass(fn_or_cls):
    # Plain function/callable: wrap (via _ensure_wrappability) and decorate.
    return decorator(_ensure_wrappability(fn_or_cls))
  # For classes, decorate whichever of __init__/__new__ appears first in the
  # MRO (see _find_class_construction_fn).
  construction_fn = _find_class_construction_fn(fn_or_cls)
  if subclass:
    class DecoratedClass(fn_or_cls):
      __doc__ = fn_or_cls.__doc__
      __module__ = fn_or_cls.__module__
    # __name__ (and on PY3, __qualname__) can't be set in the class body
    # above, so copy them over afterwards.
    DecoratedClass.__name__ = fn_or_cls.__name__
    if six.PY3:
      DecoratedClass.__qualname__ = fn_or_cls.__qualname__
    cls = DecoratedClass
  else:
    cls = fn_or_cls
  decorated_fn = decorator(_ensure_wrappability(construction_fn))
  if construction_fn.__name__ == '__new__':
    # __new__ is implicitly a staticmethod; re-wrap so assignment below
    # doesn't turn it into an instance method.
    decorated_fn = staticmethod(decorated_fn)
  setattr(cls, construction_fn.__name__, decorated_fn)
  return cls
class Configurable(
    collections.namedtuple('Configurable', [
        'fn_or_cls', 'name', 'module', 'whitelist', 'blacklist', 'selector'
    ])):
  """Registry record for a configurable.

  `fn_or_cls` is the underlying function or class; `whitelist`/`blacklist`
  restrict which parameters may be configured (see `ParsedBindingKey`);
  `selector` is the complete selector used for registry lookups. (`name` and
  `module` presumably identify the configurable and its defining module —
  confirm at the registration site, which is outside this chunk.)
  """
  pass
def _raise_unknown_reference_error(ref, additional_msg=''):
err_str = "No configurable matching reference '@{}{}'.{}"
maybe_parens = '()' if ref.evaluate else ''
raise ValueError(err_str.format(ref.selector, maybe_parens, additional_msg))
class ConfigurableReference(object):
  """Represents a reference to a configurable function or class."""

  def __init__(self, scoped_selector, evaluate):
    # `scoped_selector` has the form 'scope1/scope2/module.name'; `evaluate`
    # is True when the reference should be *called* rather than passed as-is
    # (the `@selector()` form in config files).
    self._scoped_selector = scoped_selector
    self._evaluate = evaluate
    scoped_selector_parts = self._scoped_selector.split('/')
    self._scopes = scoped_selector_parts[:-1]
    self._selector = scoped_selector_parts[-1]
    # Look up the referenced configurable; a miss is an immediate error.
    self._configurable = _REGISTRY.get_match(self._selector)
    if not self._configurable:
      _raise_unknown_reference_error(self)

    def reference_decorator(fn):
      # Wrap `fn` so that calls run with this reference's scopes active;
      # scope-less references are left untouched.
      if self._scopes:
        @six.wraps(fn)
        def scoping_wrapper(*args, **kwargs):
          with config_scope(self._scopes):
            return fn(*args, **kwargs)
        return scoping_wrapper
      return fn

    # subclass=True: decorate a subclass rather than mutating the original
    # configurable class (see _decorate_fn_or_cls).
    self._scoped_configurable_fn = _decorate_fn_or_cls(
        reference_decorator, self.configurable.fn_or_cls, True)

  @property
  def configurable(self):
    # The matched `Configurable` registry record.
    return self._configurable

  @property
  def scoped_configurable_fn(self):
    # The referenced callable, wrapped to apply this reference's scopes.
    return self._scoped_configurable_fn

  @property
  def scopes(self):
    return self._scopes

  @property
  def selector(self):
    return self._selector

  @property
  def scoped_selector(self):
    return self._scoped_selector

  @property
  def config_key(self):
    # Key format used by the global _CONFIG dict: (scope string, selector).
    return ('/'.join(self._scopes), self._configurable.selector)

  @property
  def evaluate(self):
    return self._evaluate

  def __eq__(self, other):
    if isinstance(other, self.__class__):
      # pylint: disable=protected-access
      return (
          self._configurable == other._configurable and
          self._evaluate == other._evaluate)
      # pylint: enable=protected-access
    return False

  def __ne__(self, other):
    return not self.__eq__(other)

  def __hash__(self):
    # NOTE(review): hashes via repr(), which includes the scopes, while
    # __eq__ ignores scopes — equal references with different scopes may
    # hash differently. Confirm whether this is intended.
    return hash(repr(self))

  def __repr__(self):
    # Check if this reference is a macro or constant, i.e. @.../macro() or
    # @.../constant(). Only macros and constants correspond to the %... syntax.
    configurable_fn = self._configurable.fn_or_cls
    if configurable_fn in (macro, _retrieve_constant) and self._evaluate:
      return '%' + '/'.join(self._scopes)
    maybe_parens = '()' if self._evaluate else ''
    return '@{}{}'.format(self._scoped_selector, maybe_parens)

  def __deepcopy__(self, memo):
    """Dishonestly implements the __deepcopy__ special method.

    When called, this returns either the `ConfigurableReference` instance itself
    (when `self._evaluate` is `False`) or the result of calling the underlying
    configurable. Configurable references may be deeply nested inside other
    Python data structures, and by providing this implementation,
    `copy.deepcopy` can be used on the containing Python structure to return a
    copy replacing any `ConfigurableReference` marked for evaluation with its
    corresponding configurable's output.

    Args:
      memo: The memoization dict (unused).

    Returns:
      When `self._evaluate` is `False`, returns the underlying configurable
      (maybe wrapped to be called in the proper scope). When `self._evaluate` is
      `True`, returns the output of calling the underlying configurable.
    """
    if self._evaluate:
      return self._scoped_configurable_fn()
    return self._scoped_configurable_fn
class _UnknownConfigurableReference(object):
"""Represents a reference to an unknown configurable.
This class acts as a substitute for `ConfigurableReference` when the selector
doesn't match any known configurable.
"""
def __init__(self, selector, evaluate):
self._selector = selector.split('/')[-1]
self._evaluate = evaluate
@property
def selector(self):
return self._selector
@property
def evaluate(self):
return self._evaluate
def __deepcopy__(self, memo):
"""Dishonestly implements the __deepcopy__ special method.
See `ConfigurableReference` above. If this method is called, it means there
was an attempt to use this unknown configurable reference, so we throw an
error here.
Args:
memo: The memoization dict (unused).
Raises:
ValueError: To report that there is no matching configurable.
"""
addl_msg = '\n\n To catch this earlier, ensure gin.finalize() is called.'
_raise_unknown_reference_error(self, addl_msg)
def _validate_skip_unknown(skip_unknown):
if not isinstance(skip_unknown, (bool, list, tuple, set)):
err_str = 'Invalid value for `skip_unknown`: {}'
raise ValueError(err_str.format(skip_unknown))
def _should_skip(selector, skip_unknown):
  """Checks whether `selector` should be skipped (if unknown)."""
  _validate_skip_unknown(skip_unknown)
  if _REGISTRY.matching_selectors(selector):
    # Known configurables are never skipped.
    return False
  if isinstance(skip_unknown, bool):
    return skip_unknown
  # Otherwise skip_unknown is a list/tuple/set of skippable selectors.
  return selector in skip_unknown
class ParserDelegate(config_parser.ParserDelegate):
  """Delegate to handle creation of configurable references and macros."""

  def __init__(self, skip_unknown=False):
    # Either a bool or a collection of selectors eligible for skipping.
    self._skip_unknown = skip_unknown

  def configurable_reference(self, scoped_selector, evaluate):
    base_selector = scoped_selector.rsplit('/', 1)[-1]
    if _should_skip(base_selector, self._skip_unknown):
      return _UnknownConfigurableReference(scoped_selector, evaluate)
    return ConfigurableReference(scoped_selector, evaluate)

  def macro(self, name):
    # A %name may denote a constant; prefer the constant registry when it
    # matches unambiguously, otherwise treat it as a macro.
    candidates = _CONSTANTS.matching_selectors(name)
    if not candidates:
      return ConfigurableReference(name + '/gin.macro', True)
    if len(candidates) > 1:
      raise ValueError(
          "Ambiguous constant selector '{}', matches {}.".format(
              name, candidates))
    return ConfigurableReference(candidates[0] + '/gin.constant', True)
class ParsedBindingKey(
collections.namedtuple('ParsedBindingKey', [
'scope', 'given_selector', 'complete_selector', 'arg_name'
])):
"""Represents a parsed and validated binding key.
A "binding key" identifies a specific parameter (`arg_name`), of a specific
configurable (`complete_selector`), in a specific scope (`scope`), to which a
value may be bound in the global configuration. The `given_selector` field
retains information about how the original configurable selector was
specified, which can be helpful for error messages (but is ignored for the
purposes of equality and hashing).
"""
def __new__(cls, binding_key):
"""Parses and validates the given binding key.
This function will parse `binding_key` (if necessary), and ensure that the
specified parameter can be bound for the given configurable selector (i.e.,
that the parameter isn't blacklisted or not whitelisted if a whitelist was
provided).
Args:
binding_key: A spec identifying a parameter of a configurable (maybe in
some scope). This should either be a string of the form
'maybe/some/scope/maybe.modules.configurable_name.parameter_name'; or a
list or tuple of `(scope, selector, arg_name)`; or another instance of
`ParsedBindingKey`.
Returns:
A new instance of `ParsedBindingKey`.
Raises:
ValueError: If no function can be found matching the configurable name
specified by `binding_key`, or if the specified parameter name is
blacklisted or not in the function's whitelist (if present).
"""
if isinstance(binding_key, ParsedBindingKey):
return super(ParsedBindingKey, cls).__new__(cls, *binding_key)
if isinstance(binding_key, (list, tuple)):
scope, selector, arg_name = binding_key
elif isinstance(binding_key, six.string_types):
scope, selector, arg_name = config_parser.parse_binding_key(binding_key)
else:
err_str = 'Invalid type for binding_key: {}.'
raise ValueError(err_str.format(type(binding_key)))
configurable_ = _REGISTRY.get_match(selector)
if not configurable_:
raise ValueError("No configurable matching '{}'.".format(selector))
if not _might_have_parameter(configurable_.fn_or_cls, arg_name):
err_str = "Configurable '{}' doesn't have a parameter named '{}'."
raise ValueError(err_str.format(selector, arg_name))
if configurable_.whitelist and arg_name not in configurable_.whitelist:
err_str = "Configurable '{}' doesn't include kwarg '{}' in its whitelist."
raise ValueError(err_str.format(selector, arg_name))
if configurable_.blacklist and arg_name in configurable_.blacklist:
err_str = "Configurable '{}' has blacklisted kwarg '{}'."
raise ValueError(err_str.format(selector, arg_name))
return super(ParsedBindingKey, cls).__new__(
cls,
scope=scope,
given_selector=selector,
complete_selector=configurable_.selector,
arg_name=arg_name)
  @property
  def config_key(self):
    """The `(scope, complete_selector)` pair used as a key into `_CONFIG`."""
    return self.scope, self.complete_selector
  @property
  def scope_selector_arg(self):
    """The `(scope, complete_selector, arg_name)` triple for this parameter."""
    return self.scope, self.complete_selector, self.arg_name
def __equal__(self, other):
# Equality ignores the `given_selector` field, since two binding keys should
# be equal whenever they identify the same parameter.
return self.scope_selector_arg == other.scope_selector_arg
  def __hash__(self):
    # Hash only `scope_selector_arg`, so binding keys identifying the same
    # parameter hash identically regardless of `given_selector`.
    return hash(self.scope_selector_arg)
def _format_value(value):
  """Returns `value` in a format parseable by `parse_value`, or `None`.

  Simply put, this function ensures that when it returns a string, the
  following will hold:

      parse_value(_format_value(value)) == value

  Args:
    value: The value to format.

  Returns:
    A string representation of `value` when `value` is literally representable,
    or `None`.
  """
  candidate = repr(value)
  try:
    round_trips = parse_value(candidate) == value
  except SyntaxError:
    round_trips = False
  return candidate if round_trips else None
def _is_literally_representable(value):
  """Returns `True` if `value` can be (parseably) represented as a string.

  Args:
    value: The value to check.

  Returns:
    `True` when `value` round-trips through `_format_value`/`parse_value`,
    `False` otherwise.
  """
  formatted = _format_value(value)
  return formatted is not None
def clear_config(clear_constants=False):
  """Clears the global configuration.

  This clears any parameter values set by `bind_parameter` or `parse_config`,
  as well as the set of dynamically imported modules. It does not remove any
  configurable functions or classes from the registry of configurables.

  Args:
    clear_constants: Whether to also clear constants created by `constant`.
      Defaults to False; in that case constants are re-registered (restoring
      their bindings) after the rest of the config is cleared.
  """
  _set_config_is_locked(False)
  _CONFIG.clear()
  _SINGLETONS.clear()
  if clear_constants:
    _CONSTANTS.clear()
  else:
    # Clear then redefine constants; `constant` re-adds the bindings.
    preserved_constants = _CONSTANTS.copy()
    _CONSTANTS.clear()
    for constant_name, constant_value in six.iteritems(preserved_constants):
      constant(constant_name, constant_value)
  _IMPORTED_MODULES.clear()
  _OPERATIVE_CONFIG.clear()
def bind_parameter(binding_key, value):
  """Binds the parameter value specified by `binding_key` to `value`.

  The `binding_key` argument should either be a string of the form
  `maybe/scope/optional.module.names.configurable_name.parameter_name`, or a
  list or tuple of `(scope, selector, parameter_name)`, where `selector`
  corresponds to `optional.module.names.configurable_name`. Once this function
  has been called, subsequent calls (in the specified scope) to the specified
  configurable function will have `value` supplied to their `parameter_name`
  parameter.

  Example:

      @configurable('fully_connected_network')
      def network_fn(num_layers=5, units_per_layer=1024):
        ...

      def main(_):
        config.bind_parameter('fully_connected_network.num_layers', 3)
        network_fn()  # Called with num_layers == 3, not the default of 5.

  Args:
    binding_key: The parameter whose value should be set. This can either be a
      string, or a tuple of the form `(scope, selector, parameter)`.
    value: The desired value.

  Raises:
    RuntimeError: If the config is locked.
    ValueError: If no function can be found matching the configurable name
      specified by `binding_key`, or if the specified parameter name is
      blacklisted or not in the function's whitelist (if present).
  """
  if config_is_locked():
    raise RuntimeError('Attempted to modify locked Gin config.')
  parsed = ParsedBindingKey(binding_key)
  _CONFIG.setdefault(parsed.config_key, {})[parsed.arg_name] = value
def query_parameter(binding_key):
  """Returns the value currently bound to the specified `binding_key`.

  The `binding_key` argument should look like
  'maybe/some/scope/maybe.modules.configurable_name.parameter_name'. Note that
  this will not include default parameters.

  Args:
    binding_key: The parameter whose value should be queried.

  Returns:
    The value bound to the configurable/parameter combination given in
    `binding_key`.

  Raises:
    ValueError: If no function can be found matching the configurable name
      specified by `binding_key`, or if the specified parameter name is
      blacklisted or not in the function's whitelist (if present), or if there
      is no value bound for the queried parameter or configurable.
  """
  # A module-like key may name a constant rather than a configurable parameter.
  if config_parser.MODULE_RE.match(binding_key):
    constant_matches = _CONSTANTS.matching_selectors(binding_key)
    if len(constant_matches) > 1:
      raise ValueError("Ambiguous constant selector '{}', matches {}.".format(
          binding_key, constant_matches))
    if len(constant_matches) == 1:
      return _CONSTANTS[constant_matches[0]]
  parsed = ParsedBindingKey(binding_key)
  if parsed.config_key not in _CONFIG:
    raise ValueError("Configurable '{}' has no bound parameters.".format(
        parsed.given_selector))
  bound_parameters = _CONFIG[parsed.config_key]
  if parsed.arg_name not in bound_parameters:
    raise ValueError(
        "Configurable '{}' has no value bound for parameter '{}'.".format(
            parsed.given_selector, parsed.arg_name))
  return bound_parameters[parsed.arg_name]
def _might_have_parameter(fn_or_cls, arg_name):
  """Returns True if `arg_name` might be a valid parameter for `fn_or_cls`.

  Specifically, this means that `fn_or_cls` either has a parameter named
  `arg_name`, or has a `**kwargs` parameter.

  Args:
    fn_or_cls: The function or class to check.
    arg_name: The name of the parameter.

  Returns:
    Whether `arg_name` might be a valid argument of `fn_or_cls`.
  """
  if inspect.isclass(fn_or_cls):
    fn = _find_class_construction_fn(fn_or_cls)
  else:
    fn = fn_or_cls

  # Unwrap any decorator layers that advertise their wrapped function.
  while hasattr(fn, '__wrapped__'):
    fn = fn.__wrapped__

  arg_spec = _get_cached_arg_spec(fn)
  if six.PY3:
    # A `**kwargs` parameter accepts any name at all.
    if arg_spec.varkw:
      return True
    return arg_name in arg_spec.args or arg_name in arg_spec.kwonlyargs
  if arg_spec.keywords:
    return True
  return arg_name in arg_spec.args
def _validate_parameters(fn_or_cls, arg_name_list, err_prefix):
  """Raises ValueError if any name in `arg_name_list` isn't a parameter."""
  for candidate in arg_name_list or []:
    if not _might_have_parameter(fn_or_cls, candidate):
      raise ValueError("Argument '{}' in {} not a parameter of '{}'.".format(
          candidate, err_prefix, fn_or_cls.__name__))
def _get_cached_arg_spec(fn):
  """Returns the (memoized) argspec for `fn`."""
  cached = _ARG_SPEC_CACHE.get(fn)
  if cached is not None:
    return cached
  get_arg_spec = inspect.getfullargspec if six.PY3 else inspect.getargspec
  try:
    arg_spec = get_arg_spec(fn)
  except TypeError:
    # `fn` might be a callable object rather than a plain function.
    arg_spec = get_arg_spec(fn.__call__)
  _ARG_SPEC_CACHE[fn] = arg_spec
  return arg_spec
def _get_supplied_positional_parameter_names(fn, args):
  """Returns the parameter names bound by the supplied positional `args`.

  The result may be shorter than `len(args)` when some of `args` are consumed
  by a vararg (`*args`) parameter.
  """
  named_params = _get_cached_arg_spec(fn).args
  return named_params[:len(args)]
def _get_all_positional_parameter_names(fn):
  """Returns the names of all positional (non-defaulted) arguments of `fn`."""
  arg_spec = _get_cached_arg_spec(fn)
  if not arg_spec.defaults:
    return arg_spec.args
  # Trailing arguments with defaults are keyword-style, so drop them.
  return arg_spec.args[:-len(arg_spec.defaults)]
def _get_kwarg_defaults(fn):
  """Returns a dict mapping kwargs of `fn` to their default values."""
  arg_spec = _get_cached_arg_spec(fn)
  defaults = arg_spec.defaults or ()
  # Defaults align with the *last* len(defaults) positional parameters.
  defaulted_names = arg_spec.args[len(arg_spec.args) - len(defaults):]
  arg_vals = dict(zip(defaulted_names, defaults))
  if six.PY3 and arg_spec.kwonlydefaults:
    arg_vals.update(arg_spec.kwonlydefaults)
  return arg_vals
def _get_validated_required_kwargs(fn, fn_descriptor, whitelist, blacklist):
  """Returns kwargs of `fn` marked REQUIRED, validated vs white/blacklist."""
  required_kwargs = []
  for kwarg_name, default_value in six.iteritems(_get_kwarg_defaults(fn)):
    if default_value is not REQUIRED:
      continue
    # A REQUIRED parameter must be configurable, or it can never be supplied.
    if blacklist and kwarg_name in blacklist:
      raise ValueError("Argument '{}' of {} marked REQUIRED but blacklisted."
                       .format(kwarg_name, fn_descriptor))
    if whitelist and kwarg_name not in whitelist:
      raise ValueError("Argument '{}' of {} marked REQUIRED but not "
                       "whitelisted.".format(kwarg_name, fn_descriptor))
    required_kwargs.append(kwarg_name)
  return required_kwargs
def _get_default_configurable_parameter_values(fn, whitelist, blacklist):
  """Retrieves all default values for configurable parameters of a function.

  Any parameters included in the supplied blacklist, or not included in the
  supplied whitelist, are excluded, as are parameters whose default values
  aren't representable as literals.

  Args:
    fn: The function whose parameter values should be retrieved.
    whitelist: The whitelist (or `None`) associated with the function.
    blacklist: The blacklist (or `None`) associated with the function.

  Returns:
    A dictionary mapping configurable parameter names to their default values.
  """
  defaults = {}
  for param_name, default in six.iteritems(_get_kwarg_defaults(fn)):
    if whitelist and param_name not in whitelist:
      continue
    if blacklist and param_name in blacklist:
      continue
    if _is_literally_representable(default):
      defaults[param_name] = default
  return defaults
def _order_by_signature(fn, arg_names):
  """Orders given `arg_names` based on their order in the signature of `fn`."""
  arg_spec = _get_cached_arg_spec(fn)
  signature_order = list(arg_spec.args)
  if six.PY3 and arg_spec.kwonlyargs:
    signature_order += arg_spec.kwonlyargs
  ordered = [arg for arg in signature_order if arg in arg_names]
  # Any leftovers (e.g. names consumed by **kwargs) keep their given order.
  ordered += [arg for arg in arg_names if arg not in ordered]
  return ordered
def current_scope():
  """Returns a copy of the list of currently active scope names."""
  return _SCOPE_MANAGER.current_scope
def current_scope_str():
  """Returns the currently active scopes as a '/'-delimited string."""
  return '/'.join(current_scope())
@contextlib.contextmanager
def config_scope(name_or_scope):
  """Opens a new configuration scope.

  Provides a context manager that opens a new explicit configuration
  scope. Explicit configuration scopes restrict parameter bindings to only
  certain sections of code that run within the scope. Scopes can be nested to
  arbitrary depth; any configurable functions called within a scope inherit
  parameters defined by higher level scopes.

  For example, suppose a function named `preprocess_images` is called in two
  places in a codebase: Once when loading data for a training task, and once
  when loading data for an evaluation task:

      def load_training_data():
        ...
        with gin.config_scope('train'):
          images = preprocess_images(images)
        ...

      def load_eval_data():
        ...
        with gin.config_scope('eval'):
          images = preprocess_images(images)
        ...

  By using a `config_scope` to wrap each invocation of `preprocess_images` as
  above, it is possible to use Gin to supply specific parameters to each. Here
  is a possible configuration for the above example:

      preprocess_images.crop_size = [64, 64]
      preprocess_images.normalize_image = True
      train/preprocess_images.crop_location = 'random'
      train/preprocess_images.random_flip_lr = True
      eval/preprocess_images.crop_location = 'center'

  The `crop_size` and `normalize_image` parameters above will be shared by both
  the `train` and `eval` invocations; only `train` will receive
  `random_flip_lr`, and the two invocations receive different values for
  `crop_location`.

  Passing `None` or `''` to `config_scope` will temporarily clear all currently
  active scopes (within the `with` block; they will be restored afterwards).

  Args:
    name_or_scope: A name for the config scope, or an existing scope (e.g.,
      captured from `with gin.config_scope(...) as scope`), or `None` to clear
      currently active scopes.

  Raises:
    ValueError: If `name_or_scope` is not a list, string, or None.

  Yields:
    The resulting config scope (a list of all active scope names, ordered from
    outermost to innermost).
  """
  try:
    valid_value = True
    if isinstance(name_or_scope, list):
      # An existing scope list (e.g. captured from a previous `config_scope`).
      new_scope = name_or_scope
    elif name_or_scope and isinstance(name_or_scope, six.string_types):
      new_scope = current_scope()  # Returns a copy.
      new_scope.extend(name_or_scope.split('/'))
    else:
      # Only `None` or `''` may legitimately reach this branch; anything else
      # is reported below (after entering the scope, so `finally` balances).
      valid_value = name_or_scope in (None, '')
      new_scope = []
    # Append new_scope first. It will be popped in the finally block if an
    # exception is raised below.
    _SCOPE_MANAGER.enter_scope(new_scope)
    scopes_are_valid = map(config_parser.MODULE_RE.match, new_scope)
    if not valid_value or not all(scopes_are_valid):
      err_str = 'Invalid value for `name_or_scope`: {}.'
      raise ValueError(err_str.format(name_or_scope))
    yield new_scope
  finally:
    _SCOPE_MANAGER.exit_scope()
def _make_gin_wrapper(fn, fn_or_cls, name, selector, whitelist, blacklist):
  """Creates the final Gin wrapper for the given function.

  Args:
    fn: The function that will be wrapped.
    fn_or_cls: The original function or class being made configurable. This will
      differ from `fn` when making a class configurable, in which case `fn` will
      be the constructor/new function, while `fn_or_cls` will be the class.
    name: The name given to the configurable.
    selector: The full selector of the configurable (name including any module
      components).
    whitelist: A whitelist of configurable parameters.
    blacklist: A blacklist of non-configurable parameters.

  Returns:
    The Gin wrapper around `fn`.
  """
  # At this point we have access to the final function to be wrapped, so we
  # can cache a few things here.
  fn_descriptor = "'{}' ('{}')".format(name, fn_or_cls)
  signature_required_kwargs = _get_validated_required_kwargs(
      fn, fn_descriptor, whitelist, blacklist)
  initial_configurable_defaults = _get_default_configurable_parameter_values(
      fn, whitelist, blacklist)

  @six.wraps(fn)
  def gin_wrapper(*args, **kwargs):
    """Supplies fn with parameter values from the configuration."""
    scope_components = current_scope()
    new_kwargs = {}
    # Apply bindings from outermost to innermost scope, so the most specific
    # (innermost) scope's bindings win.
    for i in range(len(scope_components) + 1):
      partial_scope_str = '/'.join(scope_components[:i])
      new_kwargs.update(_CONFIG.get((partial_scope_str, selector), {}))
    gin_bound_args = list(new_kwargs.keys())
    # After the loop, `partial_scope_str` holds the full current scope string.
    scope_str = partial_scope_str

    arg_names = _get_supplied_positional_parameter_names(fn, args)

    for arg in args[len(arg_names):]:
      if arg is REQUIRED:
        raise ValueError(
            'gin.REQUIRED is not allowed for unnamed (vararg) parameters. If '
            'the function being called is wrapped by a non-Gin decorator, '
            'try explicitly providing argument names for positional '
            'parameters.')

    required_arg_names = []
    required_arg_indexes = []
    for i, arg in enumerate(args[:len(arg_names)]):
      if arg is REQUIRED:
        required_arg_names.append(arg_names[i])
        required_arg_indexes.append(i)

    caller_required_kwargs = []
    for kwarg, value in six.iteritems(kwargs):
      if value is REQUIRED:
        caller_required_kwargs.append(kwarg)

    # If the caller passed arguments as positional arguments that correspond to
    # a keyword arg in new_kwargs, remove the keyword argument from new_kwargs
    # to let the caller win and avoid throwing an error. Unless it is an arg
    # marked as REQUIRED.
    for arg_name in arg_names:
      if arg_name not in required_arg_names:
        new_kwargs.pop(arg_name, None)

    # Get default values for configurable parameters.
    operative_parameter_values = initial_configurable_defaults.copy()
    # Update with the values supplied via configuration.
    operative_parameter_values.update(new_kwargs)
    # Remove any values from the operative config that are overridden by the
    # caller. These can't be configured, so they won't be logged. We skip values
    # that are marked as REQUIRED.
    for k in arg_names:
      if k not in required_arg_names:
        operative_parameter_values.pop(k, None)
    for k in kwargs:
      if k not in caller_required_kwargs:
        operative_parameter_values.pop(k, None)

    # An update is performed in case another caller of this same configurable
    # object has supplied a different set of arguments. By doing an update, a
    # Gin-supplied or default value will be present if it was used (not
    # overridden by the caller) at least once.
    with _OPERATIVE_CONFIG_LOCK:
      op_cfg = _OPERATIVE_CONFIG.setdefault((scope_str, selector), {})
      op_cfg.update(operative_parameter_values)

    # We call deepcopy for two reasons: First, to prevent the called function
    # from modifying any of the values in `_CONFIG` through references passed in
    # via `new_kwargs`; Second, to facilitate evaluation of any
    # `ConfigurableReference` instances buried somewhere inside `new_kwargs`.
    # See the docstring on `ConfigurableReference.__deepcopy__` above for more
    # details on the dark magic happening here.
    new_kwargs = copy.deepcopy(new_kwargs)

    # Validate args marked as REQUIRED have been bound in the Gin config.
    missing_required_params = []
    new_args = list(args)
    for i, arg_name in zip(required_arg_indexes, required_arg_names):
      if arg_name not in new_kwargs:
        missing_required_params.append(arg_name)
      else:
        # Fill the REQUIRED positional slot from the Gin-bound value.
        new_args[i] = new_kwargs.pop(arg_name)

    # Validate kwargs marked as REQUIRED have been bound in the Gin config.
    for required_kwarg in signature_required_kwargs:
      if (required_kwarg not in arg_names and  # not a positional arg
          required_kwarg not in kwargs and  # or a keyword arg
          required_kwarg not in new_kwargs):  # or bound in config
        missing_required_params.append(required_kwarg)
    for required_kwarg in caller_required_kwargs:
      if required_kwarg not in new_kwargs:
        missing_required_params.append(required_kwarg)
      else:
        # Remove from kwargs and let the new_kwargs value be used.
        kwargs.pop(required_kwarg)

    if missing_required_params:
      missing_required_params = (
          _order_by_signature(fn, missing_required_params))
      err_str = 'Required bindings for `{}` not provided in config: {}'
      minimal_selector = _REGISTRY.minimal_selector(selector)
      err_str = err_str.format(minimal_selector, missing_required_params)
      raise RuntimeError(err_str)

    # Now, update with the caller-supplied `kwargs`, allowing the caller to have
    # the final say on keyword argument values.
    new_kwargs.update(kwargs)

    try:
      return fn(*new_args, **new_kwargs)
    except Exception as e:  # pylint: disable=broad-except
      err_str = ''
      if isinstance(e, TypeError):
        # Try to diagnose which positional parameters ended up with no value
        # from either Gin or the caller, to produce a more helpful message.
        all_arg_names = _get_all_positional_parameter_names(fn)
        if len(new_args) < len(all_arg_names):
          unbound_positional_args = list(
              set(all_arg_names[len(new_args):]) - set(new_kwargs))
          if unbound_positional_args:
            caller_supplied_args = list(
                set(arg_names + list(kwargs)) -
                set(required_arg_names + list(caller_required_kwargs)))
            fmt = ('\n No values supplied by Gin or caller for arguments: {}'
                   '\n Gin had values bound for: {gin_bound_args}'
                   '\n Caller supplied values for: {caller_supplied_args}')
            canonicalize = lambda x: list(map(str, sorted(x)))
            err_str += fmt.format(
                canonicalize(unbound_positional_args),
                gin_bound_args=canonicalize(gin_bound_args),
                caller_supplied_args=canonicalize(caller_supplied_args))
      err_str += "\n In call to configurable '{}' ({}){}"
      scope_info = " in scope '{}'".format(scope_str) if scope_str else ''
      err_str = err_str.format(name, fn_or_cls, scope_info)
      utils.augment_exception_message_and_reraise(e, err_str)

  return gin_wrapper
def _make_configurable(fn_or_cls,
                       name=None,
                       module=None,
                       whitelist=None,
                       blacklist=None,
                       subclass=False):
  """Wraps `fn_or_cls` to make it configurable.

  Infers the configurable name from `fn_or_cls.__name__` if necessary, and
  updates global state to keep track of configurable name <-> function
  mappings, as well as whitelisted and blacklisted parameters.

  Args:
    fn_or_cls: The function or class to decorate.
    name: A name for the configurable. If `None`, the name will be inferred from
      from `fn_or_cls`. The `name` may also include module components to be used
      for disambiguation (these will be appended to any components explicitly
      specified by `module`).
    module: The module to associate with the configurable, to help handle naming
      collisions. If `None`, `fn_or_cls.__module__` will be used (if no module
      is specified as part of `name`).
    whitelist: A whitelisted set of parameter names to supply values for.
    blacklist: A blacklisted set of parameter names not to supply values for.
    subclass: If `fn_or_cls` is a class and `subclass` is `True`, decorate by
      subclassing `fn_or_cls` and overriding its `__init__` method. If `False`,
      replace the existing `__init__` with a decorated version.

  Returns:
    A wrapped version of `fn_or_cls` that will take parameter values from the
    global configuration.

  Raises:
    RuntimeError: If the config is locked.
    ValueError: If a configurable with `name` (or the name of `fn_or_cls`)
      already exists, or if both a whitelist and blacklist are specified.
  """
  if config_is_locked():
    err_str = 'Attempted to add a new configurable after the config was locked.'
    raise RuntimeError(err_str)
  name = fn_or_cls.__name__ if name is None else name
  # A plain identifier inherits its module from `fn_or_cls`; a dotted name
  # supplies its own module components and must parse as a module path.
  if config_parser.IDENTIFIER_RE.match(name):
    default_module = getattr(fn_or_cls, '__module__', None)
    module = default_module if module is None else module
  elif not config_parser.MODULE_RE.match(name):
    raise ValueError("Configurable name '{}' is invalid.".format(name))
  if module is not None and not config_parser.MODULE_RE.match(module):
    raise ValueError("Module '{}' is invalid.".format(module))
  selector = module + '.' + name if module else name
  if not _INTERACTIVE_MODE and selector in _REGISTRY:
    err_str = ("A configurable matching '{}' already exists.\n\n"
               'To allow re-registration of configurables in an interactive '
               'environment, use:\n\n'
               ' gin.enter_interactive_mode()')
    raise ValueError(err_str.format(selector))
  if whitelist and blacklist:
    err_str = 'A whitelist or a blacklist can be specified, but not both.'
    raise ValueError(err_str)
  if whitelist and not isinstance(whitelist, (list, tuple)):
    raise TypeError('Whitelist should be a list or tuple.')
  if blacklist and not isinstance(blacklist, (list, tuple)):
    raise TypeError('Blacklist should be a list or tuple.')
  # Validate before registering anything, so a failure leaves no stale entry.
  _validate_parameters(fn_or_cls, whitelist, 'whitelist')
  _validate_parameters(fn_or_cls, blacklist, 'blacklist')
  def decorator(fn):
    """Wraps `fn` so that it obtains parameters from the configuration."""
    return _make_gin_wrapper(fn, fn_or_cls, name, selector, whitelist,
                             blacklist)
  decorated_fn_or_cls = _decorate_fn_or_cls(
      decorator, fn_or_cls, subclass=subclass)
  _REGISTRY[selector] = Configurable(
      decorated_fn_or_cls,
      name=name,
      module=module,
      whitelist=whitelist,
      blacklist=blacklist,
      selector=selector)
  return decorated_fn_or_cls
def configurable(name_or_fn=None, module=None, whitelist=None, blacklist=None):
  """Decorator to make a function or class configurable.

  This decorator registers the decorated function/class as configurable, which
  allows its parameters to be supplied from the global configuration (i.e., set
  through `bind_parameter` or `parse_config`). The decorated function is
  associated with a name in the global configuration, which by default is
  simply the name of the function or class, but can be specified explicitly to
  avoid naming collisions or improve clarity.

  If some parameters should not be configurable, they can be specified in
  `blacklist`. If only a restricted set of parameters should be configurable,
  they can be specified in `whitelist`.

  The decorator can be used without any parameters:

      @config.configurable
      def some_configurable_function(param1, param2='a default value'):
        ...

  Here the function is registered under the name
  `'some_configurable_function'`, and both `param1` and `param2` are
  configurable.

  The decorator can also be supplied with parameters to specify the
  configurable name or supply a whitelist/blacklist:

      @config.configurable('explicit_configurable_name', whitelist='param2')
      def some_configurable_function(param1, param2='a default value'):
        ...

  Here the configurable is registered as `'explicit_configurable_name'`, and
  only `param2` is configurable.

  Classes can be decorated as well, in which case parameters of their
  constructors are made configurable:

      @config.configurable
      class SomeClass(object):
        def __init__(self, param1, param2='a default value'):
          ...

  Here the configurable name is `'SomeClass'`, and both `param1` and `param2`
  are configurable.

  Args:
    name_or_fn: A name for this configurable, or a function to decorate (in
      which case the name will be taken from that function). If not set,
      defaults to the name of the function/class that is being made
      configurable. If a name is provided, it may also include module
      components to be used for disambiguation (these will be appended to any
      components explicitly specified by `module`).
    module: The module to associate with the configurable, to help handle
      naming collisions. By default, the module of the function or class being
      made configurable will be used (if no module is specified as part of the
      name).
    whitelist: A whitelisted set of kwargs that should be configurable. All
      other kwargs will not be configurable. Only one of `whitelist` or
      `blacklist` should be specified.
    blacklist: A blacklisted set of kwargs that should not be configurable. All
      other kwargs will be configurable. Only one of `whitelist` or `blacklist`
      should be specified.

  Returns:
    When used with no parameters (or with a function/class supplied as the
    first parameter), it returns the decorated function or class. When used
    with parameters, it returns a function that can be applied to decorate the
    target function or class.
  """
  # Bare `@configurable` usage passes the target directly as `name_or_fn`.
  if callable(name_or_fn):
    target, explicit_name = name_or_fn, None
  else:
    target, explicit_name = None, name_or_fn

  def decorate(fn_or_cls):
    return _make_configurable(fn_or_cls, explicit_name, module, whitelist,
                              blacklist)

  return decorate(target) if target else decorate
def external_configurable(fn_or_cls,
                          name=None,
                          module=None,
                          whitelist=None,
                          blacklist=None):
  """Allows referencing/configuring an external class or function.

  This alerts Gin to the existence of the class or function `fn_or_cls` in the
  event that it can't be easily annotated with `@configurable` (for instance,
  if it is from another project). This allows `fn_or_cls` to be configured and
  referenced (using the `@name` notation) via parameter binding strings.

  Note that only calls to the return value of this function or resulting from
  references to `fn_or_cls` made through binding strings (configurations) will
  have their parameters injected by Gin---explicit calls to `fn_or_cls`
  directly won't have any parameter bindings applied.

  Args:
    fn_or_cls: The external function or class that should be made configurable.
    name: The configurable name to be associated with `fn_or_cls`. The name may
      also include module components to be used for disambiguation (these will
      be appended to any components explicitly specified by `module`).
    module: The module to associate with the configurable, to help handle
      naming collisions. By default, `fn_or_cls.__module__` will be used (if no
      module is specified as part of the name).
    whitelist: A whitelist of parameter names to allow configuration for.
    blacklist: A blacklist of parameter names not to allow configuration for.

  Returns:
    A decorated version of `fn_or_cls` that permits parameter binding. For
    functions, this is just a wrapped version of the function. For classes,
    this is a carefully constructed subclass of `fn_or_cls` designed to behave
    nearly identically (even under many type inspection operations) save for
    the addition of parameter binding.
  """
  # `subclass=True` leaves the external class itself untouched.
  return _make_configurable(
      fn_or_cls,
      name=name,
      module=module,
      whitelist=whitelist,
      blacklist=blacklist,
      subclass=True)
def _config_str(configuration_object,
                max_line_length=80,
                continuation_indent=4):
  """Formats the configuration in `configuration_object` as a config string.

  Args:
    configuration_object: Either `_OPERATIVE_CONFIG` (operative config) or
      `_CONFIG` (all config, bound and unbound).
    max_line_length: A (soft) constraint on the maximum length of a line in the
      formatted string. Large nested structures will be split across lines, but
      e.g. long strings won't be split into a concatenation of shorter strings.
    continuation_indent: The indentation for continued lines.

  Returns:
    A config string capturing all parameter values set by the object.
  """
  def format_binding(key, value):
    """Pretty print the given key/value pair."""
    formatted_val = pprint.pformat(
        value, width=(max_line_length - continuation_indent))
    formatted_val_lines = formatted_val.split('\n')
    if (len(formatted_val_lines) == 1 and
        len(key + formatted_val) <= max_line_length):
      output = '{} = {}'.format(key, formatted_val)
    else:
      # Multi-line values continue after a trailing backslash, indented.
      indented_formatted_val = '\n'.join(
          [' ' * continuation_indent + line for line in formatted_val_lines])
      output = '{} = \\\n{}'.format(key, indented_formatted_val)
    return output
  def sort_key(key_tuple):
    """Sort configurable selector/innermost scopes, ignoring case."""
    scope, selector = key_tuple[0]
    parts = selector.lower().split('.')[::-1] + scope.lower().split('/')[::-1]
    return '/'.join(parts)
  # Build the output as an array of formatted Gin statements. Each statement may
  # span multiple lines. Imports are first, followed by macros, and finally all
  # other bindings sorted in alphabetical order by configurable name.
  formatted_statements = [
      'import {}'.format(module) for module in sorted(_IMPORTED_MODULES)
  ]
  if formatted_statements:
    formatted_statements.append('')
  # Pull out macro bindings so they can be emitted in their own section.
  macros = {}
  for (scope, selector), config in six.iteritems(configuration_object):
    if _REGISTRY[selector].fn_or_cls == macro:
      macros[scope, selector] = config
  if macros:
    formatted_statements.append('# Macros:')
    formatted_statements.append('# ' + '=' * (max_line_length - 2))
  for (name, _), config in sorted(macros.items(), key=sort_key):
    binding = format_binding(name, config['value'])
    formatted_statements.append(binding)
  if macros:
    formatted_statements.append('')
  sorted_items = sorted(configuration_object.items(), key=sort_key)
  for (scope, selector), config in sorted_items:
    configurable_ = _REGISTRY[selector]
    fn = configurable_.fn_or_cls
    # Macros were already emitted above; constants aren't parameter bindings.
    if fn == macro or fn == _retrieve_constant:
      continue
    minimal_selector = _REGISTRY.minimal_selector(configurable_.selector)
    scoped_selector = (scope + '/' if scope else '') + minimal_selector
    parameters = [(k, v) for k, v in six.iteritems(config)
                  if _is_literally_representable(v)]
    formatted_statements.append('# Parameters for {}:'.format(scoped_selector))
    formatted_statements.append('# ' + '=' * (max_line_length - 2))
    for arg, val in sorted(parameters):
      binding = format_binding('{}.{}'.format(scoped_selector, arg), val)
      formatted_statements.append(binding)
    if not parameters:
      formatted_statements.append('# None.')
    formatted_statements.append('')
  return '\n'.join(formatted_statements)
def operative_config_str(max_line_length=80, continuation_indent=4):
  """Retrieves the "operative" configuration as a config string.

  The operative configuration consists of all parameter values used by
  configurable functions that are actually called during execution of the
  current program. Parameters associated with configurable functions that are
  not called (and so can have no effect on program execution) won't be
  included.

  The goal of the function is to return a config that captures the full set of
  relevant configurable "hyperparameters" used by a program. As such, the
  returned configuration will include the default values of arguments from
  configurable functions (as long as the arguments aren't blacklisted or
  missing from a supplied whitelist), as well as any parameter values
  overridden via `bind_parameter` or through `parse_config`.

  Any parameters that can't be represented as literals (capable of being parsed
  by `parse_config`) are excluded. The resulting config string is sorted
  lexicographically and grouped by configurable name.

  Args:
    max_line_length: A (soft) constraint on the maximum length of a line in the
      formatted string. Large nested structures will be split across lines, but
      e.g. long strings won't be split into a concatenation of shorter strings.
    continuation_indent: The indentation for continued lines.

  Returns:
    A config string capturing all parameter values set in the current program.
  """
  return _config_str(_OPERATIVE_CONFIG, max_line_length, continuation_indent)
def config_str(max_line_length=80, continuation_indent=4):
  """Retrieves the interpreted configuration as a config string.

  This is not the _operative configuration_, in that it includes parameter
  values which are unused by the program.

  Args:
    max_line_length: A (soft) constraint on the maximum length of a line in the
      formatted string. Large nested structures will be split across lines, but
      e.g. long strings won't be split into a concatenation of shorter strings.
    continuation_indent: The indentation for continued lines.

  Returns:
    A config string capturing all parameter values used by the current program.
  """
  return _config_str(_CONFIG, max_line_length, continuation_indent)
def parse_config(bindings, skip_unknown=False):
  """Parse a file, string, or list of strings containing parameter bindings.

  Parses parameter binding strings to set up the global configuration. Once
  `parse_config` has been called, any calls to configurable functions will have
  parameter values set according to the values specified by the parameter
  bindings in `bindings`.

  An individual parameter binding has the format

      maybe/some/scopes/configurable_name.parameter_name = value

  Multiple binding strings can be passed either in the form of a file-like
  object supporting the `readline` method, a single string with each individual
  parameter binding separated by a newline, or as a list of individual parameter
  binding strings.

  Any Python literal (lists, tuples, dicts, strings, etc.) is acceptable to the
  right of the equals sign, and follows standard Python rules for line
  continuation. Additionally, a value starting with '@' is interpreted as a
  (possibly scoped) reference to another configurable function, in which case
  this value is replaced by a reference to that function. If the value
  furthermore ends in `()` (e.g., `@configurable_name()`), then the value
  returned when calling the function is used (it will be called *just before*
  the function consuming the output is called).

  See the module documentation for a more detailed description of scoping
  mechanisms and a complete example.

  Reading from a file could be done as follows:

      with open('/path/to/file.config') as bindings:
        gin.parse_config(bindings)

  Passing a newline separated string of parameter bindings might look like:

      bindings = '''
        my_class.param_one = 'asdf'
        my_class.param_two = 9.7
      '''
      gin.parse_config(bindings)

  Alternatively, one can declare a list of parameter bindings and pass it in:

      bindings = [
        'my_class.param_one = "asdf"',
        'my_class.param_two = 9.7',
      ]
      gin.parse_config(bindings)

  Can skip unknown configurables. For example, if no module containing a
  'training' configurable was imported, errors can be avoided by specifying
  `skip_unknown=True`:

      bindings = [
        'my_class.param_one = "asdf"',
        'my_class.param_two = 9.7',
        'training.learning_rate = 0.1',
      ]
      gin.parse_config(bindings, skip_unknown=True)

  Args:
    bindings: A file-like object supporting the readline method, a newline
      separated string of parameter bindings, or a list of individual parameter
      binding strings.
    skip_unknown: A boolean indicating whether unknown configurables and imports
      should be skipped (instead of causing an error). Configurable references
      to unknown configurables will cause errors if they are present in a
      binding that is not itself skipped due to an unknown configurable. This
      can also be a list of configurable names: any unknown configurables that
      do not match an item in the list will still cause errors. Note that
      bindings for known configurables will always be parsed.
  """
  if isinstance(bindings, (list, tuple)):
    bindings = '\n'.join(bindings)

  _validate_skip_unknown(skip_unknown)
  if isinstance(skip_unknown, (list, tuple)):
    # Normalize to a set for O(1) membership tests in `_should_skip`.
    skip_unknown = set(skip_unknown)

  parser = config_parser.ConfigParser(bindings, ParserDelegate(skip_unknown))
  for statement in parser:
    if isinstance(statement, config_parser.BindingStatement):
      scope, selector, arg_name, value, location = statement
      if not arg_name:
        # A binding without a parameter name (`name = value`) defines a macro;
        # it is stored as the `value` parameter of the `gin.macro` configurable.
        macro_name = '{}/{}'.format(scope, selector) if scope else selector
        with utils.try_with_location(location):
          bind_parameter((macro_name, 'gin.macro', 'value'), value)
        continue
      if not _should_skip(selector, skip_unknown):
        with utils.try_with_location(location):
          bind_parameter((scope, selector, arg_name), value)
    elif isinstance(statement, config_parser.ImportStatement):
      if skip_unknown:
        try:
          __import__(statement.module)
          _IMPORTED_MODULES.add(statement.module)
        except ImportError:
          tb_len = len(traceback.extract_tb(sys.exc_info()[2]))
          log_str = ('Skipping import of unknown module `%s` '
                     '(skip_unknown=True).')
          log_args = [statement.module]
          if tb_len > 1:
            # In case the error comes from a nested import (i.e. the module is
            # available, but it imports some unavailable module), print the
            # traceback to avoid confusion.
            log_str += '\n%s'
            log_args.append(traceback.format_exc())
          logging.info(log_str, *log_args)
      else:
        with utils.try_with_location(statement.location):
          __import__(statement.module)
          _IMPORTED_MODULES.add(statement.module)
    elif isinstance(statement, config_parser.IncludeStatement):
      # Includes are parsed recursively, propagating `skip_unknown`.
      with utils.try_with_location(statement.location):
        parse_config_file(statement.filename, skip_unknown)
    else:
      raise AssertionError('Unrecognized statement type {}.'.format(statement))
def register_file_reader(*args):
"""Register a file reader for use in parse_config_file.
Registered file readers will be used to try reading files passed to
`parse_config_file`. All file readers (beginning with the default `open`) will
be tried until one of them succeeds at opening the file.
This function may also be be used used as a decorator. For example:
@register_file_reader(IOError)
def exotic_data_source(filename):
...
Args:
*args: (When used as a decorator, only the existence check is supplied.)
- file_reader_fn: The file reader function to register. This should be a
function that can be used as a context manager to open a file and
provide a file-like object, similar to Python's built-in `open`.
- is_readable_fn: A function taking the file path and returning a boolean
indicating whether the file can be read by `file_reader_fn`.
Returns:
`None`, or when used as a decorator, a function that will perform the
registration using the supplied readability predicate.
"""
def do_registration(file_reader_fn, is_readable_fn):
if file_reader_fn not in list(zip(*_FILE_READERS))[0]:
_FILE_READERS.append((file_reader_fn, is_readable_fn))
if len(args) == 1: # It's a decorator.
return functools.partial(do_registration, is_readable_fn=args[0])
elif len(args) == 2:
do_registration(*args)
else: # 0 or > 2 arguments supplied.
err_str = 'register_file_reader() takes 1 or 2 arguments ({} given)'
raise TypeError(err_str.format(len(args)))
def add_config_file_search_path(location_prefix):
  """Adds a path that will be searched for config files by parse_config_file.

  Args:
    location_prefix: A path prefix that `parse_config_file` will join with
      relative config file names when searching for them (similar to an entry
      in the Unix PATH variable).
  """
  _LOCATION_PREFIXES.append(location_prefix)
def parse_config_file(config_file, skip_unknown=False):
  """Parse a Gin config file.

  The file is searched for under each registered location prefix (unless its
  path is absolute), and opened with the first registered file reader whose
  readability predicate accepts the candidate path.

  Args:
    config_file: The path to a Gin config file.
    skip_unknown: A boolean indicating whether unknown configurables and imports
      should be skipped instead of causing errors (alternatively a list of
      configurable names to skip if unknown). See `parse_config` for additional
      details.

  Raises:
    IOError: If `config_file` cannot be read using any register file reader.
  """
  # Absolute paths are used verbatim; relative ones go through the search path.
  search_prefixes = [''] if os.path.isabs(config_file) else _LOCATION_PREFIXES
  for prefix in search_prefixes:
    candidate = os.path.join(prefix, config_file)
    for open_fn, is_readable in _FILE_READERS:
      if is_readable(candidate):
        with open_fn(candidate) as f:
          parse_config(f, skip_unknown=skip_unknown)
        return
  raise IOError(
      'Unable to open file: {}. Searched config paths: {}.'.format(
          config_file, search_prefixes))
def parse_config_files_and_bindings(config_files,
                                    bindings,
                                    finalize_config=True,
                                    skip_unknown=False):
  """Parse a list of config files followed by extra Gin bindings.

  This function is equivalent to:

      for config_file in config_files:
        gin.parse_config_file(config_file, skip_unknown)
      gin.parse_config(bindings, skip_unknown)
      if finalize_config:
        gin.finalize()

  Args:
    config_files: A list of paths to the Gin config files.
    bindings: A list of individual parameter binding strings.
    finalize_config: Whether to finalize the config after parsing and binding
      (defaults to True).
    skip_unknown: A boolean indicating whether unknown configurables and imports
      should be skipped instead of causing errors (alternatively a list of
      configurable names to skip if unknown). See `parse_config` for additional
      details.
  """
  # Treat `None` the same as "nothing to parse" for both arguments.
  if config_files is None:
    config_files = []
  if bindings is None:
    bindings = ''
  for config_file in config_files:
    parse_config_file(config_file, skip_unknown)
  parse_config(bindings, skip_unknown)
  if finalize_config:
    finalize()
def parse_value(value):
  """Parse and return a single Gin value.

  Args:
    value: A string containing a single Gin literal (anything valid to the
      right of `=` in a config file, including configurable references).

  Returns:
    The Python value obtained by parsing `value`.

  Raises:
    ValueError: If `value` is not a string.
  """
  if not isinstance(value, six.string_types):
    raise ValueError('value ({}) should be a string type.'.format(value))
  return config_parser.ConfigParser(value, ParserDelegate()).parse_value()
def config_is_locked():
  """Returns whether the global Gin config is currently locked."""
  return _CONFIG_IS_LOCKED
def _set_config_is_locked(is_locked):
  # Internal helper used by `finalize` and `unlock_config` to flip the
  # module-level lock flag.
  global _CONFIG_IS_LOCKED
  _CONFIG_IS_LOCKED = is_locked
@contextlib.contextmanager
def unlock_config():
  """A context manager that temporarily unlocks the config.

  Once the config has been locked by `gin.finalize`, it can only be modified
  using this context manager (to make modifications explicit). Example:

      with gin.unlock_config():
        ...
        gin.bind_parameter(...)

  In the case where the config is already unlocked, this does nothing (the
  config remains unlocked).

  Yields:
    None.
  """
  config_was_locked = config_is_locked()
  _set_config_is_locked(False)
  try:
    yield
  finally:
    # Restore the previous lock state even when the `with` body raises;
    # without the try/finally an exception left the config permanently
    # unlocked.
    _set_config_is_locked(config_was_locked)
def enter_interactive_mode():
  """Enables interactive mode (redefining a configurable is not an error)."""
  global _INTERACTIVE_MODE
  _INTERACTIVE_MODE = True
def exit_interactive_mode():
  """Disables interactive mode (see `enter_interactive_mode`)."""
  global _INTERACTIVE_MODE
  _INTERACTIVE_MODE = False
@contextlib.contextmanager
def interactive_mode():
  """Context manager enabling interactive mode for the duration of its body.

  While active, redefining an existing configurable is not treated as an
  error. Interactive mode is always exited, even if the body raises.
  """
  try:
    enter_interactive_mode()
    yield
  finally:
    exit_interactive_mode()
def finalize():
  """A function that should be called after parsing all Gin config files.

  Calling this function allows registered "finalize hooks" to inspect (and
  potentially modify) the Gin config, to provide additional functionality. Hooks
  should not modify the configuration object they receive directly; instead,
  they should return a dictionary mapping Gin binding keys to (new or updated)
  values. This way, all hooks see the config as originally parsed.

  Raises:
    RuntimeError: If the config is already locked.
    ValueError: If two or more hooks attempt to modify or introduce bindings for
      the same key. Since it is difficult to control the order in which hooks
      are registered, allowing this could yield unpredictable behavior.
  """
  if config_is_locked():
    raise RuntimeError('Finalize called twice (config already locked).')

  # Collect all hook outputs first (so every hook sees the unmodified config),
  # detecting conflicts on a per-binding-key basis.
  bindings = {}
  for hook in _FINALIZE_HOOKS:
    new_bindings = hook(_CONFIG)
    if new_bindings is not None:
      for key, value in six.iteritems(new_bindings):
        pbk = ParsedBindingKey(key)
        if pbk in bindings:
          err_str = 'Received conflicting updates when running {}.'
          raise ValueError(err_str.format(hook))
        bindings[pbk] = value

  # Only after all hooks have run are their bindings applied.
  for pbk, value in six.iteritems(bindings):
    bind_parameter(pbk, value)

  _set_config_is_locked(True)
def register_finalize_hook(fn):
  """Registers `fn` as a hook that will run during `gin.finalize`.

  All finalize hooks should accept the current config, and return a dictionary
  containing any additional parameter bindings that should occur in the form of
  a mapping from (scoped) configurable names to values.

  Args:
    fn: The function to register.

  Returns:
    `fn`, allowing `register_finalize_hook` to be used as a decorator.
  """
  _FINALIZE_HOOKS.append(fn)
  return fn
def _iterate_flattened_values(value):
  """Provides an iterator over all values in a nested structure.

  Strings are treated as scalars. Mappings are flattened over their values.
  Every container encountered is itself yielded after its contents.

  Args:
    value: A possibly nested structure (mapping, iterable, or scalar).

  Yields:
    Each nested value, depth-first, followed by the containing value itself.
  """
  if isinstance(value, six.string_types):
    yield value
    return

  # Prefer `collections.abc` (Python 3.3+); the ABC aliases directly on
  # `collections` were removed in Python 3.10. `getattr` falls back to the
  # old location for Python 2 compatibility.
  abc = getattr(collections, 'abc', collections)
  if isinstance(value, abc.Mapping):
    value = abc.ValuesView(value)

  if isinstance(value, abc.Iterable):
    for nested_value in value:
      for nested_nested_value in _iterate_flattened_values(nested_value):
        yield nested_nested_value

  yield value
def iterate_references(config, to=None):
  """Provides an iterator over references in the given config.

  Args:
    config: A dictionary mapping scoped configurable names to argument bindings.
    to: If supplied, only yield references whose `configurable_fn` matches `to`.

  Yields:
    `ConfigurableReference` instances within `config`, maybe restricted to those
    matching the `to` parameter if it is supplied.
  """
  # Flatten all binding values (including nested containers) and pick out the
  # configurable references.
  for value in _iterate_flattened_values(config):
    if isinstance(value, ConfigurableReference):
      if to is None or value.configurable.fn_or_cls == to:
        yield value
def validate_reference(ref, require_bindings=True, require_evaluation=False):
  """Raises ValueError if `ref` fails the requested validity checks.

  Args:
    ref: A `ConfigurableReference` to validate.
    require_bindings: If True, require that the global config contains bindings
      for the reference's `(scope, selector)` key.
    require_evaluation: If True, require that the reference is marked for
      evaluation (i.e. was written with a trailing `()`).

  Raises:
    ValueError: If any requested check fails.
  """
  if require_bindings and ref.config_key not in _CONFIG:
    err_str = "No bindings specified for '{}'."
    raise ValueError(err_str.format(ref.scoped_selector))
  if require_evaluation and not ref.evaluate:
    err_str = "Reference '{}' must be evaluated (add '()')."
    raise ValueError(err_str.format(ref))
@configurable(module='gin')
def macro(value):
  """A Gin macro."""
  # Identity function: the bound `value` parameter is passed straight through,
  # which is how `%macro_name` references obtain their value (see
  # `ParserDelegate.macro`).
  return value
@configurable('constant', module='gin')
def _retrieve_constant():
  """Fetches and returns a constant from the _CONSTANTS map."""
  # The constant's name is recovered from the current scope; constant
  # references are created as `name/gin.constant` (see `ParserDelegate.macro`).
  return _CONSTANTS[current_scope_str()]
@configurable(module='gin')
def singleton(constructor):
  """Returns a singleton keyed by the current scope, built by `constructor`."""
  return singleton_value(current_scope_str(), constructor)
def singleton_value(key, constructor=None):
  """Fetches (or creates and caches) the singleton associated with `key`.

  Args:
    key: The key identifying the singleton.
    constructor: A zero-argument callable used to build the singleton when no
      value exists for `key` yet.

  Returns:
    The cached singleton value for `key`.

  Raises:
    ValueError: If `key` has no cached value and `constructor` is missing or
      not callable.
  """
  if key in _SINGLETONS:
    return _SINGLETONS[key]
  if not constructor:
    raise ValueError(
        "No singleton found for key '{}', and no constructor was given."
        .format(key))
  if not callable(constructor):
    raise ValueError(
        "The constructor for singleton '{}' is not callable.".format(key))
  value = constructor()
  _SINGLETONS[key] = value
  return value
def constant(name, value):
  """Creates a constant that can be referenced from gin config files.

  After calling this function in Python, the constant can be referenced from
  within a Gin config file using the macro syntax. For example, in Python:

      gin.constant('THE_ANSWER', 42)

  Then, in a Gin config file:

      meaning.of_life = %THE_ANSWER

  Note that any Python object can be used as the value of a constant (including
  objects not representable as Gin literals). Values will be stored until
  program termination in a Gin-internal dictionary, so avoid creating constants
  with values that should have a limited lifetime.

  Optionally, a disambiguating module may be prefixed onto the constant
  name. For instance:

      gin.constant('some.modules.PI', 3.14159)

  Args:
    name: The name of the constant, possibly prepended by one or more
      disambiguating module components separated by periods. A macro with this
      name (including the modules) will be created.
    value: The value of the constant. This can be anything (including objects
      not representable as Gin literals). The value will be stored and returned
      whenever the constant is referenced.

  Raises:
    ValueError: If the constant's selector is invalid, or a constant with the
      given selector already exists.
  """
  if not config_parser.MODULE_RE.match(name):
    raise ValueError("Invalid constant selector '{}'.".format(name))

  if _CONSTANTS.matching_selectors(name):
    err_str = "Constants matching selector '{}' already exist ({})."
    raise ValueError(err_str.format(name, _CONSTANTS.matching_selectors(name)))

  _CONSTANTS[name] = value
def constants_from_enum(cls=None, module=None):
  """Decorator for an enum class that generates Gin constants from values.

  Generated constants have format `module.ClassName.ENUM_VALUE`. The module
  name is optional when using the constant.

  Args:
    cls: Class type.
    module: The module to associate with the constants, to help handle naming
      collisions. If `None`, `cls.__module__` will be used.

  Returns:
    Class type (identity function).

  Raises:
    TypeError: When applied to a non-enum class.
  """
  def decorator(enum_cls, module=module):
    if not issubclass(enum_cls, enum.Enum):
      raise TypeError(
          "Class '{}' is not subclass of enum.".format(enum_cls.__name__))
    module_name = enum_cls.__module__ if module is None else module
    # `str(member)` yields 'ClassName.MEMBER', giving the documented format.
    for member in enum_cls:
      constant('{}.{}'.format(module_name, str(member)), member)
    return enum_cls

  # Support both bare `@constants_from_enum` and parameterized
  # `@constants_from_enum(module=...)` usage.
  return decorator if cls is None else decorator(cls)
@register_finalize_hook
def validate_macros_hook(config):
  """Finalize hook: checks every macro reference has bindings and uses `()`."""
  for ref in iterate_references(config, to=macro):
    validate_reference(ref, require_evaluation=True)
@register_finalize_hook
def find_unknown_references_hook(config):
  """Hook to find/raise errors for references to unknown configurables."""
  additional_msg_fmt = " In binding for '{}'."
  for (scope, selector), param_bindings in six.iteritems(config):
    for param_name, param_value in six.iteritems(param_bindings):
      for maybe_unknown in _iterate_flattened_values(param_value):
        if isinstance(maybe_unknown, _UnknownConfigurableReference):
          # Reconstruct a readable binding-key string (using the minimal
          # unambiguous selector) for the error message.
          scope_str = scope + '/' if scope else ''
          min_selector = _REGISTRY.minimal_selector(selector)
          binding_key = '{}{}.{}'.format(scope_str, min_selector, param_name)
          additional_msg = additional_msg_fmt.format(binding_key)
          _raise_unknown_reference_error(maybe_unknown, additional_msg)
Report the config string when a reference cannot be validated.
PiperOrigin-RevId: 281858052
# coding=utf-8
# Copyright 2018 The Gin-Config Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Defines the Gin configuration framework.
Programs frequently have a number of "hyperparameters" that require variation
across different executions of the program. When the number of such parameters
grows even moderately large, or use of some parameter is deeply embedded in the
code, top-level flags become very cumbersome. This module provides an
alternative mechanism for setting such hyperparameters, by allowing injection of
parameter values for any function marked as "configurable".
For detailed documentation, please see the user guide:
https://github.com/google/gin-config/tree/master/docs/index.md
# Making functions and classes configurable
Functions and classes can be marked configurable using the `@configurable`
decorator, which associates a "configurable name" with the function or class (by
default, just the function or class name). Optionally, parameters can be
whitelisted or blacklisted to mark only a subset of the function's parameters as
configurable. Once parameters have been bound (see below) to this function, any
subsequent calls will have those parameters automatically supplied by Gin.
If an argument supplied to a function by its caller (either as a positional
argument or as a keyword argument) corresponds to a parameter configured by Gin,
the caller's value will take precedence.
# A short example
Python code:
@gin.configurable
def mix_cocktail(ingredients):
...
@gin.configurable
def serve_random_cocktail(available_cocktails):
...
@gin.configurable
def drink(cocktail):
...
Gin configuration:
martini/mix_cocktail.ingredients = ['gin', 'vermouth', 'twist of lemon']
gin_and_tonic/mix_cocktail.ingredients = ['gin', 'tonic water']
serve_random_cocktail.available_cocktails = {
'martini': @martini/mix_cocktail,
'gin_and_tonic': @gin_and_tonic/mix_cocktail,
}
drink.cocktail = @serve_random_cocktail()
In the above example, there are three configurable functions: `mix_cocktail`
(with a parameter `ingredients`), `serve_random_cocktail` (with parameter
`available_cocktails`), and `drink` (with parameter `cocktail`).
When `serve_random_cocktail` is called, it will receive a dictionary
containing two scoped *references* to the `mix_cocktail` function (each scope
providing unique parameters, meaning calling the different references will
presumably produce different outputs).
On the other hand, when the `drink` function is called, it will receive the
*output* of calling `serve_random_cocktail` as the value of its `cocktail`
parameter, due to the trailing `()` in `@serve_random_cocktail()`.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import contextlib
import copy
import functools
import inspect
import logging
import os
import pprint
import sys
import threading
import traceback
import enum
from gin import config_parser
from gin import selector_map
from gin import utils
import six
class _ScopeManager(threading.local):
  """Manages currently active config scopes.

  This ensures thread safety of config scope management by subclassing
  `threading.local`. Scopes are tracked as a stack, where elements in the
  stack are lists of the currently active scope names.
  """

  def _maybe_init(self):
    # Lazily create the stack. `threading.local` gives each thread independent
    # state, so this runs once per thread.
    if not hasattr(self, '_active_scopes'):
      self._active_scopes = [[]]

  @property
  def active_scopes(self):
    # Returns a shallow copy of the whole stack.
    self._maybe_init()
    return self._active_scopes[:]

  @property
  def current_scope(self):
    self._maybe_init()
    return self._active_scopes[-1][:]  # Slice to get copy.

  def enter_scope(self, scope):
    """Enters the given scope, updating the list of active scopes.

    Args:
      scope: A list of active scope names, ordered from outermost to innermost.
    """
    self._maybe_init()
    self._active_scopes.append(scope)

  def exit_scope(self):
    """Exits the most recently entered scope."""
    self._maybe_init()
    self._active_scopes.pop()
# Maintains the registry of configurable functions and classes.
_REGISTRY = selector_map.SelectorMap()

# Maps tuples of `(scope, selector)` to associated parameter values. This
# specifies the current global "configuration" set through `bind_parameter` or
# `parse_config`, but doesn't include any functions' default argument values.
_CONFIG = {}

# Keeps a set of module names that were dynamically imported via config files.
_IMPORTED_MODULES = set()

# Maps `(scope, selector)` tuples to all configurable parameter values used
# during program execution (including default argument values).
_OPERATIVE_CONFIG = {}
_OPERATIVE_CONFIG_LOCK = threading.Lock()

# Keeps track of currently active config scopes.
_SCOPE_MANAGER = _ScopeManager()

# Keeps track of hooks to run when the Gin config is finalized.
_FINALIZE_HOOKS = []

# Keeps track of whether the config is locked.
_CONFIG_IS_LOCKED = False

# Keeps track of whether "interactive mode" is enabled, in which case
# redefining a configurable is not an error.
_INTERACTIVE_MODE = False

# Keeps track of constants created via gin.constant, to both prevent duplicate
# definitions and to avoid writing them to the operative config.
_CONSTANTS = selector_map.SelectorMap()

# Keeps track of singletons created via the singleton configurable.
_SINGLETONS = {}

# Keeps track of file readers. These are functions that behave like Python's
# `open` function (can be used as a context manager) and will be used to load
# config files. Each element of this list is a tuple of
# `(file_reader_fn, is_readable_fn)`, where `is_readable_fn` takes a file path
# and returns a boolean indicating whether `file_reader_fn` can read it (see
# `register_file_reader` and the default entry below).
_FILE_READERS = [(open, os.path.isfile)]

# Maintains a cache of argspecs for functions.
_ARG_SPEC_CACHE = {}

# List of location prefixes. Similar to PATH var in unix to be used to search
# for files with those prefixes.
_LOCATION_PREFIXES = ['']

# Value to represent required parameters.
REQUIRED = object()
def _find_class_construction_fn(cls):
"""Find the first __init__ or __new__ method in the given class's MRO."""
for base in type.mro(cls):
if '__init__' in base.__dict__:
return base.__init__
if '__new__' in base.__dict__:
return base.__new__
def _ensure_wrappability(fn):
"""Make sure `fn` can be wrapped cleanly by functools.wraps."""
# Handle "builtin_function_or_method", "wrapped_descriptor", and
# "method-wrapper" types.
unwrappable_types = (type(sum), type(object.__init__), type(object.__call__))
if isinstance(fn, unwrappable_types):
# pylint: disable=unnecessary-lambda
wrappable_fn = lambda *args, **kwargs: fn(*args, **kwargs)
wrappable_fn.__name__ = fn.__name__
wrappable_fn.__doc__ = fn.__doc__
wrappable_fn.__module__ = '' # These types have no __module__, sigh.
wrappable_fn.__wrapped__ = fn
return wrappable_fn
# Otherwise we're good to go...
return fn
def _decorate_fn_or_cls(decorator, fn_or_cls, subclass=False):
  """Decorate a function or class with the given decorator.

  When `fn_or_cls` is a function, applies `decorator` to the function and
  returns the (decorated) result.

  When `fn_or_cls` is a class and the `subclass` parameter is `False`, this will
  replace `fn_or_cls.__init__` with the result of applying `decorator` to it.

  When `fn_or_cls` is a class and `subclass` is `True`, this will subclass the
  class, but with `__init__` defined to be the result of applying `decorator` to
  `fn_or_cls.__init__`. The decorated class has metadata (docstring, name, and
  module information) copied over from `fn_or_cls`. The goal is to provide a
  decorated class the behaves as much like the original as possible, without
  modifying it (for example, inspection operations using `isinstance` or
  `issubclass` should behave the same way as on the original class).

  Args:
    decorator: The decorator to use.
    fn_or_cls: The function or class to decorate.
    subclass: Whether to decorate classes by subclassing. This argument is
      ignored if `fn_or_cls` is not a class.

  Returns:
    The decorated function or class.
  """
  if not inspect.isclass(fn_or_cls):
    return decorator(_ensure_wrappability(fn_or_cls))

  construction_fn = _find_class_construction_fn(fn_or_cls)

  if subclass:
    # Build a subclass carrying over the original's metadata so it behaves as
    # much like the original as possible (see docstring).
    class DecoratedClass(fn_or_cls):
      __doc__ = fn_or_cls.__doc__
      __module__ = fn_or_cls.__module__
    DecoratedClass.__name__ = fn_or_cls.__name__
    if six.PY3:
      DecoratedClass.__qualname__ = fn_or_cls.__qualname__
    cls = DecoratedClass
  else:
    cls = fn_or_cls

  decorated_fn = decorator(_ensure_wrappability(construction_fn))
  if construction_fn.__name__ == '__new__':
    # `__new__` is implicitly a staticmethod; restore that wrapping after
    # decoration so attribute access on the class behaves correctly.
    decorated_fn = staticmethod(decorated_fn)
  setattr(cls, construction_fn.__name__, decorated_fn)
  return cls
class Configurable(
    collections.namedtuple('Configurable', [
        'fn_or_cls', 'name', 'module', 'whitelist', 'blacklist', 'selector'
    ])):
  # Record describing a registered configurable function or class, stored in
  # `_REGISTRY` keyed by `selector`.
  pass
def _raise_unknown_reference_error(ref, additional_msg=''):
err_str = "No configurable matching reference '@{}{}'.{}"
maybe_parens = '()' if ref.evaluate else ''
raise ValueError(err_str.format(ref.selector, maybe_parens, additional_msg))
class ConfigurableReference(object):
  """Represents a reference to a configurable function or class."""

  def __init__(self, scoped_selector, evaluate):
    self._scoped_selector = scoped_selector
    self._evaluate = evaluate
    # 'scope1/scope2/selector' -> scopes ['scope1', 'scope2'], selector.
    scoped_selector_parts = self._scoped_selector.split('/')
    self._scopes = scoped_selector_parts[:-1]
    self._selector = scoped_selector_parts[-1]
    self._configurable = _REGISTRY.get_match(self._selector)
    if not self._configurable:
      _raise_unknown_reference_error(self)

    def reference_decorator(fn):
      # Ensure calls made through this reference run inside the reference's
      # scopes (no-op when the reference is unscoped).
      if self._scopes:
        @six.wraps(fn)
        def scoping_wrapper(*args, **kwargs):
          with config_scope(self._scopes):
            return fn(*args, **kwargs)
        return scoping_wrapper
      return fn

    self._scoped_configurable_fn = _decorate_fn_or_cls(
        reference_decorator, self.configurable.fn_or_cls, True)

  @property
  def configurable(self):
    return self._configurable

  @property
  def scoped_configurable_fn(self):
    return self._scoped_configurable_fn

  @property
  def scopes(self):
    return self._scopes

  @property
  def selector(self):
    return self._selector

  @property
  def scoped_selector(self):
    return self._scoped_selector

  @property
  def config_key(self):
    # The `(scope string, complete selector)` key form used by the config.
    return ('/'.join(self._scopes), self._configurable.selector)

  @property
  def evaluate(self):
    return self._evaluate

  def __eq__(self, other):
    if isinstance(other, self.__class__):
      # pylint: disable=protected-access
      return (
          self._configurable == other._configurable and
          self._evaluate == other._evaluate)
      # pylint: enable=protected-access
    return False

  def __ne__(self, other):
    return not self.__eq__(other)

  def __hash__(self):
    # NOTE(review): `__eq__` ignores scopes but `repr` includes them, so two
    # equal references with different scopes may hash differently — confirm
    # whether any caller relies on the eq/hash contract here.
    return hash(repr(self))

  def __repr__(self):
    # Check if this reference is a macro or constant, i.e. @.../macro() or
    # @.../constant(). Only macros and constants correspond to the %... syntax.
    configurable_fn = self._configurable.fn_or_cls
    if configurable_fn in (macro, _retrieve_constant) and self._evaluate:
      return '%' + '/'.join(self._scopes)
    maybe_parens = '()' if self._evaluate else ''
    return '@{}{}'.format(self._scoped_selector, maybe_parens)

  def __deepcopy__(self, memo):
    """Dishonestly implements the __deepcopy__ special method.

    When called, this returns either the `ConfigurableReference` instance itself
    (when `self._evaluate` is `False`) or the result of calling the underlying
    configurable. Configurable references may be deeply nested inside other
    Python data structures, and by providing this implementation,
    `copy.deepcopy` can be used on the containing Python structure to return a
    copy replacing any `ConfigurableReference` marked for evaluation with its
    corresponding configurable's output.

    Args:
      memo: The memoization dict (unused).

    Returns:
      When `self._evaluate` is `False`, returns the underlying configurable
      (maybe wrapped to be called in the proper scope). When `self._evaluate` is
      `True`, returns the output of calling the underlying configurable.
    """
    if self._evaluate:
      return self._scoped_configurable_fn()
    return self._scoped_configurable_fn
class _UnknownConfigurableReference(object):
  """Represents a reference to an unknown configurable.

  This class acts as a substitute for `ConfigurableReference` when the selector
  doesn't match any known configurable.
  """

  def __init__(self, selector, evaluate):
    # Only the unscoped selector is retained; scopes are irrelevant for an
    # unknown configurable.
    self._selector = selector.split('/')[-1]
    self._evaluate = evaluate

  @property
  def selector(self):
    return self._selector

  @property
  def evaluate(self):
    return self._evaluate

  def __deepcopy__(self, memo):
    """Dishonestly implements the __deepcopy__ special method.

    See `ConfigurableReference` above. If this method is called, it means there
    was an attempt to use this unknown configurable reference, so we throw an
    error here.

    Args:
      memo: The memoization dict (unused).

    Raises:
      ValueError: To report that there is no matching configurable.
    """
    addl_msg = '\n\n To catch this earlier, ensure gin.finalize() is called.'
    _raise_unknown_reference_error(self, addl_msg)
def _validate_skip_unknown(skip_unknown):
if not isinstance(skip_unknown, (bool, list, tuple, set)):
err_str = 'Invalid value for `skip_unknown`: {}'
raise ValueError(err_str.format(skip_unknown))
def _should_skip(selector, skip_unknown):
  """Checks whether `selector` should be skipped (if unknown)."""
  _validate_skip_unknown(skip_unknown)
  if _REGISTRY.matching_selectors(selector):
    # Known configurables are never skipped.
    return False
  # After validation, `skip_unknown` is either a bool or a collection of names.
  if isinstance(skip_unknown, bool):
    return skip_unknown
  return selector in skip_unknown
class ParserDelegate(config_parser.ParserDelegate):
  """Delegate to handle creation of configurable references and macros."""

  def __init__(self, skip_unknown=False):
    # `skip_unknown` may be a bool or a collection of configurable names; see
    # `parse_config` for details.
    self._skip_unknown = skip_unknown

  def configurable_reference(self, scoped_selector, evaluate):
    # For skippable selectors that match nothing, return a placeholder that
    # raises only if the reference is actually used.
    unscoped_selector = scoped_selector.rsplit('/', 1)[-1]
    if _should_skip(unscoped_selector, self._skip_unknown):
      return _UnknownConfigurableReference(scoped_selector, evaluate)
    return ConfigurableReference(scoped_selector, evaluate)

  def macro(self, name):
    # A `%name` may refer to a constant (created via `gin.constant`) or, when
    # no constant matches, to a `gin.macro` binding.
    matching_selectors = _CONSTANTS.matching_selectors(name)
    if matching_selectors:
      if len(matching_selectors) == 1:
        name = matching_selectors[0]
        return ConfigurableReference(name + '/gin.constant', True)
      err_str = "Ambiguous constant selector '{}', matches {}."
      raise ValueError(err_str.format(name, matching_selectors))
    return ConfigurableReference(name + '/gin.macro', True)
class ParsedBindingKey(
    collections.namedtuple('ParsedBindingKey', [
        'scope', 'given_selector', 'complete_selector', 'arg_name'
    ])):
  """Represents a parsed and validated binding key.

  A "binding key" identifies a specific parameter (`arg_name`), of a specific
  configurable (`complete_selector`), in a specific scope (`scope`), to which a
  value may be bound in the global configuration. The `given_selector` field
  retains information about how the original configurable selector was
  specified, which can be helpful for error messages (but is ignored for the
  purposes of equality and hashing).
  """

  def __new__(cls, binding_key):
    """Parses and validates the given binding key.

    This function will parse `binding_key` (if necessary), and ensure that the
    specified parameter can be bound for the given configurable selector (i.e.,
    that the parameter isn't blacklisted or not whitelisted if a whitelist was
    provided).

    Args:
      binding_key: A spec identifying a parameter of a configurable (maybe in
        some scope). This should either be a string of the form
        'maybe/some/scope/maybe.modules.configurable_name.parameter_name'; or a
        list or tuple of `(scope, selector, arg_name)`; or another instance of
        `ParsedBindingKey`.

    Returns:
      A new instance of `ParsedBindingKey`.

    Raises:
      ValueError: If no function can be found matching the configurable name
        specified by `binding_key`, or if the specified parameter name is
        blacklisted or not in the function's whitelist (if present).
    """
    if isinstance(binding_key, ParsedBindingKey):
      return super(ParsedBindingKey, cls).__new__(cls, *binding_key)

    if isinstance(binding_key, (list, tuple)):
      scope, selector, arg_name = binding_key
    elif isinstance(binding_key, six.string_types):
      scope, selector, arg_name = config_parser.parse_binding_key(binding_key)
    else:
      err_str = 'Invalid type for binding_key: {}.'
      raise ValueError(err_str.format(type(binding_key)))

    configurable_ = _REGISTRY.get_match(selector)
    if not configurable_:
      raise ValueError("No configurable matching '{}'.".format(selector))

    if not _might_have_parameter(configurable_.fn_or_cls, arg_name):
      err_str = "Configurable '{}' doesn't have a parameter named '{}'."
      raise ValueError(err_str.format(selector, arg_name))

    if configurable_.whitelist and arg_name not in configurable_.whitelist:
      err_str = "Configurable '{}' doesn't include kwarg '{}' in its whitelist."
      raise ValueError(err_str.format(selector, arg_name))

    if configurable_.blacklist and arg_name in configurable_.blacklist:
      err_str = "Configurable '{}' has blacklisted kwarg '{}'."
      raise ValueError(err_str.format(selector, arg_name))

    return super(ParsedBindingKey, cls).__new__(
        cls,
        scope=scope,
        given_selector=selector,
        complete_selector=configurable_.selector,
        arg_name=arg_name)

  @property
  def config_key(self):
    # The key under which bound parameter values are stored in `_CONFIG`.
    return self.scope, self.complete_selector

  @property
  def scope_selector_arg(self):
    # The identity triple used for equality and hashing.
    return self.scope, self.complete_selector, self.arg_name

  def __eq__(self, other):
    # Equality ignores the `given_selector` field, since two binding keys
    # should be equal whenever they identify the same parameter.
    # BUG FIX: this method was previously named `__equal__`, which Python
    # never invokes; comparisons then fell back to plain tuple equality,
    # which (contrary to the class docstring) included `given_selector`.
    return self.scope_selector_arg == other.scope_selector_arg

  def __ne__(self, other):
    # Defined explicitly because tuple provides its own `__ne__`, which would
    # otherwise be inherited and disagree with `__eq__` above.
    return not self == other

  def __hash__(self):
    return hash(self.scope_selector_arg)
def _format_value(value):
  """Returns `value` in a format parseable by `parse_value`, or `None`.

  Guarantees that whenever a string is returned:

      parse_value(_format_value(value)) == value

  Args:
    value: The value to format.

  Returns:
    A string representation of `value` when `value` round-trips through
    `repr` and `parse_value`, otherwise `None`.
  """
  formatted = repr(value)
  try:
    round_trips = parse_value(formatted) == value
  except SyntaxError:
    # `repr(value)` produced something `parse_value` can't read at all.
    return None
  return formatted if round_trips else None
def _is_literally_representable(value):
  """Returns `True` if `value` can be (parseably) represented as a string.

  Args:
    value: The value to check.

  Returns:
    `True` when `_format_value` can produce a representation of `value` that
    parses back to an equal value, `False` otherwise.
  """
  formatted = _format_value(value)
  return formatted is not None
def clear_config(clear_constants=False):
  """Clears the global configuration.

  Removes any parameter values set by `bind_parameter` or `parse_config`, and
  forgets the set of dynamically imported modules. Registered configurable
  functions and classes are left untouched.

  Args:
    clear_constants: Whether to also clear constants created by `constant`.
      Defaults to False.
  """
  _set_config_is_locked(False)
  _CONFIG.clear()
  _SINGLETONS.clear()
  if not clear_constants:
    # Constants are backed by bindings: clearing and re-registering each one
    # via `constant` restores those bindings after `_CONFIG` was wiped.
    preserved = _CONSTANTS.copy()
    _CONSTANTS.clear()
    for name, value in six.iteritems(preserved):
      constant(name, value)
  else:
    _CONSTANTS.clear()
  _IMPORTED_MODULES.clear()
  _OPERATIVE_CONFIG.clear()
def bind_parameter(binding_key, value):
  """Binds the parameter value specified by `binding_key` to `value`.

  `binding_key` is either a string of the form
  `maybe/scope/optional.module.names.configurable_name.parameter_name`, or a
  list or tuple of `(scope, selector, parameter_name)`, where `selector`
  corresponds to `optional.module.names.configurable_name`. After this call,
  invocations (within the given scope) of the matching configurable function
  receive `value` for parameter `parameter_name`.

  Example:

    @configurable('fully_connected_network')
    def network_fn(num_layers=5, units_per_layer=1024):
      ...

    def main(_):
      config.bind_parameter('fully_connected_network.num_layers', 3)
      network_fn()  # Called with num_layers == 3, not the default of 5.

  Args:
    binding_key: The parameter whose value should be set; either a string, or
      a tuple of the form `(scope, selector, parameter)`.
    value: The desired value.

  Raises:
    RuntimeError: If the config is locked.
    ValueError: If no function can be found matching the configurable name
      specified by `binding_key`, or if the specified parameter name is
      blacklisted or not in the function's whitelist (if present).
  """
  if config_is_locked():
    raise RuntimeError('Attempted to modify locked Gin config.')
  parsed_key = ParsedBindingKey(binding_key)
  _CONFIG.setdefault(parsed_key.config_key, {})[parsed_key.arg_name] = value
def query_parameter(binding_key):
  """Returns the value currently bound to the specified `binding_key`.

  The `binding_key` argument should look like
  'maybe/some/scope/maybe.modules.configurable_name.parameter_name'. Note that
  this will not include default parameters. A bare constant selector is also
  accepted, in which case the constant's value is returned.

  Args:
    binding_key: The parameter whose value should be queried.

  Returns:
    The value bound to the configurable/parameter combination given in
    `binding_key`.

  Raises:
    ValueError: If no function can be found matching the configurable name
      specified by `binding_key`, or if the specified parameter name is
      blacklisted or not in the function's whitelist (if present), or if
      there is no value bound for the queried parameter or configurable.
  """
  if config_parser.MODULE_RE.match(binding_key):
    # The key could name a constant; check for (unambiguous) matches first.
    matches = _CONSTANTS.matching_selectors(binding_key)
    if len(matches) > 1:
      err_str = "Ambiguous constant selector '{}', matches {}."
      raise ValueError(err_str.format(binding_key, matches))
    if matches:
      return _CONSTANTS[matches[0]]
  parsed_key = ParsedBindingKey(binding_key)
  if parsed_key.config_key not in _CONFIG:
    err_str = "Configurable '{}' has no bound parameters."
    raise ValueError(err_str.format(parsed_key.given_selector))
  bound_args = _CONFIG[parsed_key.config_key]
  if parsed_key.arg_name not in bound_args:
    err_str = "Configurable '{}' has no value bound for parameter '{}'."
    raise ValueError(
        err_str.format(parsed_key.given_selector, parsed_key.arg_name))
  return bound_args[parsed_key.arg_name]
def _might_have_parameter(fn_or_cls, arg_name):
  """Returns True if `arg_name` might be a valid parameter for `fn_or_cls`.

  Specifically, this means that `fn_or_cls` either has a parameter named
  `arg_name`, or has a `**kwargs` parameter.

  Args:
    fn_or_cls: The function or class to check.
    arg_name: The name of the parameter.

  Returns:
    Whether `arg_name` might be a valid argument of `fn`.
  """
  if inspect.isclass(fn_or_cls):
    fn = _find_class_construction_fn(fn_or_cls)
  else:
    fn = fn_or_cls

  # Unwrap any decorator chain to inspect the underlying signature.
  while hasattr(fn, '__wrapped__'):
    fn = fn.__wrapped__
  arg_spec = _get_cached_arg_spec(fn)

  if six.PY3:
    return bool(
        arg_spec.varkw or
        arg_name in arg_spec.args or
        arg_name in arg_spec.kwonlyargs)
  return bool(arg_spec.keywords or arg_name in arg_spec.args)
def _validate_parameters(fn_or_cls, arg_name_list, err_prefix):
  """Raises ValueError if any listed name is not a parameter of `fn_or_cls`."""
  for candidate in (arg_name_list or []):
    if _might_have_parameter(fn_or_cls, candidate):
      continue
    err_str = "Argument '{}' in {} not a parameter of '{}'."
    raise ValueError(err_str.format(candidate, err_prefix, fn_or_cls.__name__))
def _get_cached_arg_spec(fn):
  """Returns the (memoized) argspec for `fn`."""
  cached = _ARG_SPEC_CACHE.get(fn)
  if cached is not None:
    return cached
  spec_fn = inspect.getfullargspec if six.PY3 else inspect.getargspec
  try:
    spec = spec_fn(fn)
  except TypeError:
    # `fn` might be a callable object rather than a plain function.
    spec = spec_fn(fn.__call__)
  _ARG_SPEC_CACHE[fn] = spec
  return spec
def _get_supplied_positional_parameter_names(fn, args):
  """Returns the names of the positional arguments supplied in `args`.

  The result may be shorter than `len(args)` when extra values are consumed
  by a vararg (*args) parameter, which has no individual names.
  """
  spec = _get_cached_arg_spec(fn)
  return spec.args[:len(args)]
def _get_all_positional_parameter_names(fn):
  """Returns the names of `fn`'s positional parameters without defaults."""
  spec = _get_cached_arg_spec(fn)
  trailing_defaults = len(spec.defaults or ())
  if trailing_defaults:
    # The last `trailing_defaults` entries of `args` have default values.
    return spec.args[:-trailing_defaults]
  return spec.args
def _get_kwarg_defaults(fn):
  """Returns a dict mapping kwargs to default values for the given function."""
  spec = _get_cached_arg_spec(fn)
  defaults = spec.defaults or ()
  # The trailing `len(defaults)` positional parameters are the ones that
  # carry default values.
  names_with_defaults = spec.args[-len(defaults):] if defaults else []
  arg_vals = dict(zip(names_with_defaults, defaults))
  if six.PY3 and spec.kwonlydefaults:
    arg_vals.update(spec.kwonlydefaults)
  return arg_vals
def _get_validated_required_kwargs(fn, fn_descriptor, whitelist, blacklist):
  """Collects names of REQUIRED kwargs, validating against white/blacklist."""
  required = []
  for kwarg, default in six.iteritems(_get_kwarg_defaults(fn)):
    if default is not REQUIRED:
      continue
    if blacklist and kwarg in blacklist:
      err_str = "Argument '{}' of {} marked REQUIRED but blacklisted."
      raise ValueError(err_str.format(kwarg, fn_descriptor))
    if whitelist and kwarg not in whitelist:
      err_str = "Argument '{}' of {} marked REQUIRED but not whitelisted."
      raise ValueError(err_str.format(kwarg, fn_descriptor))
    required.append(kwarg)
  return required
def _get_default_configurable_parameter_values(fn, whitelist, blacklist):
  """Retrieve all default values for configurable parameters of a function.

  Any parameters included in the supplied blacklist, or not included in the
  supplied whitelist, are excluded, as are parameters whose defaults aren't
  representable as a literal value.

  Args:
    fn: The function whose parameter values should be retrieved.
    whitelist: The whitelist (or `None`) associated with the function.
    blacklist: The blacklist (or `None`) associated with the function.

  Returns:
    A dictionary mapping configurable parameter names to their default values.
  """
  def is_configurable(name, default):
    if whitelist and name not in whitelist:
      return False
    if blacklist and name in blacklist:
      return False
    return _is_literally_representable(default)

  return {k: v
          for k, v in six.iteritems(_get_kwarg_defaults(fn))
          if is_configurable(k, v)}
def _order_by_signature(fn, arg_names):
  """Orders given `arg_names` based on their order in the signature of `fn`."""
  spec = _get_cached_arg_spec(fn)
  signature_order = list(spec.args)
  if six.PY3 and spec.kwonlyargs:
    signature_order += spec.kwonlyargs
  ordered = [name for name in signature_order if name in arg_names]
  # Leftovers (e.g. names consumed by **kwargs) keep their incoming order.
  ordered += [name for name in arg_names if name not in ordered]
  return ordered
def current_scope():
  """Returns the currently active config scope (a copy, per `config_scope`)."""
  return _SCOPE_MANAGER.current_scope
def current_scope_str():
  """Returns the active scope names joined into a single '/'-separated string."""
  return '/'.join(current_scope())
@contextlib.contextmanager
def config_scope(name_or_scope):
  """Opens a new configuration scope.

  Provides a context manager that opens a new explicit configuration
  scope. Explicit configuration scopes restrict parameter bindings to only
  certain sections of code that run within the scope. Scopes can be nested to
  arbitrary depth; any configurable functions called within a scope inherit
  parameters defined by higher level scopes.

  For example, suppose a function named `preprocess_images` is called in two
  places in a codebase: Once when loading data for a training task, and once
  when loading data for an evaluation task:

    def load_training_data():
      ...
      with gin.config_scope('train'):
        images = preprocess_images(images)
      ...

    def load_eval_data():
      ...
      with gin.config_scope('eval'):
        images = preprocess_images(images)
      ...

  By using a `config_scope` to wrap each invocation of `preprocess_images` as
  above, it is possible to use Gin to supply specific parameters to each. Here
  is a possible configuration for the above example:

    preprocess_images.crop_size = [64, 64]
    preprocess_images.normalize_image = True

    train/preprocess_images.crop_location = 'random'
    train/preprocess_images.random_flip_lr = True

    eval/preprocess_images.crop_location = 'center'

  The `crop_size` and `normalize_image` parameters above will be shared by both
  the `train` and `eval` invocations; only `train` will receive
  `random_flip_lr`, and the two invocations receive different values for
  `crop_location`.

  Passing `None` or `''` to `config_scope` will temporarily clear all currently
  active scopes (within the `with` block; they will be restored afterwards).

  Args:
    name_or_scope: A name for the config scope, or an existing scope (e.g.,
      captured from `with gin.config_scope(...) as scope`), or `None` to clear
      currently active scopes.

  Raises:
    ValueError: If `name_or_scope` is not a list, string, or None.

  Yields:
    The resulting config scope (a list of all active scope names, ordered from
    outermost to innermost).
  """
  try:
    valid_value = True
    if isinstance(name_or_scope, list):
      # An existing scope (as yielded by a previous `config_scope`).
      new_scope = name_or_scope
    elif name_or_scope and isinstance(name_or_scope, six.string_types):
      new_scope = current_scope()  # Returns a copy.
      new_scope.extend(name_or_scope.split('/'))
    else:
      # Only `None` or `''` are acceptable here; anything else is flagged
      # invalid and raised below (after entering the scope, so the `finally`
      # block's exit stays balanced).
      valid_value = name_or_scope in (None, '')
      new_scope = []
    # Append new_scope first. It will be popped in the finally block if an
    # exception is raised below.
    _SCOPE_MANAGER.enter_scope(new_scope)
    # Each scope component must be a valid module-style identifier.
    scopes_are_valid = map(config_parser.MODULE_RE.match, new_scope)
    if not valid_value or not all(scopes_are_valid):
      err_str = 'Invalid value for `name_or_scope`: {}.'
      raise ValueError(err_str.format(name_or_scope))
    yield new_scope
  finally:
    _SCOPE_MANAGER.exit_scope()
def _make_gin_wrapper(fn, fn_or_cls, name, selector, whitelist, blacklist):
  """Creates the final Gin wrapper for the given function.

  Args:
    fn: The function that will be wrapped.
    fn_or_cls: The original function or class being made configurable. This will
      differ from `fn` when making a class configurable, in which case `fn` will
      be the constructor/new function, while `fn_or_cls` will be the class.
    name: The name given to the configurable.
    selector: The full selector of the configurable (name including any module
      components).
    whitelist: A whitelist of configurable parameters.
    blacklist: A blacklist of non-configurable parameters.

  Returns:
    The Gin wrapper around `fn`.
  """
  # At this point we have access to the final function to be wrapped, so we
  # can cache a few things here.
  fn_descriptor = "'{}' ('{}')".format(name, fn_or_cls)
  signature_required_kwargs = _get_validated_required_kwargs(
      fn, fn_descriptor, whitelist, blacklist)
  initial_configurable_defaults = _get_default_configurable_parameter_values(
      fn, whitelist, blacklist)

  @six.wraps(fn)
  def gin_wrapper(*args, **kwargs):
    """Supplies fn with parameter values from the configuration."""
    scope_components = current_scope()
    new_kwargs = {}
    # Apply bindings from the outermost scope inward, so that more deeply
    # nested (more specific) scopes override shallower ones.
    for i in range(len(scope_components) + 1):
      partial_scope_str = '/'.join(scope_components[:i])
      new_kwargs.update(_CONFIG.get((partial_scope_str, selector), {}))
    gin_bound_args = list(new_kwargs.keys())
    # After the loop, `partial_scope_str` holds the full current scope string.
    scope_str = partial_scope_str

    arg_names = _get_supplied_positional_parameter_names(fn, args)

    for arg in args[len(arg_names):]:
      if arg is REQUIRED:
        raise ValueError(
            'gin.REQUIRED is not allowed for unnamed (vararg) parameters. If '
            'the function being called is wrapped by a non-Gin decorator, '
            'try explicitly providing argument names for positional '
            'parameters.')

    # Record which positional arguments were passed as the REQUIRED sentinel;
    # their values must come from the Gin config.
    required_arg_names = []
    required_arg_indexes = []
    for i, arg in enumerate(args[:len(arg_names)]):
      if arg is REQUIRED:
        required_arg_names.append(arg_names[i])
        required_arg_indexes.append(i)

    caller_required_kwargs = []
    for kwarg, value in six.iteritems(kwargs):
      if value is REQUIRED:
        caller_required_kwargs.append(kwarg)

    # If the caller passed arguments as positional arguments that correspond to
    # a keyword arg in new_kwargs, remove the keyword argument from new_kwargs
    # to let the caller win and avoid throwing an error. Unless it is an arg
    # marked as REQUIRED.
    for arg_name in arg_names:
      if arg_name not in required_arg_names:
        new_kwargs.pop(arg_name, None)

    # Get default values for configurable parameters.
    operative_parameter_values = initial_configurable_defaults.copy()
    # Update with the values supplied via configuration.
    operative_parameter_values.update(new_kwargs)
    # Remove any values from the operative config that are overridden by the
    # caller. These can't be configured, so they won't be logged. We skip values
    # that are marked as REQUIRED.
    for k in arg_names:
      if k not in required_arg_names:
        operative_parameter_values.pop(k, None)
    for k in kwargs:
      if k not in caller_required_kwargs:
        operative_parameter_values.pop(k, None)

    # An update is performed in case another caller of this same configurable
    # object has supplied a different set of arguments. By doing an update, a
    # Gin-supplied or default value will be present if it was used (not
    # overridden by the caller) at least once.
    with _OPERATIVE_CONFIG_LOCK:
      op_cfg = _OPERATIVE_CONFIG.setdefault((scope_str, selector), {})
      op_cfg.update(operative_parameter_values)

    # We call deepcopy for two reasons: First, to prevent the called function
    # from modifying any of the values in `_CONFIG` through references passed in
    # via `new_kwargs`; Second, to facilitate evaluation of any
    # `ConfigurableReference` instances buried somewhere inside `new_kwargs`.
    # See the docstring on `ConfigurableReference.__deepcopy__` above for more
    # details on the dark magic happening here.
    new_kwargs = copy.deepcopy(new_kwargs)

    # Validate args marked as REQUIRED have been bound in the Gin config.
    missing_required_params = []
    new_args = list(args)
    for i, arg_name in zip(required_arg_indexes, required_arg_names):
      if arg_name not in new_kwargs:
        missing_required_params.append(arg_name)
      else:
        # Substitute the Gin-bound value for the REQUIRED placeholder.
        new_args[i] = new_kwargs.pop(arg_name)

    # Validate kwargs marked as REQUIRED have been bound in the Gin config.
    for required_kwarg in signature_required_kwargs:
      if (required_kwarg not in arg_names and  # not a positional arg
          required_kwarg not in kwargs and  # or a keyword arg
          required_kwarg not in new_kwargs):  # or bound in config
        missing_required_params.append(required_kwarg)
    for required_kwarg in caller_required_kwargs:
      if required_kwarg not in new_kwargs:
        missing_required_params.append(required_kwarg)
      else:
        # Remove from kwargs and let the new_kwargs value be used.
        kwargs.pop(required_kwarg)

    if missing_required_params:
      missing_required_params = (
          _order_by_signature(fn, missing_required_params))
      err_str = 'Required bindings for `{}` not provided in config: {}'
      minimal_selector = _REGISTRY.minimal_selector(selector)
      err_str = err_str.format(minimal_selector, missing_required_params)
      raise RuntimeError(err_str)

    # Now, update with the caller-supplied `kwargs`, allowing the caller to have
    # the final say on keyword argument values.
    new_kwargs.update(kwargs)

    try:
      return fn(*new_args, **new_kwargs)
    except Exception as e:  # pylint: disable=broad-except
      err_str = ''
      if isinstance(e, TypeError):
        # A TypeError with too few supplied positional args likely means some
        # parameters got values from neither Gin nor the caller; augment the
        # error message with that diagnosis.
        all_arg_names = _get_all_positional_parameter_names(fn)
        if len(new_args) < len(all_arg_names):
          unbound_positional_args = list(
              set(all_arg_names[len(new_args):]) - set(new_kwargs))
          if unbound_positional_args:
            caller_supplied_args = list(
                set(arg_names + list(kwargs)) -
                set(required_arg_names + list(caller_required_kwargs)))
            fmt = ('\n No values supplied by Gin or caller for arguments: {}'
                   '\n Gin had values bound for: {gin_bound_args}'
                   '\n Caller supplied values for: {caller_supplied_args}')
            canonicalize = lambda x: list(map(str, sorted(x)))
            err_str += fmt.format(
                canonicalize(unbound_positional_args),
                gin_bound_args=canonicalize(gin_bound_args),
                caller_supplied_args=canonicalize(caller_supplied_args))
      err_str += "\n In call to configurable '{}' ({}){}"
      scope_info = " in scope '{}'".format(scope_str) if scope_str else ''
      err_str = err_str.format(name, fn_or_cls, scope_info)
      utils.augment_exception_message_and_reraise(e, err_str)

  return gin_wrapper
def _make_configurable(fn_or_cls,
                       name=None,
                       module=None,
                       whitelist=None,
                       blacklist=None,
                       subclass=False):
  """Wraps `fn_or_cls` to make it configurable.

  Infers the configurable name from `fn_or_cls.__name__` if necessary, and
  updates global state to keep track of configurable name <-> function
  mappings, as well as whitelisted and blacklisted parameters.

  Args:
    fn_or_cls: The function or class to decorate.
    name: A name for the configurable. If `None`, the name will be inferred
      from `fn_or_cls`. The `name` may also include module components to be
      used for disambiguation (these will be appended to any components
      explicitly specified by `module`).
    module: The module to associate with the configurable, to help handle
      naming collisions. If `None`, `fn_or_cls.__module__` will be used (if no
      module is specified as part of `name`).
    whitelist: A whitelisted set of parameter names to supply values for.
    blacklist: A blacklisted set of parameter names not to supply values for.
    subclass: If `fn_or_cls` is a class and `subclass` is `True`, decorate by
      subclassing `fn_or_cls` and overriding its `__init__` method. If `False`,
      replace the existing `__init__` with a decorated version.

  Returns:
    A wrapped version of `fn_or_cls` that will take parameter values from the
    global configuration.

  Raises:
    RuntimeError: If the config is locked.
    ValueError: If a configurable with `name` (or the name of `fn_or_cls`)
      already exists, or if both a whitelist and blacklist are specified.
  """
  if config_is_locked():
    err_str = 'Attempted to add a new configurable after the config was locked.'
    raise RuntimeError(err_str)
  name = fn_or_cls.__name__ if name is None else name
  if config_parser.IDENTIFIER_RE.match(name):
    # A bare identifier: fall back to the function/class's own module unless
    # one was given explicitly.
    default_module = getattr(fn_or_cls, '__module__', None)
    module = default_module if module is None else module
  elif not config_parser.MODULE_RE.match(name):
    raise ValueError("Configurable name '{}' is invalid.".format(name))
  if module is not None and not config_parser.MODULE_RE.match(module):
    raise ValueError("Module '{}' is invalid.".format(module))
  # The full selector is the module path plus the configurable name.
  selector = module + '.' + name if module else name
  if not _INTERACTIVE_MODE and selector in _REGISTRY:
    err_str = ("A configurable matching '{}' already exists.\n\n"
               'To allow re-registration of configurables in an interactive '
               'environment, use:\n\n'
               ' gin.enter_interactive_mode()')
    raise ValueError(err_str.format(selector))
  if whitelist and blacklist:
    err_str = 'A whitelist or a blacklist can be specified, but not both.'
    raise ValueError(err_str)
  if whitelist and not isinstance(whitelist, (list, tuple)):
    raise TypeError('Whitelist should be a list or tuple.')
  if blacklist and not isinstance(blacklist, (list, tuple)):
    raise TypeError('Blacklist should be a list or tuple.')
  _validate_parameters(fn_or_cls, whitelist, 'whitelist')
  _validate_parameters(fn_or_cls, blacklist, 'blacklist')

  def decorator(fn):
    """Wraps `fn` so that it obtains parameters from the configuration."""
    return _make_gin_wrapper(fn, fn_or_cls, name, selector, whitelist,
                             blacklist)

  decorated_fn_or_cls = _decorate_fn_or_cls(
      decorator, fn_or_cls, subclass=subclass)
  # Register the decorated object so selectors/bindings can resolve to it.
  _REGISTRY[selector] = Configurable(
      decorated_fn_or_cls,
      name=name,
      module=module,
      whitelist=whitelist,
      blacklist=blacklist,
      selector=selector)
  return decorated_fn_or_cls
def configurable(name_or_fn=None, module=None, whitelist=None, blacklist=None):
  """Decorator to make a function or class configurable.

  Registers the decorated function/class as configurable, which allows its
  parameters to be supplied from the global configuration (i.e., set through
  `bind_parameter` or `parse_config`). The decorated function is associated
  with a name in the global configuration, which by default is simply the name
  of the function or class, but can be specified explicitly to avoid naming
  collisions or improve clarity.

  If some parameters should not be configurable, they can be specified in
  `blacklist`. If only a restricted set of parameters should be configurable,
  they can be specified in `whitelist`.

  Used without arguments:

    @config.configurable
    def some_configurable_function(param1, param2='a default value'):
      ...

  the function is registered under `'some_configurable_function'` and both
  `param1` and `param2` are configurable. Used with arguments:

    @config.configurable('explicit_configurable_name', whitelist='param2')
    def some_configurable_function(param1, param2='a default value'):
      ...

  the function is registered under `'explicit_configurable_name'` and only
  `param2` is configurable. Classes can be decorated as well, in which case
  parameters of their constructors are made configurable:

    @config.configurable
    class SomeClass(object):
      def __init__(self, param1, param2='a default value'):
        ...

  Here the configurable's name is `'SomeClass'`, and both `param1` and
  `param2` are configurable.

  Args:
    name_or_fn: A name for this configurable, or a function to decorate (in
      which case the name will be taken from that function). If not set,
      defaults to the name of the function/class that is being made
      configurable. If a name is provided, it may also include module
      components to be used for disambiguation (these will be appended to any
      components explicitly specified by `module`).
    module: The module to associate with the configurable, to help handle
      naming collisions. By default, the module of the function or class being
      made configurable will be used (if no module is specified as part of the
      name).
    whitelist: A whitelisted set of kwargs that should be configurable. All
      other kwargs will not be configurable. Only one of `whitelist` or
      `blacklist` should be specified.
    blacklist: A blacklisted set of kwargs that should not be configurable.
      All other kwargs will be configurable. Only one of `whitelist` or
      `blacklist` should be specified.

  Returns:
    When used with no parameters (or with a function/class supplied as the
    first parameter), it returns the decorated function or class. When used
    with parameters, it returns a function that can be applied to decorate the
    target function or class.
  """
  if callable(name_or_fn):
    # Bare-decorator usage: `name_or_fn` is the thing being decorated.
    target, name = name_or_fn, None
  else:
    target, name = None, name_or_fn

  def decorate(fn_or_cls):
    return _make_configurable(fn_or_cls, name, module, whitelist, blacklist)

  return decorate(target) if target else decorate
def external_configurable(fn_or_cls,
                          name=None,
                          module=None,
                          whitelist=None,
                          blacklist=None):
  """Allow referencing/configuring an external class or function.

  This alerts Gin to the existence of the class or function `fn_or_cls` in the
  event that it can't be easily annotated with `@configurable` (for instance,
  if it is from another project). This allows `fn_or_cls` to be configured and
  referenced (using the `@name` notation) via parameter binding strings.

  Note that only calls to the return value of this function, or calls
  resulting from references to `fn_or_cls` made through binding strings
  (configurations), will have their parameters injected by Gin — explicit
  calls to `fn_or_cls` directly won't have any parameter bindings applied.

  Args:
    fn_or_cls: The external function or class that should be made
      configurable.
    name: The configurable name to be associated with `fn_or_cls`. The name
      may also include module components to be used for disambiguation (these
      will be appended to any components explicitly specified by `module`).
    module: The module to associate with the configurable, to help handle
      naming collisions. By default, `fn_or_cls.__module__` will be used (if
      no module is specified as part of the name).
    whitelist: A whitelist of parameter names to allow configuration for.
    blacklist: A blacklist of parameter names not to allow configuration for.

  Returns:
    A decorated version of `fn_or_cls` that permits parameter binding. For
    functions, this is just a wrapped version of the function. For classes,
    this is a carefully constructed subclass of `fn_or_cls` designed to behave
    nearly identically (even under many type inspection operations) save for
    the addition of parameter binding.
  """
  # `subclass=True` keeps the external class itself untouched: Gin wraps a
  # generated subclass instead of mutating `fn_or_cls.__init__` in place.
  return _make_configurable(
      fn_or_cls,
      name=name,
      module=module,
      whitelist=whitelist,
      blacklist=blacklist,
      subclass=True)
def _config_str(configuration_object,
                max_line_length=80,
                continuation_indent=4):
  """Print the configuration specified in configuration object.

  Args:
    configuration_object: Either `_OPERATIVE_CONFIG` (operative config) or
      `_CONFIG` (all config, bound and unbound).
    max_line_length: A (soft) constraint on the maximum length of a line in the
      formatted string. Large nested structures will be split across lines, but
      e.g. long strings won't be split into a concatenation of shorter strings.
    continuation_indent: The indentation for continued lines.

  Returns:
    A config string capturing all parameter values set by the object.
  """

  def format_binding(key, value):
    """Pretty print the given key/value pair."""
    formatted_val = pprint.pformat(
        value, width=(max_line_length - continuation_indent))
    formatted_val_lines = formatted_val.split('\n')
    if (len(formatted_val_lines) == 1 and
        len(key + formatted_val) <= max_line_length):
      output = '{} = {}'.format(key, formatted_val)
    else:
      # Multi-line value: emit `key = \` and indent the value underneath.
      indented_formatted_val = '\n'.join(
          [' ' * continuation_indent + line for line in formatted_val_lines])
      output = '{} = \\\n{}'.format(key, indented_formatted_val)
    return output

  def sort_key(key_tuple):
    """Sort configurable selector/innermost scopes, ignoring case."""
    scope, selector = key_tuple[0]
    parts = selector.lower().split('.')[::-1] + scope.lower().split('/')[::-1]
    return '/'.join(parts)

  # Build the output as an array of formatted Gin statements. Each statement may
  # span multiple lines. Imports are first, followed by macros, and finally all
  # other bindings sorted in alphabetical order by configurable name.
  formatted_statements = [
      'import {}'.format(module) for module in sorted(_IMPORTED_MODULES)
  ]
  if formatted_statements:
    formatted_statements.append('')
  macros = {}
  for (scope, selector), config in six.iteritems(configuration_object):
    if _REGISTRY[selector].fn_or_cls == macro:
      macros[scope, selector] = config
  if macros:
    formatted_statements.append('# Macros:')
    formatted_statements.append('# ' + '=' * (max_line_length - 2))
  # For macro bindings the macro's name lives in the scope component of the
  # key, so unpacking `(name, _)` below takes the scope as the name.
  for (name, _), config in sorted(macros.items(), key=sort_key):
    binding = format_binding(name, config['value'])
    formatted_statements.append(binding)
  if macros:
    formatted_statements.append('')
  sorted_items = sorted(configuration_object.items(), key=sort_key)
  for (scope, selector), config in sorted_items:
    configurable_ = _REGISTRY[selector]
    fn = configurable_.fn_or_cls
    if fn == macro or fn == _retrieve_constant:
      # Macros were emitted above; constants are not printed as bindings.
      continue
    minimal_selector = _REGISTRY.minimal_selector(configurable_.selector)
    scoped_selector = (scope + '/' if scope else '') + minimal_selector
    parameters = [(k, v) for k, v in six.iteritems(config)
                  if _is_literally_representable(v)]
    formatted_statements.append('# Parameters for {}:'.format(scoped_selector))
    formatted_statements.append('# ' + '=' * (max_line_length - 2))
    for arg, val in sorted(parameters):
      binding = format_binding('{}.{}'.format(scoped_selector, arg), val)
      formatted_statements.append(binding)
    if not parameters:
      formatted_statements.append('# None.')
    formatted_statements.append('')
  return '\n'.join(formatted_statements)
def operative_config_str(max_line_length=80, continuation_indent=4):
  """Retrieve the "operative" configuration as a config string.

  The operative configuration consists of all parameter values used by
  configurable functions that are actually called during execution of the
  current program. Parameters associated with configurable functions that are
  not called (and so can have no effect on program execution) won't be included.

  The goal of the function is to return a config that captures the full set of
  relevant configurable "hyperparameters" used by a program. As such, the
  returned configuration will include the default values of arguments from
  configurable functions (as long as the arguments aren't blacklisted or missing
  from a supplied whitelist), as well as any parameter values overridden via
  `bind_parameter` or through `parse_config`.

  Any parameters that can't be represented as literals (capable of being parsed
  by `parse_config`) are excluded. The resulting config string is sorted
  lexicographically and grouped by configurable name.

  Args:
    max_line_length: A (soft) constraint on the maximum length of a line in the
      formatted string. Large nested structures will be split across lines, but
      e.g. long strings won't be split into a concatenation of shorter strings.
    continuation_indent: The indentation for continued lines.

  Returns:
    A config string capturing all parameter values set in the current program.
  """
  # Same formatter as `config_str`, but fed the operative (actually-used)
  # bindings rather than the full parsed config.
  return _config_str(_OPERATIVE_CONFIG, max_line_length, continuation_indent)
def config_str(max_line_length=80, continuation_indent=4):
  """Retrieve the interpreted configuration as a config string.

  This is not the _operative configuration_, in that it includes parameter
  values which are unused by the program.

  Args:
    max_line_length: A (soft) constraint on the maximum length of a line in the
      formatted string. Large nested structures will be split across lines, but
      e.g. long strings won't be split into a concatenation of shorter strings.
    continuation_indent: The indentation for continued lines.

  Returns:
    A config string capturing all parameter values set in the current program.
  """
  return _config_str(_CONFIG, max_line_length, continuation_indent)
def parse_config(bindings, skip_unknown=False):
  """Parse a file, string, or list of strings containing parameter bindings.

  Parses parameter binding strings to set up the global configuration. Once
  `parse_config` has been called, any calls to configurable functions will have
  parameter values set according to the values specified by the parameter
  bindings in `bindings`.

  An individual parameter binding has the format

      maybe/some/scopes/configurable_name.parameter_name = value

  Multiple binding strings can be passed either in the form of a file-like
  object supporting the `readline` method, a single string with each individual
  parameter binding separated by a newline, or as a list of individual parameter
  binding strings.

  Any Python literal (lists, tuples, dicts, strings, etc.) is acceptable to the
  right of the equals sign, and follows standard Python rules for line
  continuation. Additionally, a value starting with '@' is interpreted as a
  (possibly scoped) reference to another configurable function, in which case
  this value is replaced by a reference to that function. If the value
  furthermore ends in `()` (e.g., `@configurable_name()`), then the value
  returned when calling the function is used (it will be called *just before*
  the function consuming the output is called).

  See the module documentation for a more detailed description of scoping
  mechanisms and a complete example.

  Reading from a file could be done as follows:

      with open('/path/to/file.config') as bindings:
        gin.parse_config(bindings)

  Passing a newline separated string of parameter bindings might look like:

      bindings = '''
        my_class.param_one = 'asdf'
        my_class_param_two = 9.7
      '''
      gin.parse_config(bindings)

  Alternatively, one can declare a list of parameter bindings and pass it in:

      bindings = [
        'my_class.param_one = "asdf"',
        'my_class.param_two = 9.7',
      ]
      gin.parse_config(bindings)

  Can skip unknown configurables. For example, if no module containing a
  'training' configurable was imported, errors can be avoided by specifying
  `skip_unknown=True`:

      bindings = [
        'my_class.param_one = "asdf"',
        'my_class.param_two = 9.7',
        'training.learning_rate = 0.1',
      ]
      gin.parse_config(bindings, skip_unknown=True)

  Args:
    bindings: A file-like object supporting the readline method, a newline
      separated string of parameter bindings, or a list of individual parameter
      binding strings.
    skip_unknown: A boolean indicating whether unknown configurables and imports
      should be skipped (instead of causing an error). Configurable references
      to unknown configurables will cause errors if they are present in a
      binding that is not itself skipped due to an unknown configurable. This
      can also be a list of configurable names: any unknown configurables that
      do not match an item in the list will still cause errors. Note that
      bindings for known configurables will always be parsed.
  """
  # Normalize list/tuple input to the newline-separated string the parser eats.
  if isinstance(bindings, (list, tuple)):
    bindings = '\n'.join(bindings)

  _validate_skip_unknown(skip_unknown)
  if isinstance(skip_unknown, (list, tuple)):
    skip_unknown = set(skip_unknown)

  parser = config_parser.ConfigParser(bindings, ParserDelegate(skip_unknown))
  for statement in parser:
    if isinstance(statement, config_parser.BindingStatement):
      scope, selector, arg_name, value, location = statement
      if not arg_name:
        # No argument name means this is a macro binding (NAME = value); it is
        # stored as the 'value' parameter of the `gin.macro` configurable.
        macro_name = '{}/{}'.format(scope, selector) if scope else selector
        with utils.try_with_location(location):
          bind_parameter((macro_name, 'gin.macro', 'value'), value)
        continue
      if not _should_skip(selector, skip_unknown):
        with utils.try_with_location(location):
          bind_parameter((scope, selector, arg_name), value)
    elif isinstance(statement, config_parser.ImportStatement):
      if skip_unknown:
        try:
          __import__(statement.module)
          _IMPORTED_MODULES.add(statement.module)
        except ImportError:
          tb_len = len(traceback.extract_tb(sys.exc_info()[2]))
          log_str = ('Skipping import of unknown module `%s` '
                     '(skip_unknown=True).')
          log_args = [statement.module]
          if tb_len > 1:
            # In case the error comes from a nested import (i.e. the module is
            # available, but it imports some unavailable module), print the
            # traceback to avoid confusion.
            log_str += '\n%s'
            log_args.append(traceback.format_exc())
          logging.info(log_str, *log_args)
      else:
        with utils.try_with_location(statement.location):
          __import__(statement.module)
          _IMPORTED_MODULES.add(statement.module)
    elif isinstance(statement, config_parser.IncludeStatement):
      # Recursively parse included config files.
      with utils.try_with_location(statement.location):
        parse_config_file(statement.filename, skip_unknown)
    else:
      raise AssertionError('Unrecognized statement type {}.'.format(statement))
def register_file_reader(*args):
  """Register a file reader for use in parse_config_file.

  Registered file readers will be used to try reading files passed to
  `parse_config_file`. All file readers (beginning with the default `open`) will
  be tried until one of them succeeds at opening the file.

  This function may also be used as a decorator. For example:

      @register_file_reader(IOError)
      def exotic_data_source(filename):
        ...

  Args:
    *args: (When used as a decorator, only the existence check is supplied.)
      - file_reader_fn: The file reader function to register. This should be a
        function that can be used as a context manager to open a file and
        provide a file-like object, similar to Python's built-in `open`.
      - is_readable_fn: A function taking the file path and returning a boolean
        indicating whether the file can be read by `file_reader_fn`.

  Returns:
    `None`, or when used as a decorator, a function that will perform the
    registration using the supplied readability predicate.
  """
  def do_registration(file_reader_fn, is_readable_fn):
    # Avoid duplicate registration. The previous implementation used
    # `list(zip(*_FILE_READERS))[0]`, which raises IndexError when the
    # registry is empty; a generator membership test is safe in that case.
    if file_reader_fn not in (reader for reader, _ in _FILE_READERS):
      _FILE_READERS.append((file_reader_fn, is_readable_fn))

  if len(args) == 1:  # It's a decorator.
    return functools.partial(do_registration, is_readable_fn=args[0])
  elif len(args) == 2:
    do_registration(*args)
  else:  # 0 or > 2 arguments supplied.
    err_str = 'register_file_reader() takes 1 or 2 arguments ({} given)'
    raise TypeError(err_str.format(len(args)))
def add_config_file_search_path(location_prefix):
  """Adds a path that will be searched for config files by parse_config_file.

  Args:
    location_prefix: A directory prefix; relative config file paths passed to
      `parse_config_file` are tried against each registered prefix in order.
  """
  _LOCATION_PREFIXES.append(location_prefix)
def parse_config_file(config_file, skip_unknown=False):
  """Parse a Gin config file.

  Args:
    config_file: The path to a Gin config file.
    skip_unknown: A boolean indicating whether unknown configurables and imports
      should be skipped instead of causing errors (alternatively a list of
      configurable names to skip if unknown). See `parse_config` for additional
      details.

  Raises:
    IOError: If `config_file` cannot be read using any register file reader.
  """
  # Absolute paths are used as-is; relative paths are tried against each
  # registered search-path prefix in order.
  prefixes = _LOCATION_PREFIXES if not os.path.isabs(config_file) else ['']
  for location_prefix in prefixes:
    config_file_with_prefix = os.path.join(location_prefix, config_file)
    # Try every registered reader; the first whose existence check passes
    # handles the file, and parsing stops there.
    for reader, existence_check in _FILE_READERS:
      if existence_check(config_file_with_prefix):
        with reader(config_file_with_prefix) as f:
          parse_config(f, skip_unknown=skip_unknown)
          return
  err_str = 'Unable to open file: {}. Searched config paths: {}.'
  raise IOError(err_str.format(config_file, prefixes))
def parse_config_files_and_bindings(config_files,
                                    bindings,
                                    finalize_config=True,
                                    skip_unknown=False):
  """Parse a list of config files followed by extra Gin bindings.

  This function is equivalent to:

      for config_file in config_files:
        gin.parse_config_file(config_file, skip_unknown)
      gin.parse_config(bindings, skip_unknown)
      if finalize_config:
        gin.finalize()

  Args:
    config_files: A list of paths to the Gin config files.
    bindings: A list of individual parameter binding strings.
    finalize_config: Whether to finalize the config after parsing and binding
      (defaults to True).
    skip_unknown: A boolean indicating whether unknown configurables and imports
      should be skipped instead of causing errors (alternatively a list of
      configurable names to skip if unknown). See `parse_config` for additional
      details.
  """
  # Treat None the same as "nothing to parse" for both arguments.
  if config_files is None:
    config_files = []
  if bindings is None:
    bindings = ''
  for config_file in config_files:
    parse_config_file(config_file, skip_unknown)
  # Explicit bindings are applied last so they override file contents.
  parse_config(bindings, skip_unknown)
  if finalize_config:
    finalize()
def parse_value(value):
  """Parse a single Gin value (a literal or configurable reference) from text."""
  if isinstance(value, six.string_types):
    delegate = ParserDelegate()
    return config_parser.ConfigParser(value, delegate).parse_value()
  raise ValueError('value ({}) should be a string type.'.format(value))
def config_is_locked():
  """Returns True if the config has been locked (see `finalize`)."""
  return _CONFIG_IS_LOCKED


def _set_config_is_locked(is_locked):
  # Internal helper; use `finalize` or `unlock_config` to change lock state.
  global _CONFIG_IS_LOCKED
  _CONFIG_IS_LOCKED = is_locked
@contextlib.contextmanager
def unlock_config():
  """A context manager that temporarily unlocks the config.

  Once the config has been locked by `gin.finalize`, it can only be modified
  using this context manager (to make modifications explicit). Example:

      with gin.unlock_config():
        gin.bind_parameter(...)

  In the case where the config is already unlocked, this does nothing (the
  config remains unlocked).

  Yields:
    None.
  """
  config_was_locked = config_is_locked()
  _set_config_is_locked(False)
  try:
    yield
  finally:
    # Restore the previous lock state even if the with-body raised. Without
    # the try/finally, an exception inside the block would leave the config
    # permanently unlocked.
    _set_config_is_locked(config_was_locked)
def enter_interactive_mode():
  """Enables interactive mode (e.g. to allow redefinition in a notebook)."""
  global _INTERACTIVE_MODE
  _INTERACTIVE_MODE = True


def exit_interactive_mode():
  """Disables interactive mode."""
  global _INTERACTIVE_MODE
  _INTERACTIVE_MODE = False


@contextlib.contextmanager
def interactive_mode():
  """Context manager enabling interactive mode for the duration of the block."""
  try:
    enter_interactive_mode()
    yield
  finally:
    # Always restore non-interactive mode, even on error.
    exit_interactive_mode()
def finalize():
  """A function that should be called after parsing all Gin config files.

  Calling this function allows registered "finalize hooks" to inspect (and
  potentially modify) the Gin config, to provide additional functionality. Hooks
  should not modify the configuration object they receive directly; instead,
  they should return a dictionary mapping Gin binding keys to (new or updated)
  values. This way, all hooks see the config as originally parsed.

  Raises:
    RuntimeError: If the config is already locked.
    ValueError: If two or more hooks attempt to modify or introduce bindings for
      the same key. Since it is difficult to control the order in which hooks
      are registered, allowing this could yield unpredictable behavior.
  """
  if config_is_locked():
    raise RuntimeError('Finalize called twice (config already locked).')

  # First collect all hook-proposed bindings, detecting conflicts between
  # hooks, then apply them in a second pass so every hook saw the same config.
  bindings = {}
  for hook in _FINALIZE_HOOKS:
    new_bindings = hook(_CONFIG)
    if new_bindings is not None:
      for key, value in six.iteritems(new_bindings):
        pbk = ParsedBindingKey(key)
        if pbk in bindings:
          err_str = 'Received conflicting updates when running {}.'
          raise ValueError(err_str.format(hook))
        bindings[pbk] = value

  for pbk, value in six.iteritems(bindings):
    bind_parameter(pbk, value)

  # Lock the config; further modification requires `unlock_config`.
  _set_config_is_locked(True)
def register_finalize_hook(fn):
  """Registers `fn` to run during `gin.finalize`.

  Finalize hooks receive the current config, and should return a dictionary
  mapping (scoped) configurable names to any additional parameter bindings
  that should be applied.

  Args:
    fn: The function to register.

  Returns:
    `fn`, allowing `register_finalize_hook` to be used as a decorator.
  """
  _FINALIZE_HOOKS.append(fn)
  return fn
def _iterate_flattened_values(value):
  """Provides an iterator over all values in a nested structure.

  Strings are treated as leaves (not iterated character by character); mapping
  and iterable containers are recursed into, and each container itself is also
  yielded after its contents.

  Args:
    value: An arbitrarily nested structure of containers and leaf values.

  Yields:
    Every nested value, innermost first, ending with `value` itself.
  """
  if isinstance(value, six.string_types):
    yield value
    return

  # The bare `collections.Mapping`/`Iterable`/`ValuesView` aliases were
  # deprecated in Python 3.3 and removed in Python 3.10; use `collections.abc`
  # when present, falling back to `collections` on Python 2.
  abc = getattr(collections, 'abc', collections)

  if isinstance(value, abc.Mapping):
    value = abc.ValuesView(value)

  if isinstance(value, abc.Iterable):
    for nested_value in value:
      for nested_nested_value in _iterate_flattened_values(nested_value):
        yield nested_nested_value

  yield value
def iterate_references(config, to=None):
  """Provides an iterator over references in the given config.

  Args:
    config: A dictionary mapping scoped configurable names to argument bindings.
    to: If supplied, only yield references whose `configurable_fn` matches `to`.

  Yields:
    `ConfigurableReference` instances within `config`, maybe restricted to those
    matching the `to` parameter if it is supplied.
  """
  for maybe_ref in _iterate_flattened_values(config):
    if not isinstance(maybe_ref, ConfigurableReference):
      continue
    if to is None or maybe_ref.configurable.fn_or_cls == to:
      yield maybe_ref
def validate_reference(ref, require_bindings=True, require_evaluation=False):
  """Checks that `ref` has bindings and/or is evaluated, raising otherwise."""
  has_no_bindings = ref.config_key not in _CONFIG
  if require_bindings and has_no_bindings:
    err_str = "No bindings specified for '{}' in config string: \n{}"
    raise ValueError(err_str.format(ref.scoped_selector, config_str()))

  if require_evaluation and not ref.evaluate:
    err_str = ("Reference '{}' must be evaluated (add '()') "
               'in config string: \n{}.')
    raise ValueError(err_str.format(ref, config_str()))
@configurable(module='gin')
def macro(value):
  """A Gin macro."""
  return value


@configurable('constant', module='gin')
def _retrieve_constant():
  """Fetches and returns a constant from the _CONSTANTS map."""
  # The constant's full name is the scope under which this is referenced.
  return _CONSTANTS[current_scope_str()]


@configurable(module='gin')
def singleton(constructor):
  # Returns a per-scope singleton instance, constructing it on first use.
  return singleton_value(current_scope_str(), constructor)
def singleton_value(key, constructor=None):
  """Returns the singleton stored under `key`, constructing it on first use."""
  try:
    return _SINGLETONS[key]
  except KeyError:
    pass
  if not constructor:
    err_str = "No singleton found for key '{}', and no constructor was given."
    raise ValueError(err_str.format(key))
  if not callable(constructor):
    err_str = "The constructor for singleton '{}' is not callable."
    raise ValueError(err_str.format(key))
  instance = _SINGLETONS[key] = constructor()
  return instance
def constant(name, value):
  """Creates a constant that can be referenced from gin config files.

  After calling this function in Python, the constant can be referenced from
  within a Gin config file using the macro syntax. For example, in Python:

      gin.constant('THE_ANSWER', 42)

  Then, in a Gin config file:

      meaning.of_life = %THE_ANSWER

  Note that any Python object can be used as the value of a constant (including
  objects not representable as Gin literals). Values will be stored until
  program termination in a Gin-internal dictionary, so avoid creating constants
  with values that should have a limited lifetime.

  Optionally, a disambiguating module may be prefixed onto the constant
  name. For instance:

      gin.constant('some.modules.PI', 3.14159)

  Args:
    name: The name of the constant, possibly prepended by one or more
      disambiguating module components separated by periods. An macro with this
      name (including the modules) will be created.
    value: The value of the constant. This can be anything (including objects
      not representable as Gin literals). The value will be stored and returned
      whenever the constant is referenced.

  Raises:
    ValueError: If the constant's selector is invalid, or a constant with the
      given selector already exists.
  """
  if not config_parser.MODULE_RE.match(name):
    raise ValueError("Invalid constant selector '{}'.".format(name))

  # Compute the (potentially expensive) selector match once; the original
  # called `matching_selectors` twice when raising.
  existing = _CONSTANTS.matching_selectors(name)
  if existing:
    err_str = "Constants matching selector '{}' already exist ({})."
    raise ValueError(err_str.format(name, existing))

  _CONSTANTS[name] = value
def constants_from_enum(cls=None, module=None):
  """Decorator for an enum class that generates Gin constants from values.

  Generated constants have format `module.ClassName.ENUM_VALUE`. The module
  name is optional when using the constant.

  Args:
    cls: Class type.
    module: The module to associate with the constants, to help handle naming
      collisions. If `None`, `cls.__module__` will be used.

  Returns:
    Class type (identity function).

  Raises:
    TypeError: When applied to a non-enum class.
  """
  def make_constants(enum_cls):
    if not issubclass(enum_cls, enum.Enum):
      raise TypeError(
          "Class '{}' is not subclass of enum.".format(enum_cls.__name__))
    prefix = module if module is not None else enum_cls.__module__
    for member in enum_cls:
      constant('{}.{}'.format(prefix, str(member)), member)
    return enum_cls

  # Support both bare usage (@constants_from_enum) and parameterized usage
  # (@constants_from_enum(module=...)).
  if cls is None:
    return make_constants
  return make_constants(cls)
@register_finalize_hook
def validate_macros_hook(config):
  """Finalize hook ensuring every macro reference is evaluated (ends in '()')."""
  for ref in iterate_references(config, to=macro):
    validate_reference(ref, require_evaluation=True)
@register_finalize_hook
def find_unknown_references_hook(config):
  """Hook to find/raise errors for references to unknown configurables."""
  additional_msg_fmt = " In binding for '{}'."
  for (scope, selector), param_bindings in six.iteritems(config):
    for param_name, param_value in six.iteritems(param_bindings):
      for maybe_unknown in _iterate_flattened_values(param_value):
        if isinstance(maybe_unknown, _UnknownConfigurableReference):
          # Reconstruct the full binding key for a useful error message.
          scope_str = scope + '/' if scope else ''
          min_selector = _REGISTRY.minimal_selector(selector)
          binding_key = '{}{}.{}'.format(scope_str, min_selector, param_name)
          additional_msg = additional_msg_fmt.format(binding_key)
          _raise_unknown_reference_error(maybe_unknown, additional_msg)
|
# Script to convert questions in a .txt file to a .cpp file where they
# are stored in a struct.
#
# Each level file contains 9 question records (3x3). A record is a question
# line, 4 option lines, an answer line (first char used), and a blank line.

# One input file per difficulty level; the index doubles as the first
# array subscript (replaces the original if/elif chain).
LEVEL_FILES = ["q_lvl1.txt", "q_lvl2.txt", "q_lvl3.txt"]

# `with` guarantees the output file is closed even if an input file is
# missing or malformed (the original leaked handles on error).
with open("ques.cpp", 'w') as ques_cpp:
    ques_cpp.write('#include "quiz.h"\n')
    ques_cpp.write('#include <string.h>\n\n')
    ques_cpp.write('question questions[3][3][3];\n\n')
    ques_cpp.write('void init()\n{\n\n')
    for n, level_file in enumerate(LEVEL_FILES):
        with open(level_file) as ques_txt:
            for i in range(3):
                for j in range(3):
                    # rstrip('\n') only strips a trailing newline, so the last
                    # line of a file without one is no longer truncated.
                    question = ques_txt.readline().rstrip('\n')
                    ques_cpp.write("strcpy(questions[%d][%d][%d].q, %s) ; \n"
                                   % (n, i, j, question))
                    for k in range(4):
                        option = ques_txt.readline().rstrip('\n')
                        ques_cpp.write(
                            'strcpy(questions[%d][%d][%d].options[%d], %s) ; \n'
                            % (n, i, j, k, option))
                    answer = ques_txt.readline()
                    ques_cpp.write('questions[%d][%d][%d].correct = %s ; \n\n'
                                   % (n, i, j, answer[0:1]))
                    ques_txt.readline()  # consume the blank separator line
    ques_cpp.write('}')
Added a "Generated from script.py" comment
# Script to convert questions in a .txt file to a .cpp file where they
# are stored in a struct.
#
# Each level file contains 9 question records (3x3). A record is a question
# line, 4 option lines, an answer line (first char used), and a blank line.

# One input file per difficulty level; the index doubles as the first
# array subscript (replaces the original if/elif chain).
LEVEL_FILES = ["q_lvl1.txt", "q_lvl2.txt", "q_lvl3.txt"]

# `with` guarantees the output file is closed even if an input file is
# missing or malformed (the original leaked handles on error).
with open("ques.cpp", 'w') as ques_cpp:
    ques_cpp.write('/* Generated from script.py */ \n\n')
    ques_cpp.write('#include "quiz.h"\n')
    ques_cpp.write('#include <string.h>\n\n')
    ques_cpp.write('question questions[3][3][3];\n\n')
    ques_cpp.write('void init_ques()\n{\n\n')
    for n, level_file in enumerate(LEVEL_FILES):
        with open(level_file) as ques_txt:
            for i in range(3):
                for j in range(3):
                    # rstrip('\n') only strips a trailing newline, so the last
                    # line of a file without one is no longer truncated.
                    question = ques_txt.readline().rstrip('\n')
                    ques_cpp.write("strcpy(questions[%d][%d][%d].q, %s) ; \n"
                                   % (n, i, j, question))
                    for k in range(4):
                        option = ques_txt.readline().rstrip('\n')
                        ques_cpp.write(
                            'strcpy(questions[%d][%d][%d].options[%d], %s) ; \n'
                            % (n, i, j, k, option))
                    answer = ques_txt.readline()
                    ques_cpp.write('questions[%d][%d][%d].correct = %s ; \n\n'
                                   % (n, i, j, answer[0:1]))
                    ques_txt.readline()  # consume the blank separator line
    ques_cpp.write('}')
|
# coding: utf-8
"""A lab app that runs a sub process for a demo or a test."""
from os import path as osp
from os.path import join as pjoin
from stat import S_IRUSR, S_IRGRP, S_IROTH
import argparse
import atexit
import glob
import json
import logging
import os
import pkg_resources
import shutil
import sys
import tempfile
from tempfile import TemporaryDirectory
from unittest.mock import patch
from traitlets import Bool, Dict, Unicode
from ipykernel.kernelspec import write_kernel_spec
import jupyter_core
from jupyter_core.application import base_aliases, base_flags
from jupyterlab_server.process_app import ProcessApp
import jupyterlab_server
HERE = osp.realpath(osp.dirname(__file__))
def _create_notebook_dir():
    """Create a temporary directory with some file structure."""
    root_dir = tempfile.mkdtemp(prefix='mock_contents')
    src_dir = osp.join(root_dir, 'src')
    os.mkdir(src_dir)
    with open(osp.join(src_dir, 'temp.txt'), 'w') as fid:
        fid.write('hello')
    readonly_filepath = osp.join(src_dir, 'readonly-temp.txt')
    with open(readonly_filepath, 'w') as fid:
        fid.write('hello from a readonly file')
    # Read-only for owner/group/other, so tests can exercise readonly handling.
    os.chmod(readonly_filepath, S_IRUSR | S_IRGRP | S_IROTH)
    atexit.register(lambda: shutil.rmtree(root_dir, True))
    return root_dir
def _create_schemas_dir():
    """Create a temporary directory for schemas."""
    schemas_root = tempfile.mkdtemp(prefix='mock_schemas')
    extension_dir = osp.join(schemas_root, '@jupyterlab', 'apputils-extension')
    os.makedirs(extension_dir)

    # Copy the themes schema out of jupyterlab_server's packaged test fixtures.
    schema_package = jupyterlab_server.__name__
    schema_path = 'tests/schemas/@jupyterlab/apputils-extension/themes.json'
    themes = pkg_resources.resource_string(schema_package, schema_path)
    with open(osp.join(extension_dir, 'themes.json'), 'w') as fid:
        fid.write(themes.decode('utf-8'))

    atexit.register(lambda: shutil.rmtree(schemas_root, True))
    return schemas_root
def _create_user_settings_dir():
    """Create a temporary directory for user settings, removed at exit."""
    settings_dir = tempfile.mkdtemp(prefix='mock_user_settings')
    atexit.register(lambda: shutil.rmtree(settings_dir, True))
    return settings_dir
def _create_workspaces_dir():
    """Create a temporary directory for workspaces, removed at exit."""
    workspaces_dir = tempfile.mkdtemp(prefix='mock_workspaces')
    atexit.register(lambda: shutil.rmtree(workspaces_dir, True))
    return workspaces_dir
def _install_kernels():
    """Install the echo and ipython kernel specs into the Jupyter data dir."""
    # Install echo and ipython kernels - should be done after env patch
    kernel_json = {
        'argv': [
            sys.executable,
            '-m', 'jupyterlab.tests.echo_kernel',
            '-f', '{connection_file}'
        ],
        'display_name': "Echo Kernel",
        'language': 'echo'
    }
    paths = jupyter_core.paths
    kernel_dir = pjoin(paths.jupyter_data_dir(), 'kernels', 'echo')
    os.makedirs(kernel_dir)
    with open(pjoin(kernel_dir, 'kernel.json'), 'w') as f:
        f.write(json.dumps(kernel_json))
    # The ipython spec content is produced by ipykernel itself.
    ipykernel_dir = pjoin(paths.jupyter_data_dir(), 'kernels', 'ipython')
    write_kernel_spec(ipykernel_dir)
class _test_env(object):
    """Set Jupyter path variables to a temporary directory

    Useful as a context manager or with explicit start/stop
    """

    def start(self):
        """Patch the environment and jupyter_core paths to point at a temp dir."""
        self.test_dir = tmp = TemporaryDirectory()
        sandbox_env = {
            'JUPYTER_CONFIG_DIR': pjoin(tmp.name, 'jupyter'),
            'JUPYTER_DATA_DIR': pjoin(tmp.name, 'jupyter_data'),
            'JUPYTER_RUNTIME_DIR': pjoin(tmp.name, 'jupyter_runtime'),
            'IPYTHONDIR': pjoin(tmp.name, 'ipython'),
        }
        self.env_patch = patch.dict(os.environ, sandbox_env)
        self.env_patch.start()
        self.path_patch = patch.multiple(
            jupyter_core.paths,
            SYSTEM_JUPYTER_PATH=[pjoin(tmp.name, 'share', 'jupyter')],
            ENV_JUPYTER_PATH=[pjoin(tmp.name, 'env', 'share', 'jupyter')],
            SYSTEM_CONFIG_PATH=[pjoin(tmp.name, 'etc', 'jupyter')],
            ENV_CONFIG_PATH=[pjoin(tmp.name, 'env', 'etc', 'jupyter')],
        )
        self.path_patch.start()

    def stop(self):
        """Undo the patches and remove the temp dir (best effort)."""
        self.env_patch.stop()
        self.path_patch.stop()
        try:
            self.test_dir.cleanup()
        except PermissionError:
            # e.g. Windows refusing to delete files that are still open.
            pass

    def __enter__(self):
        self.start()
        return self.test_dir.name

    def __exit__(self, *exc_info):
        self.stop()
class ProcessTestApp(ProcessApp):
    """A process app for running tests, includes a mock contents directory.
    """
    allow_origin = Unicode('*')

    # NOTE: these defaults are evaluated at class-definition time, so simply
    # importing this module creates the mock directories as a side effect.
    notebook_dir = Unicode(_create_notebook_dir())
    schemas_dir = Unicode(_create_schemas_dir())
    user_settings_dir = Unicode(_create_user_settings_dir())
    workspaces_dir = Unicode(_create_workspaces_dir())

    def __init__(self):
        # Sandbox the Jupyter env before the base app reads any paths.
        self.env_patch = _test_env()
        self.env_patch.start()
        ProcessApp.__init__(self)

    def init_server_extensions(self):
        """Disable server extensions"""
        pass

    def start(self):
        """Point the lab config at the mock directories and start the app."""
        _install_kernels()
        self.kernel_manager.default_kernel_name = 'echo'
        self.lab_config.schemas_dir = self.schemas_dir
        self.lab_config.user_settings_dir = self.user_settings_dir
        self.lab_config.workspaces_dir = self.workspaces_dir
        ProcessApp.start(self)

    def _process_finished(self, future):
        # Tear down the server and propagate the subprocess exit code.
        self.http_server.stop()
        self.io_loop.stop()
        self.env_patch.stop()
        try:
            os._exit(future.result())
        except Exception as e:
            self.log.error(str(e))
            os._exit(1)
# Command-line aliases/flags for JestApp, extending the Jupyter base set.
jest_aliases = dict(base_aliases)
jest_aliases.update(
    testPathPattern='JestApp.testPathPattern',
    testNamePattern='JestApp.testNamePattern',
)

jest_flags = dict(base_flags)
jest_flags['coverage'] = ({'JestApp': {'coverage': True}}, 'Run coverage')
jest_flags['watchAll'] = ({'JestApp': {'watchAll': True}}, 'Watch all test files')
class JestApp(ProcessTestApp):
    """A notebook app that runs a jest test."""

    # Configurable via the aliases/flags defined alongside this class.
    coverage = Bool(False, help='Whether to run coverage').tag(config=True)
    testPathPattern = Unicode('').tag(config=True)
    testNamePattern = Unicode('').tag(config=True)
    watchAll = Bool(False).tag(config=True)

    aliases = jest_aliases
    flags = jest_flags

    jest_dir = Unicode('')
    test_config = Dict(dict(foo='bar'))
    open_browser = False

    def get_command(self):
        """Get the command to run"""
        terminalsAvailable = self.web_app.settings['terminals_available']
        jest = './node_modules/.bin/jest'
        debug = self.log.level == logging.DEBUG
        jest = osp.realpath(osp.join(self.jest_dir, jest))
        if os.name == 'nt':
            # Windows uses the .cmd shim next to the node script.
            jest += '.cmd'
        cmd = []
        # Coverage, debug (node inspector), and plain runs are exclusive modes.
        if self.coverage:
            cmd += [jest, '--coverage']
        elif debug:
            cmd += ['node', '--inspect-brk', jest, '--no-cache']
            if self.watchAll:
                cmd += ['--watchAll']
            else:
                cmd += ['--watch']
        else:
            cmd += [jest]
        if self.testPathPattern:
            cmd += ['--testPathPattern', self.testPathPattern]
        if self.testNamePattern:
            cmd += ['--testNamePattern', self.testNamePattern]
        cmd += ['--runInBand']
        if self.log_level > logging.INFO:
            cmd += ['--silent']
        # Hand the server connection info to the tests through a JSON file
        # referenced by JUPYTER_CONFIG_DATA.
        config = dict(baseUrl=self.connection_url,
                      terminalsAvailable=str(terminalsAvailable),
                      token=self.token)
        config.update(**self.test_config)
        td = tempfile.mkdtemp()
        atexit.register(lambda: shutil.rmtree(td, True))
        config_path = os.path.join(td, 'config.json')
        with open(config_path, 'w') as fid:
            json.dump(config, fid)
        env = os.environ.copy()
        env['JUPYTER_CONFIG_DATA'] = config_path
        return cmd, dict(cwd=self.jest_dir, env=env)
class KarmaTestApp(ProcessTestApp):
    """A notebook app that runs the jupyterlab karma tests.
    """
    # Default glob (relative to `karma_base_dir`) locating the spec files.
    karma_pattern = Unicode('src/*.spec.ts*')
    karma_base_dir = Unicode('')

    def get_command(self):
        """Get the command to run."""
        terminalsAvailable = self.web_app.settings['terminals_available']
        # Compatibility with Notebook 4.2.
        token = getattr(self, 'token', '')
        config = dict(baseUrl=self.connection_url, token=token,
                      terminalsAvailable=str(terminalsAvailable),
                      foo='bar')
        cwd = self.karma_base_dir
        # Write an injector script that embeds the server connection info as a
        # `jupyter-config-data` JSON script tag in the karma test page.
        karma_inject_file = pjoin(cwd, 'build', 'injector.js')
        if not os.path.exists(pjoin(cwd, 'build')):
            os.makedirs(pjoin(cwd, 'build'))
        with open(karma_inject_file, 'w') as fid:
            fid.write("""
            require('es6-promise/dist/es6-promise.js');
            require('@phosphor/widgets/style/index.css');
            var node = document.createElement('script');
            node.id = 'jupyter-config-data';
            node.type = 'application/json';
            node.textContent = '%s';
            document.body.appendChild(node);
            """ % json.dumps(config))

        # validate the pattern
        parser = argparse.ArgumentParser()
        parser.add_argument('--pattern', action='store')
        args, argv = parser.parse_known_args()
        pattern = args.pattern or self.karma_pattern
        files = glob.glob(pjoin(cwd, pattern))
        if not files:
            msg = 'No files matching "%s" found in "%s"'
            raise ValueError(msg % (pattern, cwd))

        # Find and validate the coverage folder
        with open(pjoin(cwd, 'package.json')) as fid:
            data = json.load(fid)
        name = data['name'].replace('@jupyterlab/test-', '')
        folder = osp.realpath(pjoin(HERE, '..', '..', 'packages', name))
        if not osp.exists(folder):
            raise ValueError(
                'No source package directory found for "%s", use the pattern '
                '"@jupyterlab/test-<package_dir_name>"' % name
            )

        # Pass the injector file, spec pattern, and coverage folder to karma
        # through the environment.
        env = os.environ.copy()
        env['KARMA_INJECT_FILE'] = karma_inject_file
        env.setdefault('KARMA_FILE_PATTERN', pattern)
        env.setdefault('KARMA_COVER_FOLDER', folder)
        cwd = self.karma_base_dir
        cmd = ['karma', 'start'] + sys.argv[1:]
        return cmd, dict(env=env, cwd=cwd)
def run_jest(jest_dir):
    """Run a jest test in the given base directory.
    """
    jest_app = JestApp.instance()
    jest_app.jest_dir = jest_dir
    jest_app.initialize()
    jest_app.start()
def run_karma(base_dir):
    """Run a karma test in the given base directory.
    """
    # Quiet the server logs so karma output stays readable.
    logging.disable(logging.WARNING)
    karma_app = KarmaTestApp.instance()
    karma_app.karma_base_dir = base_dir
    karma_app.initialize([])
    karma_app.start()
Add karma_coverage_dir to KarmaTestApp
# coding: utf-8
"""A lab app that runs a sub process for a demo or a test."""
from os import path as osp
from os.path import join as pjoin
from stat import S_IRUSR, S_IRGRP, S_IROTH
import argparse
import atexit
import glob
import json
import logging
import os
import pkg_resources
import shutil
import sys
import tempfile
from tempfile import TemporaryDirectory
from unittest.mock import patch
from traitlets import Bool, Dict, Unicode
from ipykernel.kernelspec import write_kernel_spec
import jupyter_core
from jupyter_core.application import base_aliases, base_flags
from jupyterlab_server.process_app import ProcessApp
import jupyterlab_server
HERE = osp.realpath(osp.dirname(__file__))
def _create_notebook_dir():
    """Create a temporary directory with some file structure."""
    root_dir = tempfile.mkdtemp(prefix='mock_contents')
    src_dir = osp.join(root_dir, 'src')
    os.mkdir(src_dir)
    with open(osp.join(src_dir, 'temp.txt'), 'w') as fid:
        fid.write('hello')
    readonly_filepath = osp.join(src_dir, 'readonly-temp.txt')
    with open(readonly_filepath, 'w') as fid:
        fid.write('hello from a readonly file')
    # Read-only for owner/group/other, so tests can exercise readonly handling.
    os.chmod(readonly_filepath, S_IRUSR | S_IRGRP | S_IROTH)
    atexit.register(lambda: shutil.rmtree(root_dir, True))
    return root_dir
def _create_schemas_dir():
    """Create a temporary directory for schemas."""
    schemas_root = tempfile.mkdtemp(prefix='mock_schemas')
    extension_dir = osp.join(schemas_root, '@jupyterlab', 'apputils-extension')
    os.makedirs(extension_dir)

    # Copy the themes schema out of jupyterlab_server's packaged test fixtures.
    schema_package = jupyterlab_server.__name__
    schema_path = 'tests/schemas/@jupyterlab/apputils-extension/themes.json'
    themes = pkg_resources.resource_string(schema_package, schema_path)
    with open(osp.join(extension_dir, 'themes.json'), 'w') as fid:
        fid.write(themes.decode('utf-8'))

    atexit.register(lambda: shutil.rmtree(schemas_root, True))
    return schemas_root
def _create_user_settings_dir():
    """Create a temporary directory for user settings, removed at exit."""
    settings_dir = tempfile.mkdtemp(prefix='mock_user_settings')
    atexit.register(lambda: shutil.rmtree(settings_dir, True))
    return settings_dir
def _create_workspaces_dir():
    """Create a temporary directory for workspaces, removed at exit."""
    workspaces_dir = tempfile.mkdtemp(prefix='mock_workspaces')
    atexit.register(lambda: shutil.rmtree(workspaces_dir, True))
    return workspaces_dir
def _install_kernels():
    # Install echo and ipython kernels - should be done after env patch
    # (the kernel specs land under the patched JUPYTER_DATA_DIR, so they
    # disappear with the test environment).
    kernel_json = {
        'argv': [
            sys.executable,
            '-m', 'jupyterlab.tests.echo_kernel',
            '-f', '{connection_file}'
        ],
        'display_name': "Echo Kernel",
        'language': 'echo'
    }
    paths = jupyter_core.paths
    kernel_dir = pjoin(paths.jupyter_data_dir(), 'kernels', 'echo')
    os.makedirs(kernel_dir)
    with open(pjoin(kernel_dir, 'kernel.json'), 'w') as f:
        f.write(json.dumps(kernel_json))
    # A real ipython kernel spec alongside the echo one.
    ipykernel_dir = pjoin(paths.jupyter_data_dir(), 'kernels', 'ipython')
    write_kernel_spec(ipykernel_dir)
class _test_env(object):
    """Set Jupyter path variables to a temporary directory
    Useful as a context manager or with explicit start/stop
    """
    def start(self):
        """Redirect all Jupyter/IPython locations into one temp dir."""
        self.test_dir = td = TemporaryDirectory()
        # Environment variables read by jupyter_core at lookup time.
        self.env_patch = patch.dict(os.environ, {
            'JUPYTER_CONFIG_DIR': pjoin(td.name, 'jupyter'),
            'JUPYTER_DATA_DIR': pjoin(td.name, 'jupyter_data'),
            'JUPYTER_RUNTIME_DIR': pjoin(td.name, 'jupyter_runtime'),
            'IPYTHONDIR': pjoin(td.name, 'ipython'),
        })
        self.env_patch.start()
        # Module-level path constants were computed at import time, so the
        # env patch alone is not enough — patch them directly too.
        self.path_patch = patch.multiple(
            jupyter_core.paths,
            SYSTEM_JUPYTER_PATH=[pjoin(td.name, 'share', 'jupyter')],
            ENV_JUPYTER_PATH=[pjoin(td.name, 'env', 'share', 'jupyter')],
            SYSTEM_CONFIG_PATH=[pjoin(td.name, 'etc', 'jupyter')],
            ENV_CONFIG_PATH=[pjoin(td.name, 'env', 'etc', 'jupyter')],
        )
        self.path_patch.start()
    def stop(self):
        """Undo the patches and remove the temp dir."""
        self.env_patch.stop()
        self.path_patch.stop()
        try:
            self.test_dir.cleanup()
        except PermissionError as e:
            # e.g. read-only files on Windows; leave the dir behind.
            pass
    def __enter__(self):
        self.start()
        return self.test_dir.name
    def __exit__(self, *exc_info):
        self.stop()
class ProcessTestApp(ProcessApp):
    """A process app for running tests, includes a mock contents directory.
    """
    allow_origin = Unicode('*')
    # Each trait default builds (and registers cleanup for) a fresh temp dir.
    notebook_dir = Unicode(_create_notebook_dir())
    schemas_dir = Unicode(_create_schemas_dir())
    user_settings_dir = Unicode(_create_user_settings_dir())
    workspaces_dir = Unicode(_create_workspaces_dir())
    def __init__(self):
        # Patch the Jupyter environment *before* ProcessApp reads any paths.
        self.env_patch = _test_env()
        self.env_patch.start()
        ProcessApp.__init__(self)
    def init_server_extensions(self):
        """Disable server extensions"""
        pass
    def start(self):
        """Install the test kernels and point the lab config at our mock dirs."""
        _install_kernels()
        self.kernel_manager.default_kernel_name = 'echo'
        self.lab_config.schemas_dir = self.schemas_dir
        self.lab_config.user_settings_dir = self.user_settings_dir
        self.lab_config.workspaces_dir = self.workspaces_dir
        ProcessApp.start(self)
    def _process_finished(self, future):
        """Tear down the server and exit with the child process's code."""
        self.http_server.stop()
        self.io_loop.stop()
        self.env_patch.stop()
        try:
            os._exit(future.result())
        except Exception as e:
            self.log.error(str(e))
            os._exit(1)
# Command-line aliases and flags for the JestApp entry point.
jest_aliases = dict(base_aliases)
jest_aliases.update({
    'testPathPattern': 'JestApp.testPathPattern',
    'testNamePattern': 'JestApp.testNamePattern',
})
jest_flags = dict(base_flags)
jest_flags.update(
    coverage=({'JestApp': {'coverage': True}}, 'Run coverage'),
    watchAll=({'JestApp': {'watchAll': True}}, 'Watch all test files'),
)
class JestApp(ProcessTestApp):
    """A notebook app that runs a jest test."""
    coverage = Bool(False, help='Whether to run coverage').tag(config=True)
    testPathPattern = Unicode('').tag(config=True)
    testNamePattern = Unicode('').tag(config=True)
    watchAll = Bool(False).tag(config=True)
    aliases = jest_aliases
    flags = jest_flags
    jest_dir = Unicode('')
    test_config = Dict(dict(foo='bar'))
    open_browser = False
    def get_command(self):
        """Get the command to run

        Builds the jest argv (coverage / node-inspector / plain variants)
        and a config.json the browser tests read via JUPYTER_CONFIG_DATA.
        """
        terminalsAvailable = self.web_app.settings['terminals_available']
        jest = './node_modules/.bin/jest'
        debug = self.log.level == logging.DEBUG
        jest = osp.realpath(osp.join(self.jest_dir, jest))
        if os.name == 'nt':
            jest += '.cmd'
        cmd = []
        if self.coverage:
            cmd += [jest, '--coverage']
        elif debug:
            # Run jest under the node inspector so tests can be stepped through.
            cmd += ['node', '--inspect-brk', jest, '--no-cache']
            if self.watchAll:
                cmd += ['--watchAll']
            else:
                cmd += ['--watch']
        else:
            cmd += [jest]
        if self.testPathPattern:
            cmd += ['--testPathPattern', self.testPathPattern]
        if self.testNamePattern:
            cmd += ['--testNamePattern', self.testNamePattern]
        cmd += ['--runInBand']
        if self.log_level > logging.INFO:
            cmd += ['--silent']
        # Connection details the in-browser tests need to reach this server.
        config = dict(baseUrl=self.connection_url,
                      terminalsAvailable=str(terminalsAvailable),
                      token=self.token)
        config.update(**self.test_config)
        td = tempfile.mkdtemp()
        atexit.register(lambda: shutil.rmtree(td, True))
        config_path = os.path.join(td, 'config.json')
        with open(config_path, 'w') as fid:
            json.dump(config, fid)
        env = os.environ.copy()
        env['JUPYTER_CONFIG_DATA'] = config_path
        return cmd, dict(cwd=self.jest_dir, env=env)
class KarmaTestApp(ProcessTestApp):
    """A notebook app that runs the jupyterlab karma tests.
    """
    karma_pattern = Unicode('src/*.spec.ts*')
    karma_base_dir = Unicode('')
    karma_coverage_dir = Unicode('')
    def get_command(self):
        """Get the command to run.

        Writes an injector script that embeds the server connection config
        into the test page, validates the spec-file pattern, then launches
        karma with the config handed over via environment variables.
        """
        terminalsAvailable = self.web_app.settings['terminals_available']
        # Compatibility with Notebook 4.2.
        token = getattr(self, 'token', '')
        config = dict(baseUrl=self.connection_url, token=token,
                      terminalsAvailable=str(terminalsAvailable),
                      foo='bar')
        cwd = self.karma_base_dir
        # Injector adds a jupyter-config-data <script> tag to the karma page.
        karma_inject_file = pjoin(cwd, 'build', 'injector.js')
        if not os.path.exists(pjoin(cwd, 'build')):
            os.makedirs(pjoin(cwd, 'build'))
        with open(karma_inject_file, 'w') as fid:
            fid.write("""
            require('es6-promise/dist/es6-promise.js');
            require('@phosphor/widgets/style/index.css');
            var node = document.createElement('script');
            node.id = 'jupyter-config-data';
            node.type = 'application/json';
            node.textContent = '%s';
            document.body.appendChild(node);
            """ % json.dumps(config))
        # validate the pattern
        parser = argparse.ArgumentParser()
        parser.add_argument('--pattern', action='store')
        args, argv = parser.parse_known_args()
        pattern = args.pattern or self.karma_pattern
        files = glob.glob(pjoin(cwd, pattern))
        if not files:
            msg = 'No files matching "%s" found in "%s"'
            raise ValueError(msg % (pattern, cwd))
        # Find and validate the coverage folder if not specified
        if not self.karma_coverage_dir:
            with open(pjoin(cwd, 'package.json')) as fid:
                data = json.load(fid)
            # Map "@jupyterlab/test-foo" back to the packages/foo source dir.
            name = data['name'].replace('@jupyterlab/test-', '')
            folder = osp.realpath(pjoin(HERE, '..', '..', 'packages', name))
            if not osp.exists(folder):
                raise ValueError(
                    'No source package directory found for "%s", use the pattern '
                    '"@jupyterlab/test-<package_dir_name>"' % name
                )
            self.karma_coverage_dir = folder
        env = os.environ.copy()
        env['KARMA_INJECT_FILE'] = karma_inject_file
        env.setdefault('KARMA_FILE_PATTERN', pattern)
        env.setdefault('KARMA_COVER_FOLDER', self.karma_coverage_dir)
        cwd = self.karma_base_dir
        cmd = ['karma', 'start'] + sys.argv[1:]
        return cmd, dict(env=env, cwd=cwd)
def run_jest(jest_dir):
    """Run a jest test in the given base directory.

    NOTE(review): initialize() is called without arguments here, so it
    presumably picks up sys.argv (unlike run_karma below) — confirm this
    is intentional for alias/flag parsing.
    """
    app = JestApp.instance()
    app.jest_dir = jest_dir
    app.initialize()
    app.start()
def run_karma(base_dir, coverage_dir=''):
    """Run a karma test in the given base directory.
    """
    # Silence warning-level (and below) app chatter while tests run.
    logging.disable(logging.WARNING)
    app = KarmaTestApp.instance()
    app.karma_base_dir = base_dir
    app.karma_coverage_dir = coverage_dir
    app.initialize([])
    app.start()
|
# This file is part of the Indico plugins.
# Copyright (C) 2002 - 2022 CERN
#
# The Indico plugins are free software; you can redistribute
# them and/or modify them under the terms of the MIT License;
# see the LICENSE file for more details.
import re
from wtforms import ValidationError
from indico.util.string import is_valid_mail
from indico_payment_paypal import _
def validate_business(form, field):
    """Validates a PayPal business string.

    It can either be an email address or a paypal business account ID
    (13 alphanumeric characters).
    """
    if not is_valid_mail(field.data, multi=False) and not re.match(r'^[a-zA-Z0-9]{13}$', field.data):
        raise ValidationError(_('Invalid email address / paypal ID'))
Fix docstring typo
# This file is part of the Indico plugins.
# Copyright (C) 2002 - 2022 CERN
#
# The Indico plugins are free software; you can redistribute
# them and/or modify them under the terms of the MIT License;
# see the LICENSE file for more details.
import re
from wtforms import ValidationError
from indico.util.string import is_valid_mail
from indico_payment_paypal import _
def validate_business(form, field):
    """Validates a PayPal business string.

    It can either be an email address or a paypal business account ID
    (13 alphanumeric characters).
    """
    value = field.data
    is_email = is_valid_mail(value, multi=False)
    is_account_id = re.match(r'^[a-zA-Z0-9]{13}$', value) is not None
    if not (is_email or is_account_id):
        raise ValidationError(_('Invalid email address / paypal ID'))
|
import calendar
import os
from datetime import *
import xlsxwriter
"""test_replicate_worksheet.py: IT on Income Statement Excel workbook"""
__author__ = "Prajesh Ananthan"
def test_create_XLSheet():
    """Create one worksheet per month and place the mock cost table on each.

    Returns a two-element list [success, filename], where filename is the
    workbook's output path (or None when the workbook could not be created).
    """
    workbook = None
    tablerange = 'B3:D7'
    filename = getfilename()
    success = False
    data = get_mock_columns_with_data()
    options = {'data': data,
               'columns':
                   [
                       {'header': 'ITEM'},
                       {'header': 'COST'},
                       {'header': 'STATUS'}
                   ]
               }
    try:
        workbook = xlsxwriter.Workbook('output/{}'.format(filename))
        monthlist = test_get_list_months()
        for month in monthlist:
            worksheet = workbook.add_worksheet(name=month)
            worksheet.set_column(1, 3, 15)
            worksheet.add_table(tablerange, options)
        success = True
        print("INFO: {} created!".format(workbook.filename))
    except Exception as e:
        print(e, "Unable to write onto {}!".format(filename))
    finally:
        # Bug fix: close() was called unconditionally, raising AttributeError
        # when Workbook() itself failed and workbook was still None.
        if workbook is not None:
            workbook.close()
    return [success, workbook.filename if workbook is not None else None]
def launchFileInWindows(unixpath):
    """Open the given file with its associated Windows application.

    NOTE(review): os.system() on a raw path runs it through the shell —
    acceptable for this local test helper, unsafe for untrusted paths.
    """
    win32path = os.path.normcase(unixpath)
    try:
        if os.path.exists(win32path):
            print("INFO: Launching {}...".format(win32path))
            os.system(win32path)
    except Exception as e:
        print(e)
def test_get_list_months():
    """Return 'Mon-YY' labels for every month from Jan 2017 through Jan 2018."""
    current = datetime.strptime("2017-01-01", "%Y-%m-%d")
    end = datetime.strptime("2018-01-12", "%Y-%m-%d")
    labels = []
    while current < end:
        abbrev = calendar.month_name[current.month][:3]
        labels.append("{0}-{1}".format(abbrev, str(current.year)[-2:]))
        # Advance to the first day of the following month.
        if current.month == 12:
            current = current.replace(year=current.year + 1, month=1)
        else:
            current = current.replace(month=current.month + 1)
    return labels
def getfilename():
    """Build the workbook file name: <prefix>--<year>.xlsx."""
    return "{prefix}--{year}{ext}".format(
        prefix="ANNUAL_CASHFLOW", year="2017", ext=".xlsx")
def get_mock_columns_with_data():
    """Return mock [item, cost] rows for the worksheet table."""
    names = ('PTPTN', 'Rent', 'Car Loan', 'Unifi')
    costs = (10000, 2000, 6000, 500)
    return [[name, cost] for name, cost in zip(names, costs)]
def run_test_cases():
    """Build the workbook and, if it was written successfully, open it."""
    success, filename = test_create_XLSheet()
    # `success` is already a bool — comparing `== True` is un-Pythonic.
    if success:
        launchFileInWindows(filename)


if __name__ == '__main__':
    run_test_cases()
Added merge and center column
import calendar
import os
from datetime import *
import xlsxwriter
"""test_replicate_worksheet.py: IT on Income Statement Excel workbook"""
__author__ = "Prajesh Ananthan"
def test_create_XLSheet():
    """Create one worksheet per month with a merged, centered title row and
    the mock cost table below it.

    Returns a two-element list [success, filename], where filename is the
    workbook's output path (or None when the workbook could not be created).
    """
    workbook = None
    tablerange = 'B3:D7'
    filename = getfilename()
    success = False
    data = get_mock_columns_with_data()
    options = {'data': data,
               'columns':
                   [
                       {'header': 'ITEM'},
                       {'header': 'COST'},
                       {'header': 'STATUS'}
                   ]
               }
    try:
        workbook = xlsxwriter.Workbook('output/{}'.format(filename))
        monthlist = test_get_list_months()
        # Format shared by every sheet's merged title cell.
        merge_format = workbook.add_format({
            'bold': 2,
            'border': 2,
            'align': 'center',
            'valign': 'vcenter',
            'fg_color': 'yellow'})
        for month in monthlist:
            worksheet = workbook.add_worksheet(name=month)
            worksheet.set_column(1, 3, 15)
            worksheet.merge_range('B2:D2', 'MERGED RANGE', merge_format)
            worksheet.add_table(tablerange, options)
        success = True
        print("INFO: {} created!".format(workbook.filename))
    except Exception as e:
        print(e, "Unable to write onto {}!".format(filename))
    finally:
        # Bug fix: close() was called unconditionally, raising AttributeError
        # when Workbook() itself failed and workbook was still None.
        if workbook is not None:
            workbook.close()
    return [success, workbook.filename if workbook is not None else None]
def launchFileInWindows(unixpath):
    """Open the given file with its associated Windows application.

    NOTE(review): os.system() on a raw path runs it through the shell —
    acceptable for this local test helper, unsafe for untrusted paths.
    """
    win32path = os.path.normcase(unixpath)
    try:
        if os.path.exists(win32path):
            print("INFO: Launching {}...".format(win32path))
            os.system(win32path)
    except Exception as e:
        print(e)
def test_get_list_months():
    """Return 'Mon-YY' labels for every month from Jan 2017 through Jan 2018."""
    cursor = datetime.strptime("2017-01-01", "%Y-%m-%d")
    stop = datetime.strptime("2018-01-12", "%Y-%m-%d")
    result = []
    while cursor < stop:
        short_name = calendar.month_name[cursor.month][:3]
        result.append("{0}-{1}".format(short_name, str(cursor.year)[-2:]))
        # Step to the first day of the next month.
        if cursor.month == 12:
            cursor = cursor.replace(year=cursor.year + 1, month=1)
        else:
            cursor = cursor.replace(month=cursor.month + 1)
    return result
def getfilename():
    """Build the test workbook file name: <prefix>--<year>.xlsx."""
    return "{prefix}--{year}{ext}".format(
        prefix="TEST_ANNUAL_CASHFLOW", year="2017", ext=".xlsx")
def get_mock_columns_with_data():
    """Return mock [item, cost] rows for the worksheet table."""
    costs_by_item = (('Item1', 10000), ('Item2', 2000),
                     ('Item3', 6000), ('Item4', 500))
    return [list(pair) for pair in costs_by_item]
def run_test_cases():
    """Build the workbook and, if it was written successfully, open it."""
    success, filename = test_create_XLSheet()
    # `success` is already a bool — comparing `== True` is un-Pythonic.
    if success:
        launchFileInWindows(filename)


if __name__ == '__main__':
    run_test_cases()
|
from django.core.management.base import BaseCommand
from lxml import etree, html
import urllib2
from os import path
import importlib
class Command(BaseCommand):
    """Fetch a resource by URL, apply an XSLT transformation to it, print
    the result and optionally validate it / import it into Django models.

    NOTE: this module is Python 2 (print statements, urllib2).
    """
    help = 'Processes XSLT transformation on a fetched by URL resource and outputs the result'
    def add_arguments(self, parser):
        """Declare the command-line interface for manage.py."""
        parser.add_argument('url', help='URL to fetch source XML')
        parser.add_argument('xslt_file', help='Path to XSLT transformation file')
        parser.add_argument('--validate', action='store_true',
                            help='Validate against Relax NG schema after transformation')
        # Default schema lives three directories up from this module.
        rng_file = path.join(path.dirname(path.dirname(path.dirname(path.abspath(__file__)))), 'schema.rng')
        parser.add_argument('--rng_file', default=rng_file,
                            help='Path to RELAX NG file. Defaults to schema.rng in module dir. '
                                 'Used only if --validate is set')
        parser.add_argument('--save', action='store_true',
                            help='Save data to the model. Successful validation against Relax NG '
                                 'schema is required. Model names and fields in transformed XML '
                                 'must represent existing models and fields. Otherwise import '
                                 'will break with an exception')
    def handle(self, *args, **options):
        """Entry point: fetch, transform, print, then validate/save on demand."""
        response = urllib2.urlopen(options['url'])
        encoding = response.headers.getparam('charset')
        content_type = response.info().type
        # NOTE(review): if the response is neither XML nor HTML,
        # source_etree is never bound and the transform below raises
        # NameError — presumably callers only feed XML/HTML URLs; confirm.
        if 'xml' in content_type:
            source_etree = etree.parse(response)
        elif 'html' in content_type:
            source_etree = html.parse(response)
        xslt_etree = etree.parse(options['xslt_file'])
        transform = etree.XSLT(xslt_etree)
        transformed_etree = transform(source_etree)
        output = etree.tostring(transformed_etree, pretty_print=True, encoding=encoding)
        print '<?xml version="1.0" encoding="' + encoding + '"?>\n' + output
        # --save implies validation: both paths require a passing RelaxNG check.
        if options['validate'] or options['save']:
            rng_file_etree = etree.parse(options['rng_file'])
            relaxng = etree.RelaxNG(rng_file_etree)
            try:
                relaxng.assertValid(transformed_etree)
                print 'Document is valid'
                if options['save']:
                    saved_objects_count = 0
                    # Transformed XML layout: //model -> .//item -> .//field | .//fk
                    for model_element in transformed_etree.xpath('//model'):
                        model = self.get_model(model_element.attrib['model'])
                        for item_element in model_element.xpath('.//item'):
                            obj = model()
                            for field_element in item_element.xpath('.//field'):
                                setattr(obj, field_element.attrib['name'], field_element.text)
                            # Resolve foreign keys: build and save the related
                            # object first, then attach it to obj.
                            for fk_element in item_element.xpath('.//fk'):
                                fk_item_element_selector = '//model[@model="{}"]//item[@key="{}"]'.format(
                                    fk_element.attrib['model'],
                                    fk_element.attrib['key']
                                )
                                fk_model = self.get_model(fk_element.attrib['model'])
                                fk_obj = fk_model()
                                fk_item_element = transformed_etree.xpath(fk_item_element_selector)[0]
                                for field_element in fk_item_element.xpath('.//field'):
                                    setattr(fk_obj, field_element.attrib['name'], field_element.text)
                                fk_obj.save()
                                # Attach via the first relation field that
                                # points at fk_model.
                                for field in model._meta.get_fields():
                                    if field.related_model == fk_model:
                                        setattr(obj, field.name, fk_obj)
                            obj.save()
                            saved_objects_count += 1
                    print 'Saved objects: ' + str(saved_objects_count)
            except etree.DocumentInvalid as ex:
                print 'Document is not valid: ' + str(ex)
    def get_model(self, model_path_string):
        '''
        Returns model object by string, containing its path
        Path is in format: application_name.ModelName
        The same format like by manage.py dumpdata
        '''
        application_name, model_name = model_path_string.split('.')
        models_import_str = application_name + '.models'
        models = importlib.import_module(models_import_str)
        model = getattr(models, model_name)
        return model
Extract method get_related_item_element from method handle for refactoring
Returns related element by its foreign key
It takes <fk /> element and finds related <item /> element by attributes
from django.core.management.base import BaseCommand
from lxml import etree, html
import urllib2
from os import path
import importlib
class Command(BaseCommand):
    """Fetch a resource by URL, apply an XSLT transformation to it, print
    the result and optionally validate it / import it into Django models.

    NOTE: this module is Python 2 (print statements, urllib2).
    """
    help = 'Processes XSLT transformation on a fetched by URL resource and outputs the result'
    def add_arguments(self, parser):
        """Declare the command-line interface for manage.py."""
        parser.add_argument('url', help='URL to fetch source XML')
        parser.add_argument('xslt_file', help='Path to XSLT transformation file')
        parser.add_argument('--validate', action='store_true',
                            help='Validate against Relax NG schema after transformation')
        # Default schema lives three directories up from this module.
        rng_file = path.join(path.dirname(path.dirname(path.dirname(path.abspath(__file__)))), 'schema.rng')
        parser.add_argument('--rng_file', default=rng_file,
                            help='Path to RELAX NG file. Defaults to schema.rng in module dir. '
                                 'Used only if --validate is set')
        parser.add_argument('--save', action='store_true',
                            help='Save data to the model. Successful validation against Relax NG '
                                 'schema is required. Model names and fields in transformed XML '
                                 'must represent existing models and fields. Otherwise import '
                                 'will break with an exception')
    def handle(self, *args, **options):
        """Entry point: fetch, transform, print, then validate/save on demand."""
        response = urllib2.urlopen(options['url'])
        encoding = response.headers.getparam('charset')
        content_type = response.info().type
        # NOTE(review): if the response is neither XML nor HTML,
        # source_etree is never bound and the transform below raises
        # NameError — presumably callers only feed XML/HTML URLs; confirm.
        if 'xml' in content_type:
            source_etree = etree.parse(response)
        elif 'html' in content_type:
            source_etree = html.parse(response)
        xslt_etree = etree.parse(options['xslt_file'])
        transform = etree.XSLT(xslt_etree)
        transformed_etree = transform(source_etree)
        output = etree.tostring(transformed_etree, pretty_print=True, encoding=encoding)
        print '<?xml version="1.0" encoding="' + encoding + '"?>\n' + output
        # --save implies validation: both paths require a passing RelaxNG check.
        if options['validate'] or options['save']:
            rng_file_etree = etree.parse(options['rng_file'])
            relaxng = etree.RelaxNG(rng_file_etree)
            try:
                relaxng.assertValid(transformed_etree)
                print 'Document is valid'
                if options['save']:
                    saved_objects_count = 0
                    # Transformed XML layout: //model -> .//item -> .//field | .//fk
                    for model_element in transformed_etree.xpath('//model'):
                        model = self.get_model(model_element.attrib['model'])
                        for item_element in model_element.xpath('.//item'):
                            obj = model()
                            for field_element in item_element.xpath('.//field'):
                                setattr(obj, field_element.attrib['name'], field_element.text)
                            # Resolve foreign keys: build and save the related
                            # object first, then attach it to obj.
                            for fk_element in item_element.xpath('.//fk'):
                                fk_model = self.get_model(fk_element.attrib['model'])
                                fk_obj = fk_model()
                                related_element = self.get_related_item_element(fk_element)
                                for field_element in related_element.xpath('.//field'):
                                    setattr(fk_obj, field_element.attrib['name'], field_element.text)
                                fk_obj.save()
                                # Attach via the first relation field that
                                # points at fk_model.
                                for field in model._meta.get_fields():
                                    if field.related_model == fk_model:
                                        setattr(obj, field.name, fk_obj)
                            obj.save()
                            saved_objects_count += 1
                    print 'Saved objects: ' + str(saved_objects_count)
            except etree.DocumentInvalid as ex:
                print 'Document is not valid: ' + str(ex)
    def get_model(self, model_path_string):
        '''
        Returns model object by string, containing its path
        Path is in format: application_name.ModelName
        The same format like by manage.py dumpdata
        '''
        application_name, model_name = model_path_string.split('.')
        models_import_str = application_name + '.models'
        models = importlib.import_module(models_import_str)
        model = getattr(models, model_name)
        return model
    def get_related_item_element(self, fk_element):
        '''
        Returns related element by its foreign key
        It takes <fk /> element and finds related <item />
        element by attributes
        '''
        # The selector is absolute (//...), so lxml evaluates it against the
        # whole document even though xpath() is called on fk_element.
        fk_item_element_selector = '//model[@model="{}"]//item[@key="{}"]'.format(
            fk_element.attrib['model'],
            fk_element.attrib['key']
        )
        fk_item_element = fk_element.xpath(fk_item_element_selector)[0]
        return fk_item_element
|
from ..rman_utils.node_desc import NodeDesc
from ..rman_utils import filepath_utils
from ..rman_utils.filepath import FilePath
from ..rman_utils import texture_utils
from ..rman_utils import property_utils
from ..rfb_logger import rfb_log
from .rman_socket_utils import node_add_inputs
from .rman_socket_utils import node_add_outputs
from .. import rman_render
from .. import properties
from ..rman_properties import rman_properties_scene
from ..rman_properties import rman_properties_renderlayers
from ..rman_properties import rman_properties_world
from ..rman_properties import rman_properties_camera
from nodeitems_utils import NodeCategory, NodeItem
from collections import OrderedDict
from operator import attrgetter
from bpy.props import *
import bpy
import os
import sys
import traceback
import nodeitems_utils
# registers
from . import rman_bl_nodes_sockets
from . import rman_bl_nodes_shaders
from . import rman_bl_nodes_ops
from . import rman_bl_nodes_props
# Per-category registries of parsed plugin node descriptions; filled in
# elsewhere (presumably during plugin discovery — not visible in this file).
__RMAN_DISPLAY_NODES__ = []
__RMAN_BXDF_NODES__ = []
__RMAN_DISPLACE_NODES__ = []
__RMAN_INTEGRATOR_NODES__ = []
__RMAN_PROJECTION_NODES__ = []
__RMAN_DISPLAYFILTER_NODES__ = []
__RMAN_SAMPLEFILTER_NODES__ = []
__RMAN_PATTERN_NODES__ = []
__RMAN_LIGHT_NODES__ = []
__RMAN_LIGHTFILTER_NODES__ = []
# Generated Blender node classes, keyed by type name.
__RMAN_NODE_TYPES__ = dict()
# Node-editor menu categories: label plus the NodeItems under it.
__RMAN_NODE_CATEGORIES__ = dict()
__RMAN_NODE_CATEGORIES__['bxdf'] = ('RenderMan Bxdfs', [])
__RMAN_NODE_CATEGORIES__['light'] = ('RenderMan Lights', [])
__RMAN_NODE_CATEGORIES__['patterns_misc'] = ('RenderMan Misc Patterns', [])
__RMAN_NODE_CATEGORIES__['displace'] = ('RenderMan Displacements', [])
# Convenience map from plugin category name to its registry list above.
__RMAN_NODES__ = {
    'displaydriver': __RMAN_DISPLAY_NODES__,
    'bxdf': __RMAN_BXDF_NODES__,
    'displace': __RMAN_DISPLACE_NODES__,
    'integrator': __RMAN_INTEGRATOR_NODES__,
    'projection': __RMAN_PROJECTION_NODES__,
    'displayfilter': __RMAN_DISPLAYFILTER_NODES__,
    'samplefilter': __RMAN_SAMPLEFILTER_NODES__,
    'pattern': __RMAN_PATTERN_NODES__,
    'light': __RMAN_LIGHT_NODES__,
    'lightfilter': __RMAN_LIGHTFILTER_NODES__
}
# Which settings PropertyGroup each non-node plugin category attaches to.
__RMAN_PLUGIN_MAPPING__ = {
    'integrator': rman_properties_scene.RendermanSceneSettings,
    'displaydriver': rman_properties_renderlayers.RendermanAOV,
    'projection': rman_properties_camera.RendermanCameraSettings,
    'light': rman_bl_nodes_props.RendermanLightSettings,
    'lightfilter': rman_bl_nodes_props.RendermanLightSettings,
    'displayfilter': rman_bl_nodes_props.RendermanDisplayFilterSettings,
    'samplefilter': rman_bl_nodes_props.RendermanSampleFilterSettings,
}
def update_conditional_visops(node):
    """Re-evaluate each parameter's conditional visibility expression and
    update both the prop_meta 'hidden' flag and the matching input socket.

    Parameters whose meta has no 'conditionalVisOp' entry are untouched.
    """
    for param_name, prop_meta in getattr(node, 'prop_meta').items():
        if 'conditionalVisOp' not in prop_meta:
            continue
        cond_expr = prop_meta['conditionalVisOp']['expr']
        try:
            # NOTE(review): eval() of an expression string from the plugin
            # description — these come from shipped node metadata, not user
            # input; keep that invariant in mind.
            hidden = not eval(cond_expr)
            prop_meta['hidden'] = hidden
            if hasattr(node, 'inputs') and param_name in node.inputs:
                node.inputs[param_name].hide = hidden
        except Exception:
            # Was a bare `except:`; narrowed so KeyboardInterrupt/SystemExit
            # still propagate.
            print("Error in conditional visop: %s" % (cond_expr))
def assetid_update_func(self, context):
    """Update callback for file/asset-id parameters: refresh the texture
    manager for this node (passing the active light, if any), then tell
    the owning material to update.
    """
    node = self.node if hasattr(self, 'node') else self
    light = None
    # Bug fix: context.active_object can be None (nothing selected), which
    # previously raised AttributeError on `.type`.
    active = getattr(context, 'active_object', None)
    if active is not None and active.type == 'LIGHT':
        light = active.data
    texture_utils.update_texture(node, light=light)
    if context and hasattr(context, 'material'):
        mat = context.material
        if mat:
            node.update_mat(mat)
    elif context and hasattr(context, 'node'):
        mat = context.space_data.id
        if mat:
            node.update_mat(mat)
def update_func_with_inputs(self, context):
    # check if this prop is set on an input
    # Property-update callback used for 'enable*' toggles: besides the
    # usual material refresh, it rebuilds the node's input sockets.
    node = self.node if hasattr(self, 'node') else self
    if context and hasattr(context, 'material'):
        mat = context.material
        if mat:
            node.update_mat(mat)
    elif context and hasattr(context, 'node'):
        mat = context.space_data.id
        if mat:
            node.update_mat(mat)
    # update the conditional_vis_ops
    update_conditional_visops(node)
    # PxrLayer/PxrSurface rebuild all inputs; others use update_inputs
    # (defined elsewhere in this module — not visible here).
    if node.bl_idname in ['PxrLayerPatternNode', 'PxrSurfaceBxdfNode']:
        node_add_inputs(node, node.name, node.prop_names)
    else:
        update_inputs(node)
    # set any inputs that are visible and param is hidden to hidden
    prop_meta = getattr(node, 'prop_meta')
    if hasattr(node, 'inputs'):
        for input_name, socket in node.inputs.items():
            if 'hidden' in prop_meta[input_name]:
                socket.hide = prop_meta[input_name]['hidden']
def update_func(self, context):
    """Generic property-update callback: refresh the owning material,
    re-evaluate conditional visibility, and hide sockets whose backing
    parameter is flagged hidden."""
    node = getattr(self, 'node', self)
    # Work out which material (if any) owns this node.
    mat = None
    if context:
        if hasattr(context, 'material'):
            mat = context.material
        elif hasattr(context, 'node'):
            mat = context.space_data.id
    if mat:
        node.update_mat(mat)
    # Re-run the conditional visibility expressions.
    update_conditional_visops(node)
    meta = getattr(node, 'prop_meta')
    if hasattr(node, 'inputs'):
        for sock_name, sock in node.inputs.items():
            entry = meta.get(sock_name)
            if entry is None:
                continue
            if entry.get('hidden') and not sock.hide:
                sock.hide = True
def update_integrator_func(self, context):
    """Push integrator changes to the live render, if one is running."""
    rr = rman_render.RmanRender.get_rman_render()
    if not rr.rman_interactive_running:
        return
    rr.rman_scene.update_integrator(context)
def class_generate_properties(node, parent_name, node_desc):
    """Populate *node* (a Blender node class) with properties generated from
    the plugin description *node_desc*.

    Fills node.__annotations__ with bpy properties and attaches three
    bookkeeping attributes: prop_names, prop_meta and output_meta.
    """
    prop_names = []
    prop_meta = {}
    output_meta = OrderedDict()
    if "__annotations__" not in node.__dict__:
        setattr(node, "__annotations__", {})
    # pxr osl and seexpr need these to find the code
    if parent_name in ["PxrOSL", "PxrSeExpr"]:
        # Enum for internal, external type selection
        EnumName = "codetypeswitch"
        if parent_name == 'PxrOSL':
            EnumProp = EnumProperty(items=(('EXT', "External", ""),
                                           ('INT', "Internal", "")),
                                    name="Shader Location", default='INT')
        else:
            EnumProp = EnumProperty(items=(('NODE', "Node", ""),
                                           ('INT', "Internal", "")),
                                    name="Expr Location", default='NODE')
        EnumMeta = {'renderman_name': 'filename',
                    'name': 'codetypeswitch',
                    'renderman_type': 'string',
                    'default': '', 'label': 'codetypeswitch',
                    'type': 'enum', 'options': '',
                    'widget': 'mapper', '__noconnection': True}
        node.__annotations__[EnumName] = EnumProp
        prop_names.append(EnumName)
        prop_meta[EnumName] = EnumMeta
        # Internal file search prop
        InternalName = "internalSearch"
        InternalProp = StringProperty(name="Shader to use",
                                      description="Storage space for internal text data block",
                                      default="")
        InternalMeta = {'renderman_name': 'filename',
                        'name': 'internalSearch',
                        'renderman_type': 'string',
                        'default': '', 'label': 'internalSearch',
                        'type': 'string', 'options': '',
                        'widget': 'fileinput', '__noconnection': True}
        node.__annotations__[InternalName] = InternalProp
        prop_names.append(InternalName)
        prop_meta[InternalName] = InternalMeta
        # External file prop
        codeName = "shadercode"
        codeProp = StringProperty(name='External File', default='',
                                  subtype="FILE_PATH", description='')
        codeMeta = {'renderman_name': 'filename',
                    'name': 'ShaderCode', 'renderman_type': 'string',
                    'default': '', 'label': 'ShaderCode',
                    'type': 'string', 'options': '',
                    'widget': 'fileinput', '__noconnection': True}
        node.__annotations__[codeName] = codeProp
        prop_names.append(codeName)
        prop_meta[codeName] = codeMeta
    # inputs
    for node_desc_param in node_desc.params:
        # Pick the right update callback for this parameter kind.
        update_function = None
        if node_desc.node_type == 'integrator':
            update_function = update_integrator_func
        else:
            param_widget = node_desc_param.widget.lower() if hasattr(node_desc_param,'widget') else 'default'
            if param_widget == 'fileinput' or param_widget == 'assetidinput' or (param_widget == 'default' and node_desc_param.name == 'filename'):
                update_function = assetid_update_func
            else:
                update_function = update_func_with_inputs if 'enable' in node_desc_param.name else update_func
        if not update_function:
            update_function = update_func
        name, meta, prop = property_utils.generate_property(node_desc_param, update_function=update_function)
        if name is None:
            continue
        # Parameters with a 'page' are grouped; pages nest via '|' tokens.
        if hasattr(node_desc_param, 'page') and node_desc_param.page != '':
            page = node_desc_param.page
            tokens = page.split('|')
            sub_prop_names = prop_names
            page_name = tokens[0]
            if page_name not in prop_meta:
                sub_prop_names.append(page_name)
                prop_meta[page_name] = {'renderman_type': 'page'}
                ui_label = "%s_uio" % page_name
                node.__annotations__[ui_label] = BoolProperty(name=ui_label, default=False)
                setattr(node, page_name, [])
                # PxrSurface pages (except Globals) get an enable toggle.
                if parent_name == 'PxrSurface' and 'Globals' not in page_name:
                    enable_param_name = 'enable' + page_name.replace(' ', '')
                    if enable_param_name not in prop_meta:
                        prop_meta[enable_param_name] = {
                            'renderman_type': 'enum', 'renderman_name': enable_param_name}
                        default = page_name == 'Diffuse'
                        enable_param_prop = BoolProperty(name="Enable " + page_name,
                                                         default=bool(default),
                                                         update=update_func_with_inputs)
                        node.__annotations__[enable_param_name] = enable_param_prop
                    page_prop_names = getattr(node, page_name)
                    if enable_param_name not in page_prop_names:
                        page_prop_names.append(enable_param_name)
                        setattr(node, page_name, page_prop_names)
            if len(tokens) > 1:
                # Create each nested sub-page and link it under its parent.
                for i in range(1, len(tokens)):
                    parent_page = page_name
                    page_name += '.' + tokens[i]
                    if page_name not in prop_meta:
                        prop_meta[page_name] = {'renderman_type': 'page'}
                        ui_label = "%s_uio" % page_name
                        node.__annotations__[ui_label] = BoolProperty(name=ui_label, default=False)
                        setattr(node, page_name, [])
                    sub_prop_names = getattr(node, parent_page)
                    if page_name not in sub_prop_names:
                        sub_prop_names.append(page_name)
                        setattr(node, parent_page, sub_prop_names)
            sub_prop_names = getattr(node, page_name)
            sub_prop_names.append(name)
            setattr(node, page_name, sub_prop_names)
            prop_meta[name] = meta
            node.__annotations__[name] = prop
        else:
            prop_names.append(name)
            prop_meta[name] = meta
            node.__annotations__[name] = prop
    # outputs
    for node_desc_param in node_desc.outputs:
        renderman_type = node_desc_param.type
        prop_name = node_desc_param.name
        output_prop_meta = dict()
        if hasattr(node_desc_param, 'vstructmember'):
            output_prop_meta['vstructmember'] = node_desc_param.vstructmember
        if hasattr(node_desc_param, 'vstructConditionalExpr'):
            output_prop_meta['vstructConditionalExpr'] = node_desc_param.vstructConditionalExpr
        output_prop_meta['name'] = node_desc_param.name
        output_meta[prop_name] = output_prop_meta #node_desc_param
        output_meta[prop_name]['renderman_type'] = renderman_type
    setattr(node, 'prop_names', prop_names)
    setattr(node, 'prop_meta', prop_meta)
    setattr(node, 'output_meta', output_meta)
def generate_node_type(node_desc):
    ''' Dynamically generate a node type from pattern

    Builds a Blender node class for the plugin described by node_desc,
    registers it with bpy, and returns (typename, class) — or (None, None)
    for unsupported plugin categories.
    '''
    name = node_desc.name
    nodeType = node_desc.node_type #args.find("shaderType/tag").attrib['value']
    typename = '%s%sNode' % (name, nodeType.capitalize())
    nodeDict = {'bxdf': rman_bl_nodes_shaders.RendermanBxdfNode,
                'pattern': rman_bl_nodes_shaders.RendermanPatternNode,
                'displace': rman_bl_nodes_shaders.RendermanDisplacementNode,
                'light': rman_bl_nodes_shaders.RendermanLightNode}
    if nodeType not in nodeDict.keys():
        return (None, None)
    ntype = type(typename, (nodeDict[nodeType],), {})
    ntype.bl_label = name
    ntype.typename = typename
    # NOTE: init/free close over `name`, so each generated class gets
    # behavior specific to its plugin.
    def init(self, context):
        if self.renderman_node_type == 'bxdf':
            self.outputs.new('RendermanShaderSocket', "Bxdf").type = 'SHADER'
            #socket_template = self.socket_templates.new(identifier='Bxdf', name='Bxdf', type='SHADER')
            node_add_inputs(self, name, self.prop_names)
            node_add_outputs(self)
            # if this is PxrLayerSurface set the diffusegain to 0. The default
            # of 1 is unintuitive
            if self.plugin_name == 'PxrLayerSurface':
                self.diffuseGain = 0
        elif self.renderman_node_type == 'light':
            # only make a few sockets connectable
            node_add_inputs(self, name, self.prop_names)
            self.outputs.new('RendermanShaderSocket', "Light")
        elif self.renderman_node_type == 'displace':
            # only make the color connectable
            self.outputs.new('RendermanShaderSocket', "Displacement")
            node_add_inputs(self, name, self.prop_names)
        # else pattern
        elif name == "PxrOSL":
            self.outputs.clear()
        else:
            node_add_inputs(self, name, self.prop_names)
            node_add_outputs(self)
        if name == "PxrRamp":
            # PxrRamp piggybacks on a hidden ShaderNodeValToRGB node group
            # for its color-ramp UI.
            node_group = bpy.data.node_groups.new(
                'PxrRamp_nodegroup', 'ShaderNodeTree')
            node_group.nodes.new('ShaderNodeValToRGB')
            node_group.use_fake_user = True
            self.node_group = node_group.name
        update_conditional_visops(self)
    def free(self):
        # Drop the companion node group created in init() for PxrRamp.
        if name == "PxrRamp":
            bpy.data.node_groups.remove(bpy.data.node_groups[self.node_group])
    ntype.init = init
    ntype.free = free
    if "__annotations__" not in ntype.__dict__:
        setattr(ntype, "__annotations__", {})
    if name == 'PxrRamp':
        ntype.__annotations__['node_group'] = StringProperty('color_ramp', default='')
    ntype.__annotations__['plugin_name'] = StringProperty(name='Plugin Name',
                                                          default=name, options={'HIDDEN'})
    # lights cant connect to a node tree in 20.0
    class_generate_properties(ntype, name, node_desc)
    if nodeType == 'light':
        ntype.__annotations__['light_shading_rate'] = FloatProperty(
            name="Light Shading Rate",
            description="Shading Rate for this light. \
                Leave this high unless detail is missing",
            default=100.0)
        ntype.__annotations__['light_primary_visibility'] = BoolProperty(
            name="Light Primary Visibility",
            description="Camera visibility for this light",
            default=True)
    bpy.utils.register_class(ntype)
    return (typename, ntype)
def register_plugin_to_parent(ntype, name, node_desc, plugin_type, parent):
    """Register a plugin settings class and attach it to its parent PropertyGroup.

    Args:
        ntype: dynamically created RendermanPluginSettings subclass.
        name: plugin name, used to derive the settings attribute name.
        node_desc: parsed node description supplying the plugin's parameters.
        plugin_type: plugin category string (e.g. 'integrator', 'light').
        parent: PropertyGroup class that receives the PointerProperty.
    """
    class_generate_properties(ntype, name, node_desc)
    setattr(ntype, 'renderman_node_type', plugin_type)
    if "__annotations__" not in parent.__dict__:
        setattr(parent, "__annotations__", {})
    # register and add to scene_settings
    bpy.utils.register_class(ntype)
    # fixed: settings_name was computed but never used while the same format
    # string was rebuilt three times below
    settings_name = "%s_settings" % name
    settings_label = "%s Settings" % name
    parent.__annotations__[settings_name] = PointerProperty(type=ntype, name=settings_label)
    if "__annotations__" not in rman_properties_world.RendermanWorldSettings.__dict__:
        setattr(rman_properties_world.RendermanWorldSettings, "__annotations__", {})
    # special case for world lights: dome/env-day lights are also exposed on
    # the world settings
    if plugin_type == 'light' and name in ['PxrDomeLight', 'PxrEnvDayLight']:
        rman_properties_world.RendermanWorldSettings.__annotations__[settings_name] = PointerProperty(
            type=ntype, name=settings_label)
def register_plugin_types(node_desc):
    """Create and register a settings PropertyGroup for a non-shading plugin.

    Looks up the parent PropertyGroup for the plugin's type and attaches a
    dynamically generated settings class to it. Unknown plugin types are
    silently ignored.
    """
    # fixed: removed unused local `items`
    if node_desc.node_type not in __RMAN_PLUGIN_MAPPING__:
        return
    parent = __RMAN_PLUGIN_MAPPING__[node_desc.node_type]
    name = node_desc.name
    if node_desc.node_type == 'displaydriver':
        # remove the d_ prefix
        name = name.split('d_')[1]
    typename = name + node_desc.node_type.capitalize() + 'Settings'
    ntype = type(typename, (rman_bl_nodes_props.RendermanPluginSettings,), {})
    ntype.bl_label = name
    ntype.typename = typename
    ntype.bl_idname = typename
    ntype.plugin_name = name
    try:
        register_plugin_to_parent(ntype, name, node_desc, node_desc.node_type, parent)
    except Exception:
        # fixed: the original passed `name` as an extra logging argument with
        # no %s placeholder, which breaks the log call itself
        rfb_log().error("Error registering plugin %s" % name)
        traceback.print_exc()
def get_path_list():
    """Collect the directories that will be scanned for .args / .oso files."""
    rmantree = filepath_utils.guess_rmantree()
    search_paths = [
        os.path.join(rmantree, 'lib', 'plugins'),
        os.path.join(rmantree, 'lib', 'shaders'),
        os.path.join(os.path.dirname(os.path.abspath(__file__)), 'Args'),
    ]
    # optional user-supplied search paths from the environment
    if 'RMAN_RIXPLUGINPATH' in os.environ:
        for entry in os.environ['RMAN_RIXPLUGINPATH'].split(':'):
            search_paths.append(os.path.join(entry, 'Args'))
    if 'RMAN_SHADERPATH' in os.environ:
        search_paths.extend(os.environ['RMAN_SHADERPATH'].split(':'))
    return search_paths
class RendermanPatternNodeCategory(NodeCategory):
    """Node-editor category for RenderMan nodes; only shown in shader trees."""
    @classmethod
    def poll(cls, context):
        # restrict the category to the shader node editor
        return context.space_data.tree_type == 'ShaderNodeTree'
def register_rman_nodes():
    """Scan the RenderMan plugin paths, generate node classes, and register
    all node categories with Blender's node editor."""
    global __RMAN_NODE_CATEGORIES__
    rfb_log().debug("Registering RenderMan Plugin Nodes:")
    path_list = get_path_list()
    for path in path_list:
        for root, dirnames, filenames in os.walk(path):
            for filename in filenames:
                if filename.endswith(('.args', '.oso')):
                    node_desc = NodeDesc(FilePath(root).join(FilePath(filename)))
                    __RMAN_NODES__[node_desc.node_type].append(node_desc)
                    rfb_log().debug("\t%s" % node_desc.name)
                    # These plugin types are special. They are not actually shading
                    # nodes that can be used in Blender's shading editor, but
                    # we still create PropertyGroups for them so they can be inserted
                    # into the correct UI panel.
                    if node_desc.node_type in ['integrator', 'projection', 'displaydriver',
                                               'displayfilter', 'samplefilter',
                                               'light',
                                               'lightfilter']:
                        register_plugin_types(node_desc)
                        if node_desc.name != 'PxrMeshLight':
                            # for mesh light, we need to create a shader graph node
                            continue
                    typename, nodetype = generate_node_type(node_desc)
                    if not typename and not nodetype:
                        continue
                    if typename and nodetype:
                        __RMAN_NODE_TYPES__[typename] = nodetype
                        # categories
                        node_item = NodeItem(typename, label=nodetype.bl_label)
                        if node_desc.node_type == 'pattern':
                            if hasattr(node_desc, 'classification'):
                                try:
                                    tokens = node_desc.classification.split('/')
                                    category = tokens[-1].lower()
                                    # fixed: removed the stray debug print that
                                    # used `%s` instead of the `%` operator,
                                    # which was a SyntaxError
                                    lst = __RMAN_NODE_CATEGORIES__.get('patterns_%s' % category, None)
                                    if not lst:
                                        lst = ('RenderMan %s Patterns' % category.capitalize(), [])
                                    lst[1].append(node_item)
                                    __RMAN_NODE_CATEGORIES__['patterns_%s' % category] = lst
                                except Exception:
                                    # best-effort categorization; a pattern with
                                    # an unparsable classification is skipped
                                    pass
                            else:
                                __RMAN_NODE_CATEGORIES__['patterns_misc'][1].append(node_item)
                        elif node_desc.node_type == 'bxdf':
                            __RMAN_NODE_CATEGORIES__['bxdf'][1].append(node_item)
                        elif node_desc.node_type == 'displace':
                            __RMAN_NODE_CATEGORIES__['displace'][1].append(node_item)
                        elif node_desc.node_type == 'light':
                            __RMAN_NODE_CATEGORIES__['light'][1].append(node_item)
    rfb_log().debug("Finished Registering RenderMan Plugin Nodes.")
    # all categories in a list
    node_categories = [
        # identifier, label, items list
        RendermanPatternNodeCategory("PRMan_output_nodes", "RenderMan Outputs",
                                     items=[NodeItem('RendermanOutputNode', label=rman_bl_nodes_shaders.RendermanOutputNode.bl_label)]),
    ]
    for name, (desc, items) in __RMAN_NODE_CATEGORIES__.items():
        node_categories.append(RendermanPatternNodeCategory(name, desc,
                                                            items=sorted(items,
                                                                         key=attrgetter('_label'))))
    nodeitems_utils.register_node_categories("RENDERMANSHADERNODES",
                                             node_categories)
def register():
    """Register all RenderMan node classes, sockets, shaders, and operators."""
    # node generation must come first; the other modules reference the
    # classes it creates
    register_rman_nodes()
    rman_bl_nodes_props.register()
    rman_bl_nodes_sockets.register()
    rman_bl_nodes_shaders.register()
    rman_bl_nodes_ops.register()
def unregister():
    """Remove node categories and unregister all RenderMan node modules."""
    nodeitems_utils.unregister_node_categories("RENDERMANSHADERNODES")
    rman_bl_nodes_props.unregister()
    rman_bl_nodes_sockets.unregister()
    rman_bl_nodes_shaders.unregister()
    rman_bl_nodes_ops.unregister()
    # NOTE(review): `classes` is not defined anywhere visible in this module —
    # confirm it exists at module level, otherwise this loop raises NameError
    for cls in classes:
        bpy.utils.unregister_class(cls)
Fixes to pattern categories.
If we can't determine a pattern's category, put it in the misc. category.
from ..rman_utils.node_desc import NodeDesc
from ..rman_utils import filepath_utils
from ..rman_utils.filepath import FilePath
from ..rman_utils import texture_utils
from ..rman_utils import property_utils
from ..rfb_logger import rfb_log
from .rman_socket_utils import node_add_inputs
from .rman_socket_utils import node_add_outputs
from .. import rman_render
from .. import properties
from ..rman_properties import rman_properties_scene
from ..rman_properties import rman_properties_renderlayers
from ..rman_properties import rman_properties_world
from ..rman_properties import rman_properties_camera
from nodeitems_utils import NodeCategory, NodeItem
from collections import OrderedDict
from operator import attrgetter
from bpy.props import *
import bpy
import os
import sys
import traceback
import nodeitems_utils
# registers
from . import rman_bl_nodes_sockets
from . import rman_bl_nodes_shaders
from . import rman_bl_nodes_ops
from . import rman_bl_nodes_props
# Per-type lists of parsed NodeDesc objects, filled in by register_rman_nodes().
__RMAN_DISPLAY_NODES__ = []
__RMAN_BXDF_NODES__ = []
__RMAN_DISPLACE_NODES__ = []
__RMAN_INTEGRATOR_NODES__ = []
__RMAN_PROJECTION_NODES__ = []
__RMAN_DISPLAYFILTER_NODES__ = []
__RMAN_SAMPLEFILTER_NODES__ = []
__RMAN_PATTERN_NODES__ = []
__RMAN_LIGHT_NODES__ = []
__RMAN_LIGHTFILTER_NODES__ = []
# Map of generated node class name -> node class.
__RMAN_NODE_TYPES__ = dict()
# Map of category id -> (display label, [NodeItem, ...]) for the node editor.
__RMAN_NODE_CATEGORIES__ = dict()
__RMAN_NODE_CATEGORIES__['bxdf'] = ('RenderMan Bxdfs', [])
__RMAN_NODE_CATEGORIES__['light'] = ('RenderMan Lights', [])
__RMAN_NODE_CATEGORIES__['patterns_misc'] = ('RenderMan Misc Patterns', [])
__RMAN_NODE_CATEGORIES__['displace'] = ('RenderMan Displacements', [])
# Dispatch table from node type string to its NodeDesc list above.
__RMAN_NODES__ = {
    'displaydriver': __RMAN_DISPLAY_NODES__,
    'bxdf': __RMAN_BXDF_NODES__,
    'displace': __RMAN_DISPLACE_NODES__,
    'integrator': __RMAN_INTEGRATOR_NODES__,
    'projection': __RMAN_PROJECTION_NODES__,
    'displayfilter': __RMAN_DISPLAYFILTER_NODES__,
    'samplefilter': __RMAN_SAMPLEFILTER_NODES__,
    'pattern': __RMAN_PATTERN_NODES__,
    'light': __RMAN_LIGHT_NODES__,
    'lightfilter': __RMAN_LIGHTFILTER_NODES__
}
# Parent PropertyGroup that each non-shading plugin type's settings attach to.
__RMAN_PLUGIN_MAPPING__ = {
    'integrator': rman_properties_scene.RendermanSceneSettings,
    'displaydriver': rman_properties_renderlayers.RendermanAOV,
    'projection': rman_properties_camera.RendermanCameraSettings,
    'light': rman_bl_nodes_props.RendermanLightSettings,
    'lightfilter': rman_bl_nodes_props.RendermanLightSettings,
    'displayfilter': rman_bl_nodes_props.RendermanDisplayFilterSettings,
    'samplefilter': rman_bl_nodes_props.RendermanSampleFilterSettings,
}
def update_conditional_visops(node):
    """Re-evaluate each parameter's conditionalVisOp expression and update
    its hidden state (and the matching input socket's visibility).

    NOTE(review): the expression is run through eval() with no explicit
    namespace — presumably it references `node`; confirm the expressions
    from the shader description files are trusted.
    """
    for param_name, prop_meta in getattr(node, 'prop_meta').items():
        if 'conditionalVisOp' in prop_meta:
            cond_expr = prop_meta['conditionalVisOp']['expr']
            try:
                hidden = not eval(cond_expr)
                prop_meta['hidden'] = hidden
                if hasattr(node, 'inputs') and param_name in node.inputs:
                    node.inputs[param_name].hide = hidden
            except:
                # best-effort: a broken expression only produces a message
                print("Error in conditional visop: %s" % (cond_expr))
def assetid_update_func(self, context):
    """Update callback for file/asset-id parameters.

    Refreshes the texture associated with the node and then tells the owning
    material to update. `self` may be a socket (with a .node backref) or the
    node itself.
    """
    node = self.node if hasattr(self, 'node') else self
    light = None
    active = context.active_object
    # fixed: active_object can be None (nothing selected); guard before
    # dereferencing .type to avoid an AttributeError inside an update callback
    if active is not None and active.type == 'LIGHT':
        light = active.data
    texture_utils.update_texture(node, light=light)
    if context and hasattr(context, 'material'):
        mat = context.material
        if mat:
            node.update_mat(mat)
    elif context and hasattr(context, 'node'):
        mat = context.space_data.id
        if mat:
            node.update_mat(mat)
def update_func_with_inputs(self, context):
    """Property update callback that also rebuilds/refreshes node inputs.

    Used for 'enable*' toggles whose value changes which sockets exist.
    """
    # check if this prop is set on an input
    node = self.node if hasattr(self, 'node') else self
    if context and hasattr(context, 'material'):
        mat = context.material
        if mat:
            node.update_mat(mat)
    elif context and hasattr(context, 'node'):
        mat = context.space_data.id
        if mat:
            node.update_mat(mat)
    # update the conditional_vis_ops
    update_conditional_visops(node)
    if node.bl_idname in ['PxrLayerPatternNode', 'PxrSurfaceBxdfNode']:
        node_add_inputs(node, node.name, node.prop_names)
    else:
        # NOTE(review): update_inputs is expected to be defined elsewhere in
        # this module — confirm
        update_inputs(node)
    # set any inputs that are visible and param is hidden to hidden
    prop_meta = getattr(node, 'prop_meta')
    if hasattr(node, 'inputs'):
        for input_name, socket in node.inputs.items():
            # fixed: sockets with no prop_meta entry (e.g. fixed sockets)
            # raised KeyError here; skip them, consistent with update_func()
            if input_name not in prop_meta:
                continue
            if 'hidden' in prop_meta[input_name]:
                socket.hide = prop_meta[input_name]['hidden']
def update_func(self, context):
    """Generic property update callback.

    Notifies the owning material of the change, then re-applies conditional
    visibility and hides sockets whose parameters are flagged hidden.
    """
    # the update may fire on a socket (which has a .node backref) or the node
    owner = self.node if hasattr(self, 'node') else self
    material = None
    if context and hasattr(context, 'material'):
        material = context.material
    elif context and hasattr(context, 'node'):
        material = context.space_data.id
    if material:
        owner.update_mat(material)
    # update the conditional_vis_ops
    update_conditional_visops(owner)
    # hide any visible socket whose parameter metadata marks it hidden
    meta = getattr(owner, 'prop_meta')
    if hasattr(owner, 'inputs'):
        for sock_name, sock in owner.inputs.items():
            if sock_name not in meta:
                continue
            entry = meta[sock_name]
            if entry.get('hidden') and not sock.hide:
                sock.hide = True
def update_integrator_func(self, context):
    """Push integrator parameter changes to an active interactive render."""
    renderer = rman_render.RmanRender.get_rman_render()
    if not renderer.rman_interactive_running:
        return
    renderer.rman_scene.update_integrator(context)
def class_generate_properties(node, parent_name, node_desc):
    """Populate *node* with bpy properties generated from *node_desc*.

    Builds three structures and attaches them to the class:
      - prop_names: top-level parameter/page names, in declaration order
      - prop_meta: per-parameter metadata dicts (including 'page' entries)
      - output_meta: ordered metadata for the node's output parameters
    Page hierarchies ("A|B|C" in the .args file) become dotted attribute
    names ("A.B.C") each holding a list of its child property names.
    """
    prop_names = []
    prop_meta = {}
    output_meta = OrderedDict()
    if "__annotations__" not in node.__dict__:
        setattr(node, "__annotations__", {})
    # pxr osl and seexpr need these to find the code
    if parent_name in ["PxrOSL", "PxrSeExpr"]:
        # Enum for internal, external type selection
        EnumName = "codetypeswitch"
        if parent_name == 'PxrOSL':
            EnumProp = EnumProperty(items=(('EXT', "External", ""),
                                           ('INT', "Internal", "")),
                                    name="Shader Location", default='INT')
        else:
            EnumProp = EnumProperty(items=(('NODE', "Node", ""),
                                           ('INT', "Internal", "")),
                                    name="Expr Location", default='NODE')
        EnumMeta = {'renderman_name': 'filename',
                    'name': 'codetypeswitch',
                    'renderman_type': 'string',
                    'default': '', 'label': 'codetypeswitch',
                    'type': 'enum', 'options': '',
                    'widget': 'mapper', '__noconnection': True}
        node.__annotations__[EnumName] = EnumProp
        prop_names.append(EnumName)
        prop_meta[EnumName] = EnumMeta
        # Internal file search prop
        InternalName = "internalSearch"
        InternalProp = StringProperty(name="Shader to use",
                                      description="Storage space for internal text data block",
                                      default="")
        InternalMeta = {'renderman_name': 'filename',
                        'name': 'internalSearch',
                        'renderman_type': 'string',
                        'default': '', 'label': 'internalSearch',
                        'type': 'string', 'options': '',
                        'widget': 'fileinput', '__noconnection': True}
        node.__annotations__[InternalName] = InternalProp
        prop_names.append(InternalName)
        prop_meta[InternalName] = InternalMeta
        # External file prop
        codeName = "shadercode"
        codeProp = StringProperty(name='External File', default='',
                                  subtype="FILE_PATH", description='')
        codeMeta = {'renderman_name': 'filename',
                    'name': 'ShaderCode', 'renderman_type': 'string',
                    'default': '', 'label': 'ShaderCode',
                    'type': 'string', 'options': '',
                    'widget': 'fileinput', '__noconnection': True}
        node.__annotations__[codeName] = codeProp
        prop_names.append(codeName)
        prop_meta[codeName] = codeMeta
    # inputs
    for node_desc_param in node_desc.params:
        # pick the update callback appropriate for this parameter's widget
        update_function = None
        if node_desc.node_type == 'integrator':
            update_function = update_integrator_func
        else:
            param_widget = node_desc_param.widget.lower() if hasattr(node_desc_param,'widget') else 'default'
            if param_widget == 'fileinput' or param_widget == 'assetidinput' or (param_widget == 'default' and node_desc_param.name == 'filename'):
                update_function = assetid_update_func
            else:
                update_function = update_func_with_inputs if 'enable' in node_desc_param.name else update_func
        if not update_function:
            update_function = update_func
        name, meta, prop = property_utils.generate_property(node_desc_param, update_function=update_function)
        if name is None:
            # parameter could not be turned into a bpy property; skip it
            continue
        if hasattr(node_desc_param, 'page') and node_desc_param.page != '':
            page = node_desc_param.page
            tokens = page.split('|')
            sub_prop_names = prop_names
            page_name = tokens[0]
            if page_name not in prop_meta:
                # first parameter on this page: create the page entry plus a
                # *_uio BoolProperty used to track the page's open/closed state
                sub_prop_names.append(page_name)
                prop_meta[page_name] = {'renderman_type': 'page'}
                ui_label = "%s_uio" % page_name
                node.__annotations__[ui_label] = BoolProperty(name=ui_label, default=False)
                setattr(node, page_name, [])
                # PxrSurface pages (except Globals) get an enable* toggle;
                # only Diffuse defaults to enabled
                if parent_name == 'PxrSurface' and 'Globals' not in page_name:
                    enable_param_name = 'enable' + page_name.replace(' ', '')
                    if enable_param_name not in prop_meta:
                        prop_meta[enable_param_name] = {
                            'renderman_type': 'enum', 'renderman_name': enable_param_name}
                        default = page_name == 'Diffuse'
                        enable_param_prop = BoolProperty(name="Enable " + page_name,
                                                         default=bool(default),
                                                         update=update_func_with_inputs)
                        node.__annotations__[enable_param_name] = enable_param_prop
                    page_prop_names = getattr(node, page_name)
                    if enable_param_name not in page_prop_names:
                        page_prop_names.append(enable_param_name)
                        setattr(node, page_name, page_prop_names)
            if len(tokens) > 1:
                # nested pages: build dotted names and link each level to its parent
                for i in range(1, len(tokens)):
                    parent_page = page_name
                    page_name += '.' + tokens[i]
                    if page_name not in prop_meta:
                        prop_meta[page_name] = {'renderman_type': 'page'}
                        ui_label = "%s_uio" % page_name
                        node.__annotations__[ui_label] = BoolProperty(name=ui_label, default=False)
                        setattr(node, page_name, [])
                    sub_prop_names = getattr(node, parent_page)
                    if page_name not in sub_prop_names:
                        sub_prop_names.append(page_name)
                        setattr(node, parent_page, sub_prop_names)
            sub_prop_names = getattr(node, page_name)
            sub_prop_names.append(name)
            setattr(node, page_name, sub_prop_names)
            prop_meta[name] = meta
            node.__annotations__[name] = prop
        else:
            prop_names.append(name)
            prop_meta[name] = meta
            node.__annotations__[name] = prop
    # outputs
    for node_desc_param in node_desc.outputs:
        renderman_type = node_desc_param.type
        prop_name = node_desc_param.name
        output_prop_meta = dict()
        if hasattr(node_desc_param, 'vstructmember'):
            output_prop_meta['vstructmember'] = node_desc_param.vstructmember
        if hasattr(node_desc_param, 'vstructConditionalExpr'):
            output_prop_meta['vstructConditionalExpr'] = node_desc_param.vstructConditionalExpr
        output_prop_meta['name'] = node_desc_param.name
        output_meta[prop_name] = output_prop_meta #node_desc_param
        output_meta[prop_name]['renderman_type'] = renderman_type
    setattr(node, 'prop_names', prop_names)
    setattr(node, 'prop_meta', prop_meta)
    setattr(node, 'output_meta', output_meta)
def generate_node_type(node_desc):
    ''' Dynamically generate a node type from pattern.

    Returns (typename, node_class) for bxdf/pattern/displace/light
    descriptions, or (None, None) for any other node type. The generated
    class is registered with Blender before returning.
    '''
    name = node_desc.name
    nodeType = node_desc.node_type #args.find("shaderType/tag").attrib['value']
    typename = '%s%sNode' % (name, nodeType.capitalize())
    # base class per node type; anything else is not a shading-editor node
    nodeDict = {'bxdf': rman_bl_nodes_shaders.RendermanBxdfNode,
                'pattern': rman_bl_nodes_shaders.RendermanPatternNode,
                'displace': rman_bl_nodes_shaders.RendermanDisplacementNode,
                'light': rman_bl_nodes_shaders.RendermanLightNode}
    if nodeType not in nodeDict.keys():
        return (None, None)
    ntype = type(typename, (nodeDict[nodeType],), {})
    ntype.bl_label = name
    ntype.typename = typename
    # init/free close over `name` from this call; they become methods of ntype
    def init(self, context):
        if self.renderman_node_type == 'bxdf':
            self.outputs.new('RendermanShaderSocket', "Bxdf").type = 'SHADER'
            #socket_template = self.socket_templates.new(identifier='Bxdf', name='Bxdf', type='SHADER')
            node_add_inputs(self, name, self.prop_names)
            node_add_outputs(self)
            # if this is PxrLayerSurface set the diffusegain to 0. The default
            # of 1 is unintuitive
            if self.plugin_name == 'PxrLayerSurface':
                self.diffuseGain = 0
        elif self.renderman_node_type == 'light':
            # only make a few sockets connectable
            node_add_inputs(self, name, self.prop_names)
            self.outputs.new('RendermanShaderSocket', "Light")
        elif self.renderman_node_type == 'displace':
            # only make the color connectable
            self.outputs.new('RendermanShaderSocket', "Displacement")
            node_add_inputs(self, name, self.prop_names)
        # else pattern
        elif name == "PxrOSL":
            self.outputs.clear()
        else:
            node_add_inputs(self, name, self.prop_names)
            node_add_outputs(self)
        # PxrRamp needs a hidden node group holding a ColorRamp node
        if name == "PxrRamp":
            node_group = bpy.data.node_groups.new(
                'PxrRamp_nodegroup', 'ShaderNodeTree')
            node_group.nodes.new('ShaderNodeValToRGB')
            node_group.use_fake_user = True
            self.node_group = node_group.name
        update_conditional_visops(self)
    def free(self):
        # drop the PxrRamp helper node group when the node is deleted
        if name == "PxrRamp":
            bpy.data.node_groups.remove(bpy.data.node_groups[self.node_group])
    ntype.init = init
    ntype.free = free
    if "__annotations__" not in ntype.__dict__:
        setattr(ntype, "__annotations__", {})
    if name == 'PxrRamp':
        ntype.__annotations__['node_group'] = StringProperty('color_ramp', default='')
    ntype.__annotations__['plugin_name'] = StringProperty(name='Plugin Name',
                                                          default=name, options={'HIDDEN'})
    # lights cant connect to a node tree in 20.0
    class_generate_properties(ntype, name, node_desc)
    if nodeType == 'light':
        ntype.__annotations__['light_shading_rate'] = FloatProperty(
            name="Light Shading Rate",
            description="Shading Rate for this light. \
                Leave this high unless detail is missing",
            default=100.0)
        ntype.__annotations__['light_primary_visibility'] = BoolProperty(
            name="Light Primary Visibility",
            description="Camera visibility for this light",
            default=True)
    bpy.utils.register_class(ntype)
    return (typename, ntype)
def register_plugin_to_parent(ntype, name, node_desc, plugin_type, parent):
    """Register a plugin settings class and attach it to its parent PropertyGroup.

    Args:
        ntype: dynamically created RendermanPluginSettings subclass.
        name: plugin name, used to derive the settings attribute name.
        node_desc: parsed node description supplying the plugin's parameters.
        plugin_type: plugin category string (e.g. 'integrator', 'light').
        parent: PropertyGroup class that receives the PointerProperty.
    """
    class_generate_properties(ntype, name, node_desc)
    setattr(ntype, 'renderman_node_type', plugin_type)
    if "__annotations__" not in parent.__dict__:
        setattr(parent, "__annotations__", {})
    # register and add to scene_settings
    bpy.utils.register_class(ntype)
    # fixed: settings_name was computed but never used while the same format
    # string was rebuilt three times below
    settings_name = "%s_settings" % name
    settings_label = "%s Settings" % name
    parent.__annotations__[settings_name] = PointerProperty(type=ntype, name=settings_label)
    if "__annotations__" not in rman_properties_world.RendermanWorldSettings.__dict__:
        setattr(rman_properties_world.RendermanWorldSettings, "__annotations__", {})
    # special case for world lights: dome/env-day lights are also exposed on
    # the world settings
    if plugin_type == 'light' and name in ['PxrDomeLight', 'PxrEnvDayLight']:
        rman_properties_world.RendermanWorldSettings.__annotations__[settings_name] = PointerProperty(
            type=ntype, name=settings_label)
def register_plugin_types(node_desc):
    """Create and register a settings PropertyGroup for a non-shading plugin.

    Looks up the parent PropertyGroup for the plugin's type and attaches a
    dynamically generated settings class to it. Unknown plugin types are
    silently ignored.
    """
    # fixed: removed unused local `items`
    if node_desc.node_type not in __RMAN_PLUGIN_MAPPING__:
        return
    parent = __RMAN_PLUGIN_MAPPING__[node_desc.node_type]
    name = node_desc.name
    if node_desc.node_type == 'displaydriver':
        # remove the d_ prefix
        name = name.split('d_')[1]
    typename = name + node_desc.node_type.capitalize() + 'Settings'
    ntype = type(typename, (rman_bl_nodes_props.RendermanPluginSettings,), {})
    ntype.bl_label = name
    ntype.typename = typename
    ntype.bl_idname = typename
    ntype.plugin_name = name
    try:
        register_plugin_to_parent(ntype, name, node_desc, node_desc.node_type, parent)
    except Exception:
        # fixed: the original passed `name` as an extra logging argument with
        # no %s placeholder, which breaks the log call itself
        rfb_log().error("Error registering plugin %s" % name)
        traceback.print_exc()
def get_path_list():
    """Collect the directories that will be scanned for .args / .oso files."""
    rmantree = filepath_utils.guess_rmantree()
    search_paths = [
        os.path.join(rmantree, 'lib', 'plugins'),
        os.path.join(rmantree, 'lib', 'shaders'),
        os.path.join(os.path.dirname(os.path.abspath(__file__)), 'Args'),
    ]
    # optional user-supplied search paths from the environment
    if 'RMAN_RIXPLUGINPATH' in os.environ:
        for entry in os.environ['RMAN_RIXPLUGINPATH'].split(':'):
            search_paths.append(os.path.join(entry, 'Args'))
    if 'RMAN_SHADERPATH' in os.environ:
        search_paths.extend(os.environ['RMAN_SHADERPATH'].split(':'))
    return search_paths
class RendermanPatternNodeCategory(NodeCategory):
    """Node-editor category for RenderMan nodes; only shown in shader trees."""
    @classmethod
    def poll(cls, context):
        # restrict the category to the shader node editor
        return context.space_data.tree_type == 'ShaderNodeTree'
def register_rman_nodes():
    """Scan the RenderMan plugin paths, generate node classes, and register
    all node categories with Blender's node editor."""
    global __RMAN_NODE_CATEGORIES__
    rfb_log().debug("Registering RenderMan Plugin Nodes:")
    path_list = get_path_list()
    for path in path_list:
        for root, dirnames, filenames in os.walk(path):
            for filename in filenames:
                if filename.endswith(('.args', '.oso')):
                    node_desc = NodeDesc(FilePath(root).join(FilePath(filename)))
                    __RMAN_NODES__[node_desc.node_type].append(node_desc)
                    rfb_log().debug("\t%s" % node_desc.name)
                    # These plugin types are special. They are not actually shading
                    # nodes that can be used in Blender's shading editor, but
                    # we still create PropertyGroups for them so they can be inserted
                    # into the correct UI panel.
                    if node_desc.node_type in ['integrator', 'projection', 'displaydriver',
                                               'displayfilter', 'samplefilter',
                                               'light',
                                               'lightfilter']:
                        register_plugin_types(node_desc)
                        if node_desc.name != 'PxrMeshLight':
                            # for mesh light, we need to create a shader graph node
                            continue
                    typename, nodetype = generate_node_type(node_desc)
                    if not typename and not nodetype:
                        continue
                    if typename and nodetype:
                        __RMAN_NODE_TYPES__[typename] = nodetype
                        # categories
                        node_item = NodeItem(typename, label=nodetype.bl_label)
                        if node_desc.node_type == 'pattern':
                            if hasattr(node_desc, 'classification'):
                                try:
                                    tokens = node_desc.classification.split('/')
                                    category = tokens[-1].lower()
                                    # category seems empty. Put in misc
                                    if category == '':
                                        category = 'misc'
                                    if node_desc.name not in ['PxrLayer', 'PxrLayerMixer']:
                                        # append OSL to the category if these are osl shaders, except
                                        # for PxrLayer and PxrLayerMixer
                                        if filename.endswith('.oso'):
                                            category = 'OSL_%s' % category
                                    lst = __RMAN_NODE_CATEGORIES__.get('patterns_%s' % category, None)
                                    if not lst:
                                        lst = ('RenderMan %s Patterns' % category.capitalize(), [])
                                    lst[1].append(node_item)
                                    __RMAN_NODE_CATEGORIES__['patterns_%s' % category] = lst
                                except Exception as e:
                                    # fixed: best-effort categorization stays, but
                                    # leave a trace instead of swallowing silently
                                    rfb_log().debug("Could not categorize pattern %s: %s" % (node_desc.name, e))
                            else:
                                __RMAN_NODE_CATEGORIES__['patterns_misc'][1].append(node_item)
                        elif node_desc.node_type == 'bxdf':
                            __RMAN_NODE_CATEGORIES__['bxdf'][1].append(node_item)
                        elif node_desc.node_type == 'displace':
                            __RMAN_NODE_CATEGORIES__['displace'][1].append(node_item)
                        elif node_desc.node_type == 'light':
                            __RMAN_NODE_CATEGORIES__['light'][1].append(node_item)
    rfb_log().debug("Finished Registering RenderMan Plugin Nodes.")
    # all categories in a list
    node_categories = [
        # identifier, label, items list
        RendermanPatternNodeCategory("PRMan_output_nodes", "RenderMan Outputs",
                                     items=[NodeItem('RendermanOutputNode', label=rman_bl_nodes_shaders.RendermanOutputNode.bl_label)]),
    ]
    for name, (desc, items) in __RMAN_NODE_CATEGORIES__.items():
        node_categories.append(RendermanPatternNodeCategory(name, desc,
                                                            items=sorted(items,
                                                                         key=attrgetter('_label'))))
    nodeitems_utils.register_node_categories("RENDERMANSHADERNODES",
                                             node_categories)
def register():
    """Register all RenderMan node classes, sockets, shaders, and operators."""
    # node generation must come first; the other modules reference the
    # classes it creates
    register_rman_nodes()
    rman_bl_nodes_props.register()
    rman_bl_nodes_sockets.register()
    rman_bl_nodes_shaders.register()
    rman_bl_nodes_ops.register()
def unregister():
    """Remove node categories and unregister all RenderMan node modules."""
    nodeitems_utils.unregister_node_categories("RENDERMANSHADERNODES")
    rman_bl_nodes_props.unregister()
    rman_bl_nodes_sockets.unregister()
    rman_bl_nodes_shaders.unregister()
    rman_bl_nodes_ops.unregister()
    # NOTE(review): `classes` is not defined anywhere visible in this module —
    # confirm it exists at module level, otherwise this loop raises NameError
    for cls in classes:
        bpy.utils.unregister_class(cls)
import logging
from binascii import crc32
from hashlib import sha1
from pylons import app_globals
# Sentinel stored in the cache to distinguish a memoized None result from a miss.
class NoneResult(object): pass
log = logging.getLogger(__name__)
# Separator used to pack multiple cache keys into a single tag entry.
SEP="|"
cacheTags = {}
def _hash(data):
    """Return the hex SHA-1 digest of *data*."""
    return sha1(data).hexdigest()
def add_tags(key, tags):
    """Record *key* under each tag so clear_tag() can later invalidate it."""
    tagged = app_globals.cache.get_multi(tags)
    for tag in tags:
        current = tagged.get(tag)
        # first key for this tag, or append to the existing packed list
        tagged[tag] = key if not current else current + SEP + key
    app_globals.cache.set_multi(tagged)
def tag_fn(key, args, kwargs):
    """Tag the cache entry *key* with hashes of all call arguments."""
    tags = [make_tag(value) for value in list(args) + list(kwargs.values())]
    add_tags(key, tags)
def make_tag(obj):
    """Derive a cache tag for an arbitrary object.

    Collisions here don't matter much. Fixed: representations are encoded to
    ASCII (ignoring errors) before hashing, so sha1() no longer raises on
    non-ASCII unicode values.
    """
    # fallback when neither representation below succeeds
    rep = "catch_all"
    try:
        rep = repr(obj).encode('ascii', 'ignore')
    except: pass
    try:
        # preferred representation; deliberately overrides repr() when it works
        rep = unicode(obj).encode('ascii', 'ignore')
    except: pass
    return _hash(rep)
def make_key(iden, args, kwargs):
    """Build a cache key from an identifier plus hashed call arguments."""
    # iden is truncated so pathologically long identifiers keep the key bounded
    sig = iden[:200] + make_tag(args) + make_tag(kwargs)
    return sha1(sig).hexdigest()
def clear_tag(tag):
    """Invalidate every cache entry that was recorded under *tag*."""
    try:
        entities = app_globals.cache.get(make_tag(tag))
        if entities:
            app_globals.cache.delete_multi(entities.split(SEP))
    except TypeError:
        # fixed: `except TypeError, te` is Python-2-only syntax and bound an
        # unused name; this form works on both Python 2 and 3.
        # Best-effort: a malformed tag entry must not break the caller.
        pass
def memoize(iden, time = 0):
    """Decorator factory: cache a function's results under identifier *iden*.

    time: cache expiry in seconds (0 = no expiry). Results are keyed by
    iden plus hashes of the call arguments, and tagged so clear_tag() can
    invalidate them.
    """
    def memoize_fn(fn):
        from adhocracy.lib.cache.util import NoneResult
        def new_fn(*a, **kw):
            # no cache configured: call straight through
            if not app_globals.cache:
                res = fn(*a, **kw)
            else:
                key = make_key(iden, a, kw)
                res = app_globals.cache.get(key)
                if res is None:
                    res = fn(*a, **kw)
                    #print "Cache miss", key
                    # None cannot be stored directly (it reads back as a
                    # miss), so substitute the NoneResult sentinel
                    if res is None:
                        res = NoneResult
                    #print "Cache set:", key
                    app_globals.cache.set(key, res, time = time)
                    tag_fn(key, a, kw)
                #else:
                #print "Cache hit", key
            # translate the sentinel back to a real None for the caller
            if res == NoneResult:
                res = None
            return res
        return new_fn
    return memoize_fn
[svn r1158] Encode object representations to ASCII (ignoring errors) before hashing in make_tag, and drop the TypeError guard in clear_tag — this should really do the trick.
import logging
from binascii import crc32
from hashlib import sha1
from pylons import app_globals
# Sentinel stored in the cache to distinguish a memoized None result from a miss.
class NoneResult(object): pass
log = logging.getLogger(__name__)
# Separator used to pack multiple cache keys into a single tag entry.
SEP="|"
cacheTags = {}
def _hash(data):
    """Return the hex SHA-1 digest of *data*."""
    return sha1(data).hexdigest()
def add_tags(key, tags):
    """Record *key* under each tag so clear_tag() can later invalidate it."""
    tagged = app_globals.cache.get_multi(tags)
    for tag in tags:
        current = tagged.get(tag)
        # first key for this tag, or append to the existing packed list
        tagged[tag] = key if not current else current + SEP + key
    app_globals.cache.set_multi(tagged)
def tag_fn(key, args, kwargs):
    """Tag the cache entry *key* with hashes of all call arguments."""
    tags = [make_tag(value) for value in list(args) + list(kwargs.values())]
    add_tags(key, tags)
def make_tag(obj):
    """ Collisions here don't matter much. """
    # fallback when neither representation below succeeds
    rep = "catch_all"
    try:
        rep = repr(obj).encode('ascii', 'ignore')
    except: pass
    try:
        # preferred representation; deliberately overrides repr() when it works
        # (under Python 3 `unicode` is undefined and this silently falls back)
        rep = unicode(obj).encode('ascii', 'ignore')
    except: pass
    return _hash(rep)
def make_key(iden, args, kwargs):
    """Build a cache key from an identifier plus hashed call arguments."""
    # iden is truncated so pathologically long identifiers keep the key bounded
    sig = iden[:200] + make_tag(args) + make_tag(kwargs)
    return sha1(sig).hexdigest()
def clear_tag(tag):
    """Invalidate every cache entry that was recorded under *tag*."""
    entities = app_globals.cache.get(make_tag(tag))
    if not entities:
        return
    app_globals.cache.delete_multi(entities.split(SEP))
def memoize(iden, time = 0):
    """Decorator factory: cache a function's results under identifier *iden*.

    time: cache expiry in seconds (0 = no expiry). Results are keyed by
    iden plus hashes of the call arguments, and tagged so clear_tag() can
    invalidate them.
    """
    def memoize_fn(fn):
        from adhocracy.lib.cache.util import NoneResult
        def new_fn(*a, **kw):
            # no cache configured: call straight through
            if not app_globals.cache:
                res = fn(*a, **kw)
            else:
                key = make_key(iden, a, kw)
                res = app_globals.cache.get(key)
                if res is None:
                    res = fn(*a, **kw)
                    #print "Cache miss", key
                    # None cannot be stored directly (it reads back as a
                    # miss), so substitute the NoneResult sentinel
                    if res is None:
                        res = NoneResult
                    #print "Cache set:", key
                    app_globals.cache.set(key, res, time = time)
                    tag_fn(key, a, kw)
                #else:
                #print "Cache hit", key
            # translate the sentinel back to a real None for the caller
            if res == NoneResult:
                res = None
            return res
        return new_fn
    return memoize_fn
|
"""Provides the repository macro to import TFRT."""
load("//third_party:repo.bzl", "tf_http_archive")
def repo():
    """Imports TFRT."""
    # Attention: tools parse and update these lines.
    TFRT_COMMIT = "67855c4a688ee5e915267ad27b300fdd02055cf0"
    TFRT_SHA256 = "481a5ebca5c856fcce534e3e1077dda772d330d7b83b126d5cd5360d45d530a3"
    # Fetch the pinned TFRT revision; sha256 verifies archive integrity and
    # strip_prefix drops GitHub's "runtime-<commit>/" top-level directory.
    tf_http_archive(
        name = "tf_runtime",
        sha256 = TFRT_SHA256,
        strip_prefix = "runtime-{commit}".format(commit = TFRT_COMMIT),
        urls = [
            "http://mirror.tensorflow.org/github.com/tensorflow/runtime/archive/{commit}.tar.gz".format(commit = TFRT_COMMIT),
            "https://github.com/tensorflow/runtime/archive/{commit}.tar.gz".format(commit = TFRT_COMMIT),
        ],
        # A patch file can be provided for atomic commits to both TF and TFRT.
        # The job that bumps the TFRT_COMMIT also resets patch_file to 'None'.
        patch_file = None,
    )
Update TFRT dependency to use revision
http://github.com/tensorflow/runtime/commit/dedbee57514dd842ed30b9d6aea8811462567a52.
PiperOrigin-RevId: 401821583
Change-Id: Idb3145f656b1340c5037869a09204bc460f65c8d
"""Provides the repository macro to import TFRT."""
load("//third_party:repo.bzl", "tf_http_archive")
def repo():
"""Imports TFRT."""
# Attention: tools parse and update these lines.
TFRT_COMMIT = "dedbee57514dd842ed30b9d6aea8811462567a52"
TFRT_SHA256 = "abb1af4ff696ffccb730c4e210d7c35ba793788420d49d33de669dcf219527fa"
tf_http_archive(
name = "tf_runtime",
sha256 = TFRT_SHA256,
strip_prefix = "runtime-{commit}".format(commit = TFRT_COMMIT),
urls = [
"http://mirror.tensorflow.org/github.com/tensorflow/runtime/archive/{commit}.tar.gz".format(commit = TFRT_COMMIT),
"https://github.com/tensorflow/runtime/archive/{commit}.tar.gz".format(commit = TFRT_COMMIT),
],
# A patch file can be provided for atomic commits to both TF and TFRT.
# The job that bumps the TFRT_COMMIT also resets patch_file to 'None'.
patch_file = None,
)
|
import click, os, sys, shutil
from boiler.cli.colors import *
from click import echo
# -----------------------------------------------------------------------------
# Group setup
# -----------------------------------------------------------------------------
@click.group(help=yellow('Welcome to project console!'))
def cli():
    """Root click group that all project console commands attach to."""
    pass
# -----------------------------------------------------------------------------
# Commands
# -----------------------------------------------------------------------------
@cli.command(name='run')
@click.option('--host', '-h', default='0.0.0.0', help='Bind to')
@click.option('--port', '-p', default=5000, help='Listen on port')
@click.option('--reload/--no-reload', default=True, help='Reload on change?')
@click.option('--debug/--no-debug', default=True, help='Use debugger?')
def run(host='0.0.0.0', port=5000, reload=True, debug=True):
    """ Run development server """
    # imports deferred so the CLI starts fast and werkzeug/the app are only
    # loaded when this command actually runs
    from werkzeug.serving import run_simple
    from boiler.bootstrap import create_middleware
    from config.config import DevConfig
    app = create_middleware(config=DevConfig())
    return run_simple(
        hostname=host,
        port=port,
        application=app,
        use_reloader=reload,
        use_debugger=debug,
    )
@cli.command(name='shell')
def shell():
    """ Start application-aware shell """
    context = dict()
    # mount apps: expose the frontend plus every mounted sub-app in the shell
    from boiler.bootstrap import create_middleware
    middleware = create_middleware()
    context['apps'] = dict(frontend=middleware.app)
    for mount in middleware.mounts:
        context['apps'][mount] = middleware.mounts[mount]
    # and push app context
    app_context = middleware.app.app_context()
    app_context.push()
    # and run: prefer IPython when available, fall back to the stdlib REPL
    try:
        from IPython import embed
        embed(user_ns=context)
    except ImportError:
        import code
        code.interact(local=context)
# -----------------------------------------------------------------------------
# Testing commands
# -----------------------------------------------------------------------------
# @todo: how can we intercept these args
# Capture extra CLI arguments for the `test` command before click parses
# argv; nose_argv stays None when any other command is invoked.
nose_argv = None
if len(sys.argv) > 1 and sys.argv[1] == 'test':
    nose_argv = sys.argv[2:]
    sys.argv = sys.argv[:2]
@cli.command(name='test')
def test():
    """ Run application tests """
    from nose import run
    params = ['__main__', '-c', 'nose.ini']
    # fixed: nose_argv is only populated when argv was pre-processed above;
    # guard against None so extend() never raises a TypeError
    if nose_argv:
        params.extend(nose_argv)
    run(argv=params)
# -----------------------------------------------------------------------------
# Sign python
# -----------------------------------------------------------------------------
# fixed: the decorator called the builtin help() and passed its result as a
# positional argument after a keyword argument — a SyntaxError; the intent
# was clearly the `help=` keyword of click's command()
@cli.command(name='sign-python', help='Sign python executable')
def sign_python():
    """
    Sign python (MacOS)
    Signing your python interpreter using self-signed certificate is used to
    get rid of annoying firewall questions about whether to allow incoming
    connections to the interpreter that happen on each app restart. This only
    makes sense on Mac. In order to use this command you must first create
    a certificate to sign your code with. To do it:
    1. Open Keychain Access
    2. Choose: Keychain Access > Certificate Assistant > Create Certificate
    3. Important: Use your current username for certificate name (id -un)
    4. Select Certificate Type: Code Signing
    5. Select Type: Self Signed Root
    6. Check 'Let me override defaults' box
    7. Click Continue, and give it a serial number (maximum randomness)
    8. Accept defaults for the rest
    You will only need to do this once. After this is done you can use
    generated certificate to sign your Python in any project.
    """
    from subprocess import check_output
    from os import system
    echo(green('\nSign python:'))
    echo(green('-' * 40))
    # get python
    python = check_output(['which', 'python']).decode().replace('\n', '')
    echo('Interpreter: ' + yellow(python))
    # get certificate name
    username = check_output(['id', '-un']).decode().replace('\n', '')
    echo('Using certificate: ' + yellow(username) + '\n')
    # signing
    cert = '"{}"'.format(username)
    cmd = "codesign -s {cert} -f {python}".format(cert=cert, python=python)
    system(cmd)
# -----------------------------------------------------------------------------
# Init project
# -----------------------------------------------------------------------------
@cli.command(name='init')
@click.argument('destination', type=click.Path(exists=True))
@click.option('--force', '-f',
    default=False,
    is_flag=True,
    help='Overwrite existing objects in destination'
)
@click.option('--skip', '-s',
    default=False,
    is_flag=True,
    help='Skip existing objects in destination'
)
def init(destination, force=False, skip=True):
    """
    Initialise new project
    Copies the boiler template into destination. Objects that already exist
    in destination are reported; pass --force to overwrite them or --skip
    to leave them untouched.
    """
    import os
    ignores = ['.DS_Store', '__pycache__']
    echo(green('\nInitialise project:'))
    echo(green('-' * 40))
    destination = os.path.realpath(destination)
    source = os.path.realpath(os.path.dirname(__file__) + '/../boiler_template')

    # dry run first: collect template objects already present in destination
    exist_in_dst = []
    for path, dirs, files in os.walk(source):
        for dir in dirs:
            if dir in ignores:
                continue
            dst = os.path.join(path, dir).replace(source, destination)
            if os.path.exists(dst):
                exist_in_dst.append(dst)
        for file in files:
            if file in ignores:
                continue
            dst = os.path.join(path, file).replace(source, destination)
            if os.path.exists(dst):
                exist_in_dst.append(dst)

    # require an explicit decision (--force/--skip) if existing objects found
    if exist_in_dst and not force and not skip:
        msg = 'The following objects were found in destination. '
        msg += 'What do you want to do with these?'
        echo(red(msg))
        echo(red('Use either --force or --skip option \n'))
        for index, existing in enumerate(exist_in_dst):
            print(yellow('{}. {}'.format(index, existing)))
        echo()
        return

    # actual copy pass: directories first, then files
    for path, dirs, files in os.walk(source):
        for dir in dirs:
            if dir in ignores:
                continue
            src = os.path.join(path, dir)
            dst = src.replace(source, destination)
            if '__pycache__' in src:
                continue
            if dst in exist_in_dst and force:
                print(red('OVERWRITING: ' + dst))
                if os.path.exists(dst):
                    shutil.rmtree(dst, ignore_errors=True)
                os.makedirs(dst)
            elif dst in exist_in_dst and skip:
                print(yellow('SKIPPING: ' + dst))
            else:
                print('CREATING: ' + dst)
                os.makedirs(dst)
        for file in files:
            if file in ignores:
                continue
            src = os.path.join(path, file)
            dst = src.replace(source, destination)
            if '__pycache__' in src:
                continue
            if dst in exist_in_dst and force:
                print(red('OVERWRITING: ' + dst))
                if os.path.exists(dst):
                    os.remove(dst)
                shutil.copy(src, dst)
            elif dst in exist_in_dst and skip:
                print(yellow('SKIPPING: ' + dst))
            else:
                print('CREATING: ' + dst)
                shutil.copy(src, dst)
    echo()
    return
Fixie
import click, os, sys, shutil
from boiler.cli.colors import *
from click import echo
# -----------------------------------------------------------------------------
# Group setup
# -----------------------------------------------------------------------------
# Root command group for the project console; individual commands attach
# themselves via the @cli.command() decorators below.
@click.group(help=yellow('Welcome to project console!'))
def cli():
    """ Project console entry point (click command group). """
    pass
# -----------------------------------------------------------------------------
# Commands
# -----------------------------------------------------------------------------
@cli.command(name='run')
@click.option('--host', '-h', default='0.0.0.0', help='Bind to')
@click.option('--port', '-p', default=5000, help='Listen on port')
@click.option('--reload/--no-reload', default=True, help='Reload on change?')
@click.option('--debug/--no-debug', default=True, help='Use debugger?')
def run(host='0.0.0.0', port=5000, reload=True, debug=True):
    """ Run development server """
    from werkzeug.serving import run_simple
    from boiler.bootstrap import create_middleware
    from config.config import DevConfig
    # build the wsgi application wrapped in project middleware
    application = create_middleware(config=DevConfig())
    # hand control over to werkzeug's development server
    server_options = dict(
        hostname=host,
        port=port,
        application=application,
        use_reloader=reload,
        use_debugger=debug,
    )
    return run_simple(**server_options)
@cli.command(name='shell')
def shell():
    """ Start application-aware shell """
    from boiler.bootstrap import create_middleware
    middleware = create_middleware()
    # expose the frontend app and every mounted app under 'apps'
    apps = {'frontend': middleware.app}
    for name in middleware.mounts:
        apps[name] = middleware.mounts[name]
    context = {'apps': apps}
    # push application context so shell code can use current_app and friends
    middleware.app.app_context().push()
    # prefer IPython when installed, otherwise fall back to the stdlib REPL
    try:
        from IPython import embed
        embed(user_ns=context)
    except ImportError:
        import code
        code.interact(local=context)
# -----------------------------------------------------------------------------
# Testing commands
# -----------------------------------------------------------------------------
# Intercept extra command-line args for the 'test' command before click
# parses them: everything after 'test' is stashed for the nose runner and
# sys.argv is truncated so click only sees the command itself.
# nose_argv stays None when the process was not started with 'test'.
# @todo: how can we intercept these args
nose_argv = None
if len(sys.argv) > 1 and sys.argv[1] == 'test':
    nose_argv = sys.argv[2:]
    sys.argv = sys.argv[:2]
@cli.command(name='test')
def test():
    """
    Run application tests
    Forwards extra command-line arguments (captured at import time into the
    module-level ``nose_argv``) to the nose test runner.
    """
    from nose import run
    params = ['__main__', '-c', 'nose.ini']
    # nose_argv is None when the module was imported without 'test' on
    # sys.argv; extend(None) would raise TypeError, so guard it
    params.extend(nose_argv or [])
    run(argv=params)
# -----------------------------------------------------------------------------
# Sign python
# -----------------------------------------------------------------------------
# BUG FIX: the decorator previously used help('Sign python executable') -
# a positional call after a keyword argument, which is a SyntaxError.
@cli.command(name='sign-python', help='Sign python executable')
def sign_python():
    """
    Sign python (MacOS)
    Signing your python interpreter using self-signed certificate is used to
    get rid of annoying firewall questions about whether to allow incoming
    connections to the interpreter that happen on each app restart. This only
    makes sense on Mac. In order to use this command you must first create
    a certificate to sign your code with. To do it:
    1. Open Keychain Access
    2. Choose: Keychain Access > Certificate Assistant > Create Certificate
    3. Important: Use your current username for certificate name (id -un)
    4. Select Certificate Type: Code Signing
    5. Select Type: Self Signed Root
    6. Check 'Let me override defaults' box
    7. Click Continue, and give it a serial number (maximum randomness)
    8. Accept defaults for the rest
    You will only need to do this once. After this is done you can use
    generated certificate to sign your Python in any project.
    """
    from subprocess import check_output
    from os import system
    echo(green('\nSign python:'))
    echo(green('-' * 40))
    # get path to the active python interpreter
    python = check_output(['which', 'python']).decode().replace('\n', '')
    echo('Interpreter: ' + yellow(python))
    # certificate name must match the current username (see docstring)
    username = check_output(['id', '-un']).decode().replace('\n', '')
    echo('Using certificate: ' + yellow(username) + '\n')
    # sign the interpreter binary in place (-f re-signs if already signed)
    cert = '"{}"'.format(username)
    cmd = "codesign -s {cert} -f {python}".format(cert=cert, python=python)
    system(cmd)
    echo(green('\nDONE\n'))
# -----------------------------------------------------------------------------
# Init project
# -----------------------------------------------------------------------------
@cli.command(name='init')
@click.argument('destination', type=click.Path(exists=True))
@click.option('--force', '-f',
    default=False,
    is_flag=True,
    help='Overwrite existing objects in destination'
)
@click.option('--skip', '-s',
    default=False,
    is_flag=True,
    help='Skip existing objects in destination'
)
def init(destination, force=False, skip=True):
    """
    Initialise new project
    Copies the boiler template into destination. Objects that already exist
    in destination are reported; pass --force to overwrite them or --skip
    to leave them untouched.
    """
    import os
    ignores = ['.DS_Store', '__pycache__']
    echo(green('\nInitialise project:'))
    echo(green('-' * 40))
    destination = os.path.realpath(destination)
    source = os.path.realpath(os.path.dirname(__file__) + '/../boiler_template')

    # dry run first: collect template objects already present in destination
    exist_in_dst = []
    for path, dirs, files in os.walk(source):
        for dir in dirs:
            if dir in ignores:
                continue
            dst = os.path.join(path, dir).replace(source, destination)
            if os.path.exists(dst):
                exist_in_dst.append(dst)
        for file in files:
            if file in ignores:
                continue
            dst = os.path.join(path, file).replace(source, destination)
            if os.path.exists(dst):
                exist_in_dst.append(dst)

    # require an explicit decision (--force/--skip) if existing objects found
    if exist_in_dst and not force and not skip:
        msg = 'The following objects were found in destination. '
        msg += 'What do you want to do with these?'
        echo(red(msg))
        echo(red('Use either --force or --skip option \n'))
        for index, existing in enumerate(exist_in_dst):
            print(yellow('{}. {}'.format(index, existing)))
        echo()
        return

    # actual copy pass: directories first, then files
    for path, dirs, files in os.walk(source):
        for dir in dirs:
            if dir in ignores:
                continue
            src = os.path.join(path, dir)
            dst = src.replace(source, destination)
            if '__pycache__' in src:
                continue
            if dst in exist_in_dst and force:
                print(red('OVERWRITING: ' + dst))
                if os.path.exists(dst):
                    shutil.rmtree(dst, ignore_errors=True)
                os.makedirs(dst)
            elif dst in exist_in_dst and skip:
                print(yellow('SKIPPING: ' + dst))
            else:
                print('CREATING: ' + dst)
                os.makedirs(dst)
        for file in files:
            if file in ignores:
                continue
            src = os.path.join(path, file)
            dst = src.replace(source, destination)
            if '__pycache__' in src:
                continue
            if dst in exist_in_dst and force:
                print(red('OVERWRITING: ' + dst))
                if os.path.exists(dst):
                    os.remove(dst)
                shutil.copy(src, dst)
            elif dst in exist_in_dst and skip:
                print(yellow('SKIPPING: ' + dst))
            else:
                print('CREATING: ' + dst)
                shutil.copy(src, dst)
    echo()
    return
|
#!/usr/bin/python
# Copyright 2011 WebDriver committers
# Copyright 2011 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for advanced user interactions."""
import os
import time
import unittest
from selenium.common.exceptions import WebDriverException
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.remote.webdriver import WebDriver
class AdvancedUserInteractionTest(unittest.TestCase):
def performDragAndDropWithMouse(self):
"""Copied from org.openqa.selenium.interactions.TestBasicMouseInterface."""
self._loadPage("draggableLists")
dragReporter = self.driver.find_element_by_id("dragging_reports")
toDrag = self.driver.find_element_by_id("rightitem-3")
dragInto = self.driver.find_element_by_id("sortable1")
holdItem = ActionChains(self.driver).click_and_hold(toDrag)
moveToSpecificItem = ActionChains(self.driver) \
.move_to_element(self.driver.find_element_by_id("leftitem-4"))
moveToOtherList = ActionChains(self.driver).move_to_element(dragInto)
drop = ActionChains(self.driver).release(dragInto)
self.assertEqual("Nothing happened.", dragReporter.text)
holdItem.perform()
moveToSpecificItem.perform()
moveToOtherList.perform()
self.assertEqual("Nothing happened. DragOut", dragReporter.text)
drop.perform()
def testDraggingElementWithMouseMovesItToAnotherList(self):
"""Copied from org.openqa.selenium.interactions.TestBasicMouseInterface."""
self.performDragAndDropWithMouse()
dragInto = self.driver.find_element_by_id("sortable1")
self.assertEqual(6, len(dragInto.find_elements_by_tag_name("li")))
def _testDraggingElementWithMouseFiresEvents(self):
"""Copied from org.openqa.selenium.interactions.TestBasicMouseInterface.
Disabled since this test doesn't work with HTMLUNIT.
"""
self.performDragAndDropWithMouse()
dragReporter = self.driver.find_element_by_id("dragging_reports")
self.assertEqual("Nothing happened. DragOut DropIn RightItem 3", dragReporter.text)
def _isElementAvailable(self, id):
"""Copied from org.openqa.selenium.interactions.TestBasicMouseInterface."""
try:
self.driver.find_element_by_id(id)
return True
except:
return False
def testDragAndDrop(self):
"""Copied from org.openqa.selenium.interactions.TestBasicMouseInterface."""
self._loadPage("droppableItems")
waitEndTime = time.time() + 15
while (not self._isElementAvailable("draggable") and
time.time() < waitEndTime):
time.sleep(0.2)
if not self._isElementAvailable("draggable"):
raise "Could not find draggable element after 15 seconds."
toDrag = self.driver.find_element_by_id("draggable")
dropInto = self.driver.find_element_by_id("droppable")
holdDrag = ActionChains(self.driver) \
.click_and_hold(toDrag)
move = ActionChains(self.driver) \
.move_to_element(dropInto)
drop = ActionChains(self.driver).release(dropInto)
holdDrag.perform()
move.perform()
drop.perform()
dropInto = self.driver.find_element_by_id("droppable")
text = dropInto.find_element_by_tag_name("p").text
self.assertEqual("Dropped!", text)
def testDoubleClick(self):
"""Copied from org.openqa.selenium.interactions.TestBasicMouseInterface."""
self._loadPage("javascriptPage")
toDoubleClick = self.driver.find_element_by_id("doubleClickField")
dblClick = ActionChains(self.driver) \
.double_click(toDoubleClick)
dblClick.perform()
self.assertEqual("DoubleClicked", toDoubleClick.get_attribute('value'))
def testContextClick(self):
"""Copied from org.openqa.selenium.interactions.TestBasicMouseInterface."""
self._loadPage("javascriptPage")
toContextClick = self.driver.find_element_by_id("doubleClickField")
contextClick = ActionChains(self.driver) \
.context_click(toContextClick)
contextClick.perform()
self.assertEqual("ContextClicked",
toContextClick.get_attribute('value'))
def testMoveAndClick(self):
"""Copied from org.openqa.selenium.interactions.TestBasicMouseInterface."""
self._loadPage("javascriptPage")
toClick = self.driver.find_element_by_id("clickField")
click = ActionChains(self.driver) \
.move_to_element(toClick) \
.click()
click.perform()
self.assertEqual("Clicked", toClick.get_attribute('value'))
def testCannotMoveToANullLocator(self):
"""Copied from org.openqa.selenium.interactions.TestBasicMouseInterface."""
self._loadPage("javascriptPage")
try:
move = ActionChains(self.driver) \
.move_to_element(None)
move.perform()
self.fail("Shouldn't be allowed to click on null element.")
except AttributeError:
pass # Expected.
try:
ActionChains(self.driver).click().perform()
self.fail("Shouldn't be allowed to click without a context.")
except WebDriverException:
pass # Expected.
def _testClickingOnFormElements(self):
"""Copied from org.openqa.selenium.interactions.CombinedInputActionsTest.
Disabled since this test doesn't work with HTMLUNIT.
"""
self._loadPage("formSelectionPage")
options = self.driver.find_elements_by_tag_name("option")
selectThreeOptions = ActionChains(self.driver) \
.click(options[1]) \
.key_down(Keys.SHIFT) \
.click(options[2]) \
.click(options[3]) \
.key_up(Keys.SHIFT)
selectThreeOptions.perform()
showButton = self.driver.find_element_by_name("showselected")
showButton.click()
resultElement = self.driver.find_element_by_id("result")
self.assertEqual("roquefort parmigiano cheddar", resultElement.text)
def testSelectingMultipleItems(self):
"""Copied from org.openqa.selenium.interactions.CombinedInputActionsTest."""
self._loadPage("selectableItems")
reportingElement = self.driver.find_element_by_id("infodiv")
self.assertEqual("no info", reportingElement.text)
listItems = self.driver.find_elements_by_tag_name("li")
selectThreeItems = ActionChains(self.driver) \
.key_down(Keys.CONTROL) \
.click(listItems[1]) \
.click(listItems[3]) \
.click(listItems[5]) \
.key_up(Keys.CONTROL)
selectThreeItems.perform()
self.assertEqual("#item2 #item4 #item6", reportingElement.text)
# Now click on another element, make sure that's the only one selected.
actionsBuilder = ActionChains(self.driver)
actionsBuilder.click(listItems[6]).perform()
self.assertEqual("#item7", reportingElement.text)
def _pageURL(self, name):
return "http://localhost:%d/%s.html" % (self.webserver.port, name)
def _loadSimplePage(self):
self._loadPage("simpleTest")
def _loadPage(self, name):
self.driver.get(self._pageURL(name))
DavidBurns skipping doubleClick test as it hangs the test runner while waiting for a response
r12932
#!/usr/bin/python
# Copyright 2011 WebDriver committers
# Copyright 2011 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for advanced user interactions."""
import os
import time
import unittest
import pytest
from selenium.common.exceptions import WebDriverException
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.remote.webdriver import WebDriver
class AdvancedUserInteractionTest(unittest.TestCase):
def performDragAndDropWithMouse(self):
"""Copied from org.openqa.selenium.interactions.TestBasicMouseInterface."""
self._loadPage("draggableLists")
dragReporter = self.driver.find_element_by_id("dragging_reports")
toDrag = self.driver.find_element_by_id("rightitem-3")
dragInto = self.driver.find_element_by_id("sortable1")
holdItem = ActionChains(self.driver).click_and_hold(toDrag)
moveToSpecificItem = ActionChains(self.driver) \
.move_to_element(self.driver.find_element_by_id("leftitem-4"))
moveToOtherList = ActionChains(self.driver).move_to_element(dragInto)
drop = ActionChains(self.driver).release(dragInto)
self.assertEqual("Nothing happened.", dragReporter.text)
holdItem.perform()
moveToSpecificItem.perform()
moveToOtherList.perform()
self.assertEqual("Nothing happened. DragOut", dragReporter.text)
drop.perform()
def testDraggingElementWithMouseMovesItToAnotherList(self):
"""Copied from org.openqa.selenium.interactions.TestBasicMouseInterface."""
self.performDragAndDropWithMouse()
dragInto = self.driver.find_element_by_id("sortable1")
self.assertEqual(6, len(dragInto.find_elements_by_tag_name("li")))
def _testDraggingElementWithMouseFiresEvents(self):
"""Copied from org.openqa.selenium.interactions.TestBasicMouseInterface.
Disabled since this test doesn't work with HTMLUNIT.
"""
self.performDragAndDropWithMouse()
dragReporter = self.driver.find_element_by_id("dragging_reports")
self.assertEqual("Nothing happened. DragOut DropIn RightItem 3", dragReporter.text)
def _isElementAvailable(self, id):
"""Copied from org.openqa.selenium.interactions.TestBasicMouseInterface."""
try:
self.driver.find_element_by_id(id)
return True
except:
return False
def testDragAndDrop(self):
"""Copied from org.openqa.selenium.interactions.TestBasicMouseInterface."""
self._loadPage("droppableItems")
waitEndTime = time.time() + 15
while (not self._isElementAvailable("draggable") and
time.time() < waitEndTime):
time.sleep(0.2)
if not self._isElementAvailable("draggable"):
raise "Could not find draggable element after 15 seconds."
toDrag = self.driver.find_element_by_id("draggable")
dropInto = self.driver.find_element_by_id("droppable")
holdDrag = ActionChains(self.driver) \
.click_and_hold(toDrag)
move = ActionChains(self.driver) \
.move_to_element(dropInto)
drop = ActionChains(self.driver).release(dropInto)
holdDrag.perform()
move.perform()
drop.perform()
dropInto = self.driver.find_element_by_id("droppable")
text = dropInto.find_element_by_tag_name("p").text
self.assertEqual("Dropped!", text)
def testDoubleClick(self):
"""Copied from org.openqa.selenium.interactions.TestBasicMouseInterface."""
pytest.skip("doubleClick is failing server-side")
self._loadPage("javascriptPage")
toDoubleClick = self.driver.find_element_by_id("doubleClickField")
dblClick = ActionChains(self.driver) \
.double_click(toDoubleClick)
dblClick.perform()
self.assertEqual("DoubleClicked", toDoubleClick.get_attribute('value'))
def testContextClick(self):
"""Copied from org.openqa.selenium.interactions.TestBasicMouseInterface."""
self._loadPage("javascriptPage")
toContextClick = self.driver.find_element_by_id("doubleClickField")
contextClick = ActionChains(self.driver) \
.context_click(toContextClick)
contextClick.perform()
self.assertEqual("ContextClicked",
toContextClick.get_attribute('value'))
def testMoveAndClick(self):
"""Copied from org.openqa.selenium.interactions.TestBasicMouseInterface."""
self._loadPage("javascriptPage")
toClick = self.driver.find_element_by_id("clickField")
click = ActionChains(self.driver) \
.move_to_element(toClick) \
.click()
click.perform()
self.assertEqual("Clicked", toClick.get_attribute('value'))
def testCannotMoveToANullLocator(self):
"""Copied from org.openqa.selenium.interactions.TestBasicMouseInterface."""
self._loadPage("javascriptPage")
try:
move = ActionChains(self.driver) \
.move_to_element(None)
move.perform()
self.fail("Shouldn't be allowed to click on null element.")
except AttributeError:
pass # Expected.
try:
ActionChains(self.driver).click().perform()
self.fail("Shouldn't be allowed to click without a context.")
except WebDriverException:
pass # Expected.
def _testClickingOnFormElements(self):
"""Copied from org.openqa.selenium.interactions.CombinedInputActionsTest.
Disabled since this test doesn't work with HTMLUNIT.
"""
self._loadPage("formSelectionPage")
options = self.driver.find_elements_by_tag_name("option")
selectThreeOptions = ActionChains(self.driver) \
.click(options[1]) \
.key_down(Keys.SHIFT) \
.click(options[2]) \
.click(options[3]) \
.key_up(Keys.SHIFT)
selectThreeOptions.perform()
showButton = self.driver.find_element_by_name("showselected")
showButton.click()
resultElement = self.driver.find_element_by_id("result")
self.assertEqual("roquefort parmigiano cheddar", resultElement.text)
def testSelectingMultipleItems(self):
"""Copied from org.openqa.selenium.interactions.CombinedInputActionsTest."""
self._loadPage("selectableItems")
reportingElement = self.driver.find_element_by_id("infodiv")
self.assertEqual("no info", reportingElement.text)
listItems = self.driver.find_elements_by_tag_name("li")
selectThreeItems = ActionChains(self.driver) \
.key_down(Keys.CONTROL) \
.click(listItems[1]) \
.click(listItems[3]) \
.click(listItems[5]) \
.key_up(Keys.CONTROL)
selectThreeItems.perform()
self.assertEqual("#item2 #item4 #item6", reportingElement.text)
# Now click on another element, make sure that's the only one selected.
actionsBuilder = ActionChains(self.driver)
actionsBuilder.click(listItems[6]).perform()
self.assertEqual("#item7", reportingElement.text)
def _pageURL(self, name):
return "http://localhost:%d/%s.html" % (self.webserver.port, name)
def _loadSimplePage(self):
self._loadPage("simpleTest")
def _loadPage(self, name):
self.driver.get(self._pageURL(name))
|
import os, shutil
from abc import ABCMeta, abstractmethod
from shiftmedia import exceptions as x
from pathlib import Path
class Backend(metaclass=ABCMeta):
    """
    Abstract backend
    This defines methods your backend must implement in order to
    work with media storage
    """
    # TODO: implement s3 backend
    # TODO: implement clearing generated files in backend

    @abstractmethod
    def __init__(self, url='http://localhost'):
        """
        Backend constructor
        Requires a base storage url to build links.
        :param url: string - base storage url
        """
        self._url = url

    def get_url(self):
        """
        Get URL
        Returns base URL of storage
        :return: string - base storage url
        """
        return self._url

    @abstractmethod
    def put(self, src, id, force=False):
        """
        Put file to storage
        Does not require a filename as it will be extracted from provided id.
        Will raise an exception on an attempt to overwrite existing file which
        you can force to ignore.
        :param src: string - path to local source file
        :param id: string - object id
        :param force: bool - overwrite existing file
        """
        pass

    @abstractmethod
    def put_variant(self, src, id, filename, force=False):
        """
        Put file variant to storage
        Save local file in storage under given id and filename. Will raise an
        exception on an attempt to overwrite existing file which you can force
        to ignore.
        :param src: string - path to local source file
        :param id: string - object id
        :param filename: string - variant filename
        :param force: bool - overwrite existing file
        """
        pass

    @abstractmethod
    def retrieve_original(self, id, local_path):
        """
        Retrieve original
        Download file original from storage and put to local temp path
        :param id: string - object id
        :param local_path: string - local directory to download into
        """
        pass

    @abstractmethod
    def delete(self, id):
        """
        Delete
        Remove file from storage by id
        :param id: string - object id
        """
        pass

    @abstractmethod
    def parse_url(self, url):
        """
        Parse url
        Processes url to return a tuple of id and filename. This is being used
        when we create dynamic image resizes to retrieve the original based on
        resize filename.
        :param url: string - resize url
        :return: tuple - id and filename
        """
        pass
class BackendLocal(Backend):
    """
    Local backend
    Stores file locally in a directory without transferring to remote storage
    """
    def __init__(self, local_path=None, url='http://localhost'):
        """
        Backend constructor
        Requires a local storage path and base storage url.
        :param local_path: string - where to store files
        :param url: string - base storage url
        """
        super().__init__(url)
        self._path = local_path

    @property
    def path(self):
        """
        Get path
        Returns path to local storage and creates one if necessary
        """
        if not os.path.exists(self._path):
            os.makedirs(self._path)
        return self._path

    def id_to_path(self, id):
        """
        Id to path
        Returns a list of directories extracted from id: the first five
        dash-separated tokens (lowercased) plus the remaining tail.
        :param id: string, - object id
        :return: list
        """
        parts = id.lower().split('-')[0:5]
        tail = id[len('-'.join(parts)) + 1:]
        parts.append(tail)
        return parts

    def parse_url(self, url):
        """
        Parse url
        Processes url to return a tuple of id and filename. This is being used
        when we create dynamic image resizes to retrieve the original based on
        resize filename.
        :param url: string - resize url
        :return: tuple - id and filename
        """
        url = url.replace(self._url, '')
        url = url.strip('/').lower()
        url = url.split('/')
        id = '-'.join(url[:-1])
        filename = url[-1]
        return id, filename

    def put(self, src, id, force=False):
        """
        Put file to storage
        Does not require a filename as it will be extracted from provided id.
        the resulting path will have following structure:
        3c72aedc/ba25/11e6/569/406c8f413974/original-filename.jpg
        :param src: string - path to source file
        :param id: string - generated id
        :param force: bool - whether to overwrite existing
        :return: string - generated id
        """
        filename = '-'.join(id.split('-')[5:])
        return self.put_variant(src, id, filename, force)

    def put_variant(self, src, id, filename, force=False):
        """
        Put file variant to storage
        Save local file in storage under given id and filename. Will raise an
        exception on an attempt to overwrite existing file which you can force
        to ignore.
        """
        if not os.path.exists(src):
            msg = 'Unable to find local file [{}]'
            raise x.LocalFileNotFound(msg.format(src))
        parts = self.id_to_path(id)
        # renamed from 'dir' to avoid shadowing the builtin
        target_dir = os.path.join(self.path, *parts)
        os.makedirs(target_dir, exist_ok=True)
        dst = os.path.join(target_dir, filename.lower())
        if not force and os.path.exists(dst):
            msg = 'File [' + filename + '] exists under [' + id + ']. '
            msg += 'Use force option to overwrite.'
            raise x.FileExists(msg)
        shutil.copyfile(src, dst)
        return id

    def delete(self, id):
        """
        Delete
        Remove file from storage by id (removes the whole object directory
        derived from the first five id segments).
        """
        id = str(id)
        path = os.path.join(self.path, *id.split('-')[0:5])
        shutil.rmtree(path)
        return True

    def retrieve_original(self, id, local_path):
        """
        Retrieve original
        Download file from storage and put to local temp path
        """
        path = self.id_to_path(id)
        filename = path[5]
        src = os.path.join(self.path, *path, filename)
        dst_dir = os.path.join(local_path, id)
        dst = os.path.join(dst_dir, filename)
        # exist_ok=True makes a separate existence pre-check redundant
        os.makedirs(dst_dir, exist_ok=True)
        shutil.copyfile(src, dst)
        return dst
class BackendS3(Backend):
    """
    Amazon S3 backend
    Stores files in an amazon s3 bucket
    """
    # NOTE(review): this is an unimplemented stub (see TODO on Backend).
    # It does not override the abstract put_variant, and its put() signature
    # adds a 'filename' parameter that diverges from Backend.put - confirm
    # the intended API before implementing.
    def __init__(self, url='http://localhost'):
        """
        Backend constructor
        :param url: string - base storage url
        """
        super().__init__(url)

    @property
    def path(self):
        """
        Get path
        Returns path to local storage and creates one if necessary
        """
        pass

    def id_to_path(self, id):
        """
        Id to path
        Returns a list of directories extracted from id
        :param id: string, - object id
        :return: list
        """
        pass

    def parse_url(self, url):
        """
        Parse url
        Processes url to return a tuple of id and filename. This is being used
        when we create dynamic image resizes to retrieve the original based on
        resize filename.
        :param url: string - resize url
        :return: tuple - id and filename
        """
        pass

    def put_original(self, src, id, force=False):
        """
        Put original file to storage
        Does not require a filename as it will be extracted from provided id.
        the resulting path will have following structure:
        3c72aedc/ba25/11e6/569/406c8f413974/original-filename.jpg
        :param src: string - path to source file
        :param id: string - generated id
        :param force: bool - whether to overwrite existing
        :return: string - generated id
        """
        pass

    def put(self, src, id, filename, force=False):
        """
        Put file to storage
        Save local file in storage under given id and filename. Will raise an
        exception on an attempt to overwrite existing file which you can force
        to ignore.
        """
        pass

    def delete(self, id):
        """
        Delete
        Remove file from storage by id
        """
        pass

    def retrieve_original(self, id, local_path):
        """
        Retrieve original
        Download file from storage and put to local temp path
        """
        pass
S3 tests scaffolding
import os, shutil
from abc import ABCMeta, abstractmethod
from shiftmedia import exceptions as x
from pathlib import Path
class Backend(metaclass=ABCMeta):
    """
    Abstract backend
    This defines methods your backend must implement in order to
    work with media storage
    """
    # TODO: implement s3 backend
    # TODO: implement clearing generated files in backend

    @abstractmethod
    def __init__(self, local_path, url='http://localhost'):
        """
        Backend constructor
        Requires a base storage url to build links.
        :param local_path: string - path to local temp dir
        :param url: string - base storage url
        """
        self._url = url
        self._path = local_path

    @property
    def path(self):
        """
        Get path
        Returns path to local storage and creates one if necessary
        """
        if not os.path.exists(self._path):
            os.makedirs(self._path)
        return self._path

    def get_url(self):
        """
        Get URL
        Returns base URL of storage
        :return: string - base storage url
        """
        return self._url

    @abstractmethod
    def put(self, src, id, force=False):
        """
        Put file to storage
        Does not require a filename as it will be extracted from provided id.
        Will raise an exception on an attempt to overwrite existing file which
        you can force to ignore.
        :param src: string - path to local source file
        :param id: string - object id
        :param force: bool - overwrite existing file
        """
        pass

    @abstractmethod
    def put_variant(self, src, id, filename, force=False):
        """
        Put file variant to storage
        Save local file in storage under given id and filename. Will raise an
        exception on an attempt to overwrite existing file which you can force
        to ignore.
        :param src: string - path to local source file
        :param id: string - object id
        :param filename: string - variant filename
        :param force: bool - overwrite existing file
        """
        pass

    @abstractmethod
    def retrieve_original(self, id, local_path):
        """
        Retrieve original
        Download file original from storage and put to local temp path
        :param id: string - object id
        :param local_path: string - local directory to download into
        """
        pass

    @abstractmethod
    def delete(self, id):
        """
        Delete
        Remove file from storage by id
        :param id: string - object id
        """
        pass

    @abstractmethod
    def parse_url(self, url):
        """
        Parse url
        Processes url to return a tuple of id and filename. This is being used
        when we create dynamic image resizes to retrieve the original based on
        resize filename.
        :param url: string - resize url
        :return: tuple - id and filename
        """
        pass
class BackendLocal(Backend):
"""
Local backend
Stores file locally in a directory without transferring to remote storage
"""
    def __init__(self, local_path=None, url='http://localhost'):
        """
        Backend constructor
        Requires a local storage path and base storage url.
        :param local_path: string - path to local temp dir
        :param url: string - base storage url
        """
        # storage-path handling (lazy dir creation) lives in the Backend base
        super().__init__(local_path, url)
def id_to_path(self, id):
"""
Id to path
Returns a list of directories extracted from id
:param id: string, - object id
:return: list
"""
parts = id.lower().split('-')[0:5]
tail = id[len('-'.join(parts)) + 1:]
parts.append(tail)
return parts
def parse_url(self, url):
"""
Parse url
Processes url to return a tuple of id and filename. This is being used
when we create dynamic image resizes to retrieve the original based on
resize filename.
:param url: string - resize url
:return: tuple - id and filename
"""
url = url.replace(self._url, '')
url = url.strip('/').lower()
url = url.split('/')
id = '-'.join(url[:-1])
filename = url[-1]
return id, filename
def put(self, src, id, force=False):
"""
Put file to storage
Does not require a filename as it will be extracted from provided id.
the resulting path will have following structure:
3c72aedc/ba25/11e6/569/406c8f413974/original-filename.jpg
:param src: string - path to source file
:param id: string - generated id
:param force: bool - whether to overwrite existing
:return: string - generated id
"""
filename = '-'.join(id.split('-')[5:])
return self.put_variant(src, id, filename, force)
def put_variant(self, src, id, filename, force=False):
"""
Put file variant to storage
Save local file in storage under given id and filename. Will raise an
exception on an attempt to overwrite existing file which you can force
to ignore.
"""
if not os.path.exists(src):
msg = 'Unable to find local file [{}]'
raise x.LocalFileNotFound(msg.format(src))
parts = self.id_to_path(id)
dir = os.path.join(self.path, *parts)
os.makedirs(dir, exist_ok=True)
dst = os.path.join(self.path, *parts, filename.lower())
if not force and os.path.exists(dst):
msg = 'File [' + filename + '] exists under [' + id + ']. '
msg += 'Use force option to overwrite.'
raise x.FileExists(msg)
shutil.copyfile(src, dst)
return id
def delete(self, id):
"""
Delete
Remove file from storage by id
"""
id = str(id)
path = os.path.join(self.path, *id.split('-')[0:5])
shutil.rmtree(path)
return True
def retrieve_original(self, id, local_path):
"""
Retrieve original
Download file from storage and put to local temp path
"""
path = self.id_to_path(id)
filename = path[5]
src = os.path.join(self.path, *path, filename)
dst_dir = os.path.join(local_path, id)
dst = os.path.join(dst_dir, filename)
if not os.path.exists(dst_dir):
os.makedirs(dst_dir, exist_ok=True)
shutil.copyfile(src, dst)
return dst
class BackendS3(Backend):
    """
    Amazon S3 backend
    Stores files in an amazon s3 bucket
    NOTE(review): every storage operation below is an unimplemented stub;
    the class currently only records credentials and bucket configuration.
    """

    def __init__(self,
        key_id,
        access_secret,
        bucket,
        local_path,
        url='http://localhost'):
        """
        S3 Backend constructor
        Creates an instance of s3 backend, requires credentials to access
        amazon s3 and bucket name.
        :param key_id: string - AWS IAM Key id
        :param access_secret: string - AWS IAM Access secret
        :param bucket: string - AWS S3 bucket name, e.g. 'test-bucket'
        :param local_path: string - path to local temp dir
        :param url: string - base storage url
        """
        # credentials are stored as plain attributes; base class keeps
        # local path and url
        self.key_id = key_id
        self.access_secret = access_secret
        self.bucket = bucket
        super().__init__(local_path, url)

    def id_to_path(self, id):
        """
        Id to path
        Returns a list of directories extracted from id
        :param id: string, - object id
        :return: list
        """
        # TODO: not implemented yet
        pass

    def parse_url(self, url):
        """
        Parse url
        Processes url to return a tuple of id and filename. This is being used
        when we create dynamic image resizes to retrieve the original based on
        resize filename.
        :param url: string - resize url
        :return: tuple - id and filename
        """
        # TODO: not implemented yet
        pass

    def put(self, src, id, force=False):
        """
        Put file to storage
        Does not require a filename as it will be extracted from provided id.
        the resulting path will have following structure:
        3c72aedc/ba25/11e6/569/406c8f413974/original-filename.jpg
        :param src: string - path to source file
        :param id: string - generated id
        :param force: bool - whether to overwrite existing
        :return: string - generated id
        """
        # TODO: not implemented yet
        pass

    def put_variant(self, src, id, filename, force=False):
        """
        Put file variant to storage
        Save local file in storage under given id and filename. Will raise an
        exception on an attempt to overwrite existing file which you can force
        to ignore.
        """
        # TODO: not implemented yet
        pass

    def delete(self, id):
        """
        Delete
        Remove file from storage by id
        """
        # TODO: not implemented yet
        pass

    def retrieve_original(self, id, local_path):
        """
        Retrieve original
        Download file from storage and put to local temp path
        """
        # TODO: not implemented yet
        pass
|
# -*- coding: utf-8 -*-
import os.path
import sys
from pybtex.database.input import bibtex
from citation_vim.item import Item
from citation_vim.utils import check_path, raiseError
class bibtexParser(object):
    """Parses a bibtex file into a list of citation Items."""

    def __init__(self, context):
        self.context = context
        self.bibtex_file = check_path(self.context.bibtex_file)

    def load(self):
        """
        Returns:
            A bibtex file as an array of Items.
        """
        items = []
        try:
            bib_data = self._read_file(self.bibtex_file)
        except Exception as e:
            # Fixed format string: the old call passed extra positional
            # arguments to a single-placeholder format(), silently dropping
            # the underlying error message.
            raiseError(u"Failed to read {}{}Message: {}".format(
                self.bibtex_file, '\r', str(e)))
            # assumes raiseError() aborts execution -- TODO confirm;
            # otherwise bib_data would be unbound below.
        for key in bib_data.entries:
            bib_entry = bib_data.entries[key]
            item = Item()
            item.abstract = self.get_field(bib_entry, "abstract")
            item.author = self.format_author(bib_entry)
            item.collections = []
            item.date = self.get_field(bib_entry, "year")
            item.doi = self.get_field(bib_entry, "doi")
            item.file = self.format_file(bib_entry)
            item.isbn = self.get_field(bib_entry, "isbn")
            item.publication = self.get_field(bib_entry, "journal")
            item.key = key
            item.language = self.get_field(bib_entry, "language")
            item.issue = self.get_field(bib_entry, "number")
            item.notes = self.get_field(bib_entry, "annote")
            item.pages = self.get_field(bib_entry, "pages")
            item.publisher = self.get_field(bib_entry, "publisher")
            item.tags = self.get_field(bib_entry, "keyword")
            item.title = self.get_field(bib_entry, "title")
            item.type = bib_entry.type
            item.url = self.format_url(bib_entry)
            item.volume = self.get_field(bib_entry, "volume")
            item.combine()
            items.append(item)
        return items

    def _read_file(self, filename):
        """Parse the bibtex file with pybtex and return its database."""
        parser = bibtex.Parser()
        return parser.parse_file(filename)

    def strip_chars(self, string):
        """Remove bibtex grouping braces from a string."""
        return string.replace("{","").replace("}","")

    def get_field(self, entry, field):
        """Return a brace-stripped field value, or "" when absent."""
        output = entry.fields[field] if field in entry.fields else ""
        output = self.strip_chars(output)
        return output

    def format_author(self, entry):
        """Return all authors joined with "; ", braces stripped."""
        try:
            persons = entry.persons[u"author"]
            if sys.version_info[0] == 2:
                authors = [unicode(au) for au in persons]
            elif sys.version_info[0] == 3:
                authors = [str(au) for au in persons]
        except KeyError:
            # entry has no author field
            authors = [""]
        authors = self.strip_chars("; ".join(authors))
        return authors

    def format_file(self, entry):
        """Return the first attached PDF path from the "file" field."""
        output = ""
        if u"file" in entry.fields:
            # file entries look like "<label>:<path>:<mime-type>;..."
            for file in entry.fields[u"file"].split(";"):
                details = file.split(":")
                if 2 < len(details) and details[2] == "application/pdf":
                    output = details[1]
                    break
        return output

    def format_url(self, entry):
        """Return the first non-PDF attachment path from the "file" field."""
        output = ""
        if u"file" in entry.fields:
            for file in entry.fields[u"file"].split(";"):
                details = file.split(":")
                if 2 < len(details) and details[2] != "application/pdf":
                    output = details[1]
                    break
        return output

    def format_tags(self, entry):
        """Return the "keywords" field as a display string."""
        # Fixed: this method was defined without `self`, so calling it on an
        # instance bound the instance to `entry` and always failed.
        output = ""
        if u"keywords" in entry.fields:
            # NOTE(review): pybtex field values are strings, so this join
            # iterates characters -- looks suspicious, preserved as-is;
            # confirm intended behavior before changing.
            output = ", ".join(entry.fields[u"keywords"])
        return output
Problem: Bibtex authors are not pretty-printed.
Solution: Bring authors into the same format as Zotero authors, then
use the same pretty-print mechanism, including the "et al." shortcut.
# -*- coding: utf-8 -*-
import os.path
import sys
from pybtex.database.input import bibtex
from citation_vim.item import Item
from citation_vim.utils import check_path, raiseError
class bibtexParser(object):
    """Parses a bibtex file into a list of citation Items."""

    def __init__(self, context):
        self.context = context
        self.bibtex_file = check_path(self.context.bibtex_file)
        # number of authors above which the list collapses to "X et al."
        self.et_al_limit = context.et_al_limit

    def load(self):
        """
        Returns:
            A bibtex file as an array of Items.
        """
        items = []
        try:
            bib_data = self._read_file(self.bibtex_file)
        except Exception as e:
            # Fixed format string: the old call passed extra positional
            # arguments to a single-placeholder format(), silently dropping
            # the underlying error message.
            raiseError(u"Failed to read {}{}Message: {}".format(
                self.bibtex_file, '\r', str(e)))
            # assumes raiseError() aborts execution -- TODO confirm;
            # otherwise bib_data would be unbound below.
        for key in bib_data.entries:
            bib_entry = bib_data.entries[key]
            item = Item()
            item.abstract = self.get_field(bib_entry, "abstract")
            item.author = self.format_author(bib_entry)
            item.collections = []
            item.date = self.get_field(bib_entry, "year")
            item.doi = self.get_field(bib_entry, "doi")
            item.file = self.format_file(bib_entry)
            item.isbn = self.get_field(bib_entry, "isbn")
            item.publication = self.get_field(bib_entry, "journal")
            item.key = key
            item.language = self.get_field(bib_entry, "language")
            item.issue = self.get_field(bib_entry, "number")
            item.notes = self.get_field(bib_entry, "annote")
            item.pages = self.get_field(bib_entry, "pages")
            item.publisher = self.get_field(bib_entry, "publisher")
            item.tags = self.get_field(bib_entry, "keyword")
            item.title = self.get_field(bib_entry, "title")
            item.type = bib_entry.type
            item.url = self.format_url(bib_entry)
            item.volume = self.get_field(bib_entry, "volume")
            item.combine()
            items.append(item)
        return items

    def _read_file(self, filename):
        """Parse the bibtex file with pybtex and return its database."""
        parser = bibtex.Parser()
        return parser.parse_file(filename)

    def strip_chars(self, string):
        """Remove bibtex grouping braces from a string."""
        return string.replace("{","").replace("}","")

    def get_field(self, entry, field):
        """Return a brace-stripped field value, or "" when absent."""
        output = entry.fields[field] if field in entry.fields else ""
        output = self.strip_chars(output)
        return output

    def format_author(self, entry):
        """
        Returns:
            A pretty representation of the author.
        """
        try:
            persons = entry.persons[u"author"]
            # split "Last, First" so element [0] is the surname
            if sys.version_info[0] == 2:
                authors = [unicode(au).split(",") for au in persons]
            elif sys.version_info[0] == 3:
                authors = [str(au).split(",") for au in persons]
        except KeyError:
            authors = []
        if authors == []:
            return ""
        if len(authors) > int(self.et_al_limit):
            return u"%s et al." % authors[0][0]
        if len(authors) > 2:
            auth_string = u""
            for author in authors[:-1]:
                auth_string += author[0] + ', '
            return auth_string + u"& " + authors[-1][0]
        if len(authors) == 2:
            return authors[0][0] + u" & " + authors[1][0]
        return ', '.join(authors[0])

    def format_file(self, entry):
        """Return the first attached PDF path from the "file" field."""
        output = ""
        if u"file" in entry.fields:
            # file entries look like "<label>:<path>:<mime-type>;..."
            for file in entry.fields[u"file"].split(";"):
                details = file.split(":")
                if 2 < len(details) and details[2] == "application/pdf":
                    output = details[1]
                    break
        return output

    def format_url(self, entry):
        """Return the first non-PDF attachment path from the "file" field."""
        output = ""
        if u"file" in entry.fields:
            for file in entry.fields[u"file"].split(";"):
                details = file.split(":")
                if 2 < len(details) and details[2] != "application/pdf":
                    output = details[1]
                    break
        return output

    def format_tags(self, entry):
        """Return the "keywords" field as a display string."""
        # Fixed: this method was defined without `self`, so calling it on an
        # instance bound the instance to `entry` and always failed.
        output = ""
        if u"keywords" in entry.fields:
            # NOTE(review): pybtex field values are strings, so this join
            # iterates characters -- looks suspicious, preserved as-is;
            # confirm intended behavior before changing.
            output = ", ".join(entry.fields[u"keywords"])
        return output
|
# ERPNext - web based ERP (http://erpnext.com)
# Copyright (C) 2012 Web Notes Technologies Pvt Ltd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
import webnotes
from webnotes.model import db_exists
from webnotes.model.bean import copy_doclist
from webnotes import msgprint
sql = webnotes.conn.sql
class DocType:
    """Controller for the POS Setting doctype."""

    def __init__(self, doc, doclist=None):
        # None default instead of a mutable [] default, which would be
        # shared across all instances
        self.doc, self.doclist = doc, doclist or []

    # -------------- get naming series from sales invoice --------------
    def get_series(self):
        """Return the naming-series options defined on Sales Invoice."""
        import webnotes.model.doctype
        docfield = webnotes.model.doctype.get('Sales Invoice')
        series = [d.options for d in docfield
            if d.doctype == 'DocField' and d.fieldname == 'naming_series']
        return series and series[0] or ''

    def validate(self):
        """Disallow a second POS Setting for the same user/company pair."""
        # Parameterized query: the previous %-formatted SQL string was
        # vulnerable to SQL injection via the user/name/company values.
        res = sql("""select name, user from `tabPOS Setting`
            where ifnull(user, '') = %s and name != %s and company = %s""",
            (self.doc.user, self.doc.name, self.doc.company))
        if res:
            if res[0][1]:
                # a user-specific setting already exists
                msgprint("POS Setting '%s' already created for user: '%s' and company: '%s'" %
                    (res[0][0], res[0][1], self.doc.company), raise_exception=1)
            else:
                # a company-wide (global) setting already exists
                msgprint("Global POS Setting already created - %s for this company: '%s'" %
                    (res[0][0], self.doc.company), raise_exception=1)
Make the Expense Account field mandatory in POS Setting when automatic inventory accounting is enabled.
# ERPNext - web based ERP (http://erpnext.com)
# Copyright (C) 2012 Web Notes Technologies Pvt Ltd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
import webnotes
from webnotes import msgprint, _
from webnotes.utils import cint
class DocType:
    """Controller for the POS Setting doctype."""

    def __init__(self, doc, doclist=None):
        # None default instead of a mutable [] default, which would be
        # shared across all instances
        self.doc, self.doclist = doc, doclist or []

    def get_series(self):
        """Return the naming-series options defined on Sales Invoice."""
        import webnotes.model.doctype
        docfield = webnotes.model.doctype.get('Sales Invoice')
        series = [d.options for d in docfield
            if d.doctype == 'DocField' and d.fieldname == 'naming_series']
        return series and series[0] or ''

    def validate(self):
        """Run all POS Setting validations."""
        self.check_for_duplicate()
        self.validate_expense_account()

    def check_for_duplicate(self):
        """Disallow a second POS Setting for the same user/company pair."""
        res = webnotes.conn.sql("""select name, user from `tabPOS Setting`
            where ifnull(user, '') = %s and name != %s and company = %s""",
            (self.doc.user, self.doc.name, self.doc.company))
        if res:
            if res[0][1]:
                # a user-specific setting already exists
                msgprint("POS Setting '%s' already created for user: '%s' and company: '%s'" %
                    (res[0][0], res[0][1], self.doc.company), raise_exception=1)
            else:
                # a company-wide (global) setting already exists
                msgprint("Global POS Setting already created - %s for this company: '%s'" %
                    (res[0][0], self.doc.company), raise_exception=1)

    def validate_expense_account(self):
        """Expense account is required when auto inventory accounting is on."""
        if cint(webnotes.defaults.get_global_default("auto_inventory_accounting")) \
                and not self.doc.expense_account:
            msgprint(_("Expense Account is mandatory"), raise_exception=1)
"""
This will be part of guitools
Since guitools is currently in a state of flux,
we will merge those to module later
"""
# Then build and install the modules
from distutils.core import setup, Extension
setup(
name="guicomm",
version = "0.1",
description = "Python module for SANS gui tools",
author = "University of Tennessee",
#author_email = "",
url = "http://danse.chem.utk.edu",
# Place this module under the sans package
#ext_package = "sans",
# Use the pure python modules
package_dir = {"sans.guicomm":"."},
packages = ["sans.guicomm"]
)
Make the setup script default to the 'install' command when run without arguments.
"""
This will be part of guitools
Since guitools is currently in a state of flux,
we will merge those to module later
"""
import sys
if len(sys.argv) == 1:
sys.argv.append('install')
# Then build and install the modules
from distutils.core import setup, Extension
setup(
name="guicomm",
version = "0.1",
description = "Python module for SANS gui tools",
author = "University of Tennessee",
#author_email = "",
url = "http://danse.chem.utk.edu",
# Place this module under the sans package
#ext_package = "sans",
# Use the pure python modules
package_dir = {"sans.guicomm":"."},
packages = ["sans.guicomm"]
)
|
# Jupyter Notebook server configuration.
# `c` is the config object injected by the notebook server when it executes
# this file; it is intentionally not defined here.
import os

cert = '/auth/cert.pem'
key = '/auth/privkey.pem'
pwd = '/auth/pwd.txt'

# Password created with IPython.lib.passwd().
# If no password file is mounted, fall back to the server defaults.
try:
    with open(pwd) as f:
        c.NotebookApp.password = f.readline().rstrip(os.linesep)
except FileNotFoundError:
    pass

# Self-signed or recognized SSL certificate.
# openssl req -x509 -nodes -days 20 -newkey rsa:1024 -keyout privkey.pem -out cert.pem
# sudo letsencrypt certonly --standalone -d 54.93.112.97.xip.io
# Enable HTTPS only when both cert and key are present.
if os.path.isfile(cert) and os.path.isfile(key):
    c.NotebookApp.certfile = cert
    c.NotebookApp.keyfile = key
disable token-based authentication
# Jupyter Notebook server configuration.
# `c` is the config object injected by the notebook server when it executes
# this file; it is intentionally not defined here.
import os

cert = '/auth/cert.pem'
key = '/auth/privkey.pem'
pwd = '/auth/pwd.txt'

# Password created with IPython.lib.passwd().
# If no password file is mounted, fall back to the server defaults.
try:
    with open(pwd) as f:
        c.NotebookApp.password = f.readline().rstrip(os.linesep)
except FileNotFoundError:
    pass

# No token-based authentication. This notebook will be protected by a password
# if made publicly available.
c.NotebookApp.token = ''

# Self-signed or recognized SSL certificate.
# openssl req -x509 -nodes -days 20 -newkey rsa:1024 -keyout privkey.pem -out cert.pem
# sudo letsencrypt certonly --standalone -d 54.93.112.97.xip.io
# Enable HTTPS only when both cert and key are present.
if os.path.isfile(cert) and os.path.isfile(key):
    c.NotebookApp.certfile = cert
    c.NotebookApp.keyfile = key
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# vim:fenc=utf-8 tabstop=4 expandtab shiftwidth=4 softtabstop=4
#
# Copyright © Mike Dacre <mike.dacre@gmail.com>
#
# Distributed under terms of the MIT license
"""
#=======================================================================================#
# #
# FILE: run_matlab (python 3) #
# AUTHOR: Michael D Dacre, mike.dacre@gmail.com #
# ORGANIZATION: Stanford University #
# LICENSE: MIT License, Property of Stanford, Use as you wish #
# VERSION: 0.1 #
# CREATED: 2014-08-22 16:26 #
# Last modified: 2014-08-22 16:56
# #
# DESCRIPTION: Create a bunch of temporary matlab scripts to call some other #
# matlab script and then submit to the cluster. #
# #
# Requires that the matlab function be written to accept imput #
# variables. #
# #
# Right now only works with torque jobs tools and requires #
# pbs_torque from #
# https://github.com/MikeDacre/torque_queue_manager and logging #
# functions from http://j.mp/python_logme #
# #
# USAGE: -p or --path allows the addition of multiple matlab paths #
# -v or --variables is a list of variables to pass to the function #
# This list must be space separated. Each space separated item will #
# be run as a separate matlab job. #
# You may comma separate variables if you wish those variables to #
# passed as multiple arguments to a single function (e.g. not as #
# separate jobs. #
# #
# <function> is a positional arg and is the name of the function to run #
# #
# Run as a script or import as a module. See '-h' or 'help' for usage #
# #
#=======================================================================================#
"""
import logme
from pbs_torque import job
def _get_args():
"""Command Line Argument Parsing"""
import argparse, sys
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter)
# Optional Arguments
parser.add_argument('-p', '--path', nargs='?',
help="Comma or space separated list of matlab paths")
parser.add_argument('-V', '--variables', nargs='?',
help="Space separated list variables to pass to function")
parser.add_argument('-v', dest='verbose', help="Verbose output")
# Function name
parser.add_argument('function',
help="Name of the matlab function to run")
# Optional Files
parser.add_argument('-l', '--logfile', nargs='?',
help="Log File, Default STDERR (append mode)")
return parser
# Main function for direct running
def main():
    """Run directly"""
    # Get commandline arguments
    parser = _get_args()
    args = parser.parse_args()
    # NOTE(review): `args` is parsed but never used -- the script appears
    # to be a work in progress at this point; job submission is missing.

# The end
if __name__ == '__main__':
    main()
Wrapper functions done, starting on main function
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# vim:fenc=utf-8 tabstop=4 expandtab shiftwidth=4 softtabstop=4
#
# Copyright © Mike Dacre <mike.dacre@gmail.com>
#
# Distributed under terms of the MIT license
"""
#=======================================================================================#
# #
# FILE: run_matlab (python 3) #
# AUTHOR: Michael D Dacre, mike.dacre@gmail.com #
# ORGANIZATION: Stanford University #
# LICENSE: MIT License, Property of Stanford, Use as you wish #
# VERSION: 0.1 #
# CREATED: 2014-08-22 16:26 #
# Last modified: 2014-08-25 13:23
# #
# DESCRIPTION: Create a bunch of temporary matlab scripts to call some other #
# matlab script and then submit to the cluster. #
# #
# Requires that the matlab function be written to accept imput #
# variables. #
# #
# Right now only works with torque jobs tools and requires #
# pbs_torque from #
# https://github.com/MikeDacre/torque_queue_manager and logging #
# functions from http://j.mp/python_logme #
# #
# USAGE: -p or --path allows the addition of multiple matlab paths #
# <function> is a positional arg and is the name of the function to run #
# #
# STDIN: Variable list for matlab #
# This list must be space or newline separated. Each space separated #
# item will be run as a separate matlab job. # #
# To provide multiple variables to the matlab function, #
# comma separate the variables on a single line. #
# #
# #
# Run as a script or import as a module. See '-h' or 'help' for usage #
# #
#=======================================================================================#
"""
# Set to true to get pointlessly verbose output
# Module-level switch read by main() to print paths/function/variables.
debug=True
def submit_matlab_jobs(paths, variables, function, verbose=False, logfile=None):
    """ Take a list of paths, a list of lists of variables, and a single Function
    and submit one job for every list in the lists of variables (each item of
    the second dimension will be submitted as an additional argument to the
    matlab function. """
    # NOTE(review): not implemented yet -- the docstring-only body makes this
    # a no-op placeholder even though main() already calls it.
import logme
from pbs_torque import job
def _get_args():
""" Command Line Argument Parsing """
import argparse
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter)
# Optional Arguments
parser.add_argument('-p', '--path',
help="Comma separated list of matlab paths")
parser.add_argument('-v', '--verbose', action='store_true', help="Verbose output")
# Function name
parser.add_argument('function',
help="Name of the matlab function to run")
# Optional Files
parser.add_argument('-l', '--logfile',
help="Log File, Default STDERR (append mode)")
return parser
# Main function for direct running
def main():
    """Run directly: read variable lists from STDIN and submit one matlab
    job per input line."""
    from sys import stdin

    # Get commandline arguments
    parser = _get_args()
    args = parser.parse_args()

    # Get variable list from STDIN: one job per line, comma-separated
    # values on a line become multiple arguments to the matlab function.
    variables = [i.split(',') for i in stdin.read().rstrip().split('\n')]

    # Split paths; -p is optional, so guard against args.path being None
    # (the old code crashed with AttributeError when -p was omitted).
    paths = args.path.split(',') if args.path else []

    if debug:
        print(paths)
        print(args.function)
        print(variables)

    # Fixed: was `function=function`, which raised NameError because no
    # local named `function` exists -- the parsed value is args.function.
    submit_matlab_jobs(paths=paths, variables=variables,
                       function=args.function, verbose=args.verbose,
                       logfile=args.logfile)

# The end
if __name__ == '__main__':
    main()
|
# Copyright 2019 PerfKitBenchmarker Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module containing CUDA toolkit installation and cleanup functions.
This module installs CUDA toolkit from NVIDIA, configures gpu clock speeds
and autoboost settings, and exposes a method to collect gpu metadata. Currently
Tesla K80 and P100 gpus are supported, provided that there is only a single
type of gpu per system.
"""
import posixpath
import re
from absl import flags
from perfkitbenchmarker.linux_packages import nvidia_driver
# There is no way to tell the apt-get installation
# method what dir to install the cuda toolkit to
CUDA_HOME = '/usr/local/cuda'

# --cuda_toolkit_version selects which installer below is used; "None" or
# the empty string skips CUDA installation entirely.
flags.DEFINE_enum(
    'cuda_toolkit_version',
    '11.0', ['9.0', '10.0', '10.1', '10.2', '11.0', 'None', ''],
    'Version of CUDA Toolkit to install. '
    'Input "None" or empty string to skip installation',
    module_name=__name__)

_INSTALL_DEMO_SUITE = flags.DEFINE_boolean(
    'cuda_install_demo_suite', False, 'Install CUDA demo suite.')

FLAGS = flags.FLAGS

# Download URL templates; {os} is substituted with the VM's OS_TYPE.
CUDA_PIN = 'https://developer.download.nvidia.com/compute/cuda/repos/{os}/x86_64/cuda-{os}.pin'

CUDA_11_0_TOOLKIT = 'http://developer.download.nvidia.com/compute/cuda/11.0.2/local_installers/cuda-repo-{os}-11-0-local_11.0.2-450.51.05-1_amd64.deb'

CUDA_10_2_TOOLKIT = 'http://developer.download.nvidia.com/compute/cuda/10.2/Prod/local_installers/cuda-repo-{os}-10-2-local-10.2.89-440.33.01_1.0-1_amd64.deb'

CUDA_10_1_TOOLKIT = 'https://developer.download.nvidia.com/compute/cuda/10.1/Prod/local_installers/cuda-repo-{os}-10-1-local-10.1.243-418.87.00_1.0-1_amd64.deb'

CUDA_10_0_TOOLKIT = 'https://developer.nvidia.com/compute/cuda/10.0/Prod/local_installers/cuda-repo-{os}-10-0-local-10.0.130-410.48_1.0-1_amd64'

CUDA_9_0_TOOLKIT = 'https://developer.nvidia.com/compute/cuda/9.0/Prod/local_installers/cuda-repo-{os}-9-0-local_9.0.176-1_amd64-deb'
CUDA_9_0_PATCH = 'https://developer.nvidia.com/compute/cuda/9.0/Prod/patches/1/cuda-repo-{os}-9-0-local-cublas-performance-update_1.0-1_amd64-deb'
class UnsupportedCudaVersionError(Exception):
  """Raised when no installer exists for the requested CUDA version."""
  pass
class NvccParseOutputError(Exception):
  """Raised when the version cannot be parsed out of `nvcc --version`."""
  pass
def GetMetadata(vm):
  """Returns gpu-specific metadata as a dict.

  Args:
    vm: virtual machine to operate on

  Returns:
    A dict of gpu- and CUDA- specific metadata.
  """
  # Start from the driver metadata and layer the toolkit details on top.
  metadata = nvidia_driver.GetMetadata(vm)
  metadata.update({
      'cuda_toolkit_version': FLAGS.cuda_toolkit_version,
      'cuda_toolkit_home': CUDA_HOME,
      'vm_name': vm.name,
  })
  return metadata
def DoPostInstallActions(vm):
  """Perform post NVIDIA driver install action on the vm.

  Args:
    vm: the virtual machine to operate on
  """
  # Delegates entirely to the nvidia_driver package.
  nvidia_driver.DoPostInstallActions(vm)
def GetCudaToolkitVersion(vm):
  """Get the CUDA toolkit version on the vm, based on nvcc.

  Args:
    vm: the virtual machine to query

  Returns:
    A string containing the active CUDA toolkit version,
    None if nvcc could not be found

  Raises:
    NvccParseOutputError: On can not parse nvcc output
  """
  stdout, _ = vm.RemoteCommand(
      posixpath.join(CUDA_HOME, 'bin/nvcc') + ' --version',
      ignore_failure=True, suppress_warning=True)
  # Empty output means nvcc is absent at the expected location.
  if not stdout.rstrip():
    return None
  match = re.search(r'release (\S+),', stdout)
  if not match:
    raise NvccParseOutputError('Unable to parse nvcc version output from {}'
                               .format(stdout))
  return str(match.group(1))
def _InstallCudaPatch(vm, patch_url):
"""Installs CUDA Toolkit patch from NVIDIA.
Args:
vm: VM to install patch on
patch_url: url of the CUDA patch to install
"""
# Need to append .deb to package name because the file downloaded from
# NVIDIA is missing the .deb extension.
basename = posixpath.basename(patch_url) + '.deb'
vm.RemoteCommand('wget -q %s -O %s' % (patch_url,
basename))
vm.RemoteCommand('sudo dpkg -i %s' % basename)
vm.RemoteCommand('sudo apt-get update')
# Need to be extra careful on the command below because without these
# precautions, it was brining up a menu option about grub's menu.lst
# on AWS Ubuntu16.04 and thus causing the RemoteCommand to hang and fail.
vm.RemoteCommand(
'sudo DEBIAN_FRONTEND=noninteractive apt-get upgrade -yq cuda')
def _InstallCuda9Point0(vm):
  """Installs CUDA Toolkit 9.0 from NVIDIA.

  Args:
    vm: VM to install CUDA on
  """
  # The download URL lacks the .deb extension; append it for dpkg.
  basename = posixpath.basename(CUDA_9_0_TOOLKIT.format(os=vm.OS_TYPE)) + '.deb'
  vm.RemoteCommand('wget -q %s -O %s' % (CUDA_9_0_TOOLKIT.format(os=vm.OS_TYPE),
                                         basename))
  vm.RemoteCommand('sudo dpkg -i %s' % basename)
  # Trust the local repo's signing key before running apt-get update.
  vm.RemoteCommand('sudo apt-key add /var/cuda-repo-9-0-local/7fa2af80.pub')
  vm.RemoteCommand('sudo apt-get update')
  vm.InstallPackages('cuda-toolkit-9-0 cuda-tools-9-0 cuda-libraries-9-0 '
                     'cuda-libraries-dev-9-0')
  # 9.0 additionally requires the cuBLAS performance-update patch.
  _InstallCudaPatch(vm, CUDA_9_0_PATCH.format(os=vm.OS_TYPE))
def _InstallCuda10Point0(vm):
  """Installs CUDA Toolkit 10.0 from NVIDIA.

  Args:
    vm: VM to install CUDA on
  """
  # The download URL lacks the .deb extension; append it for dpkg.
  basename = (
      f'{posixpath.basename(CUDA_10_0_TOOLKIT.format(os=vm.OS_TYPE))}.deb')
  vm.RemoteCommand(f'wget -q {CUDA_10_0_TOOLKIT.format(os=vm.OS_TYPE)} -O '
                   f'{basename}')
  vm.RemoteCommand('sudo dpkg -i %s' % basename)
  # Trust the local repo's signing key before running apt-get update.
  vm.RemoteCommand('sudo apt-key add '
                   '/var/cuda-repo-10-0-local-10.0.130-410.48/7fa2af80.pub')
  vm.RemoteCommand('sudo apt-get update')
  vm.InstallPackages('cuda-toolkit-10-0 cuda-tools-10-0 cuda-libraries-10-0 '
                     'cuda-libraries-dev-10-0')
def _InstallCuda10Point1(vm):
  """Installs CUDA Toolkit 10.1 from NVIDIA.

  Args:
    vm: VM to install CUDA on
  """
  basename = posixpath.basename(CUDA_10_1_TOOLKIT.format(os=vm.OS_TYPE))
  # The pin file gives the NVIDIA repository priority over distro packages.
  vm.RemoteCommand('wget -q %s' % CUDA_PIN.format(os=vm.OS_TYPE))
  vm.RemoteCommand(f'sudo mv cuda-{vm.OS_TYPE}.pin '
                   '/etc/apt/preferences.d/cuda-repository-pin-600')
  vm.RemoteCommand('wget -q %s' % CUDA_10_1_TOOLKIT.format(os=vm.OS_TYPE))
  vm.RemoteCommand('sudo dpkg -i %s' % basename)
  # Trust the local repo's signing key before running apt-get update.
  vm.RemoteCommand('sudo apt-key add '
                   '/var/cuda-repo-10-1-local-10.1.243-418.87.00/7fa2af80.pub')
  vm.RemoteCommand('sudo apt-get update')
  vm.InstallPackages('cuda-toolkit-10-1 cuda-tools-10-1 cuda-libraries-10-1 '
                     'cuda-libraries-dev-10-1')
def _InstallCuda10Point2(vm):
  """Installs CUDA Toolkit 10.2 from NVIDIA.

  Args:
    vm: VM to install CUDA on
  """
  basename = posixpath.basename(CUDA_10_2_TOOLKIT.format(os=vm.OS_TYPE))
  # The pin file gives the NVIDIA repository priority over distro packages.
  vm.RemoteCommand('wget -q %s' % CUDA_PIN.format(os=vm.OS_TYPE))
  vm.RemoteCommand(f'sudo mv cuda-{vm.OS_TYPE}.pin '
                   '/etc/apt/preferences.d/cuda-repository-pin-600')
  vm.RemoteCommand('wget -q %s' % CUDA_10_2_TOOLKIT.format(os=vm.OS_TYPE))
  vm.RemoteCommand('sudo dpkg -i %s' % basename)
  # Trust the local repo's signing key before running apt-get update.
  vm.RemoteCommand('sudo apt-key add '
                   '/var/cuda-repo-10-2-local-10.2.89-440.33.01/7fa2af80.pub')
  vm.RemoteCommand('sudo apt-get update')
  vm.InstallPackages('cuda-toolkit-10-2 cuda-tools-10-2 cuda-libraries-10-2 '
                     'cuda-libraries-dev-10-2')
def _InstallCuda11Point0(vm):
  """Installs CUDA Toolkit 11.0 from NVIDIA.

  Args:
    vm: VM to install CUDA on
  """
  basename = posixpath.basename(CUDA_11_0_TOOLKIT.format(os=vm.OS_TYPE))
  # The pin file gives the NVIDIA repository priority over distro packages.
  vm.RemoteCommand('wget -q %s' % CUDA_PIN.format(os=vm.OS_TYPE))
  vm.RemoteCommand(f'sudo mv cuda-{vm.OS_TYPE}.pin '
                   '/etc/apt/preferences.d/cuda-repository-pin-600')
  vm.RemoteCommand('wget -q %s' % CUDA_11_0_TOOLKIT.format(os=vm.OS_TYPE))
  vm.RemoteCommand('sudo dpkg -i %s' % basename)
  # Trust the local repo's signing key before running apt-get update.
  vm.RemoteCommand('sudo apt-key add '
                   f'/var/cuda-repo-{vm.OS_TYPE}-11-0-local/7fa2af80.pub')
  vm.RemoteCommand('sudo apt-get update')
  vm.InstallPackages('cuda-toolkit-11-0 cuda-tools-11-0 cuda-libraries-11-0 '
                     'cuda-libraries-dev-11-0')
def AptInstall(vm):
  """Installs CUDA toolkit on the VM if not already installed.

  Args:
    vm: VM to install CUDA on

  Raises:
    UnsupportedCudaVersionError: if the flag requests a version with no
      installer.
  """
  version_to_install = FLAGS.cuda_toolkit_version
  if (version_to_install == 'None' or not version_to_install):
    return
  current_version = GetCudaToolkitVersion(vm)
  if current_version == version_to_install:
    return

  # If the requested version is already unpacked on disk, just repoint the
  # /usr/local/cuda symlink at it instead of reinstalling.
  # Consistency fix: use version_to_install throughout instead of mixing it
  # with FLAGS.cuda_toolkit_version (same value, one source of truth).
  cuda_path = f'/usr/local/cuda-{version_to_install}'
  if vm.TryRemoteCommand(f'stat {cuda_path}'):
    vm.RemoteCommand('sudo rm -rf /usr/local/cuda', ignore_failure=True)
    vm.RemoteCommand(f'sudo ln -s {cuda_path} /usr/local/cuda')
    return

  vm.Install('build_tools')
  vm.Install('wget')
  vm.Install('nvidia_driver')

  # Dispatch table keeps the version -> installer mapping in one place.
  installers = {
      '9.0': _InstallCuda9Point0,
      '10.0': _InstallCuda10Point0,
      '10.1': _InstallCuda10Point1,
      '10.2': _InstallCuda10Point2,
      '11.0': _InstallCuda11Point0,
  }
  installer = installers.get(version_to_install)
  if installer is None:
    raise UnsupportedCudaVersionError()
  installer(vm)

  if _INSTALL_DEMO_SUITE.value:
    vm.InstallPackages(
        f'cuda-demo-suite-{version_to_install.replace(".", "-")}')
  DoPostInstallActions(vm)
  # NVIDIA CUDA Profile Tools Interface.
  # This library provides advanced profiling support
  if version_to_install in ('9.0', '10.0'):
    # cupti is part of cuda>=10.1, and installed as cuda-cupti-10-1/2
    vm.RemoteCommand('sudo apt-get install -y libcupti-dev')
def YumInstall(vm):
  """Installs CUDA toolkit on the VM if not already installed.

  TODO: PKB currently only supports the installation of CUDA toolkit on Ubuntu.

  Args:
    vm: VM to install CUDA on
  """
  del vm  # unused
  # rpm-based installation flow differs and has not been implemented.
  raise NotImplementedError()
def CheckPrerequisites():
  """Verifies that the required resources are present.

  Raises:
    perfkitbenchmarker.data.ResourceNotFound: On missing resource.
  """
  # No local data files are required for this package.
  pass
def Uninstall(vm):
  """Removes the CUDA toolkit.

  Note that reinstallation does not work correctly, i.e. you cannot reinstall
  CUDA by calling _Install() again.

  Args:
    vm: VM that installed CUDA
  """
  # Drop the downloaded repo package(s) and the whole toolkit tree.
  vm.RemoteCommand('rm -f cuda-repo-%s*' % vm.OS_TYPE)
  vm.RemoteCommand('sudo rm -rf %s' % CUDA_HOME)
Adds CUDA 11.1, 11.2 and 11.3 support.
PiperOrigin-RevId: 378410989
# Copyright 2019 PerfKitBenchmarker Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module containing CUDA toolkit installation and cleanup functions.
This module installs CUDA toolkit from NVIDIA, configures gpu clock speeds
and autoboost settings, and exposes a method to collect gpu metadata. Currently
Tesla K80 and P100 gpus are supported, provided that there is only a single
type of gpu per system.
"""
import posixpath
import re
from absl import flags
from perfkitbenchmarker.linux_packages import nvidia_driver
# There is no way to tell the apt-get installation
# method what dir to install the cuda toolkit to
CUDA_HOME = '/usr/local/cuda'

# Which CUDA release to install; 'None' / '' skips installation entirely.
flags.DEFINE_enum(
    'cuda_toolkit_version', '11.0',
    ['9.0', '10.0', '10.1', '10.2', '11.0', '11.1', '11.2', '11.3', 'None', ''],
    'Version of CUDA Toolkit to install. '
    'Input "None" or empty string to skip installation',
    module_name=__name__)
_INSTALL_DEMO_SUITE = flags.DEFINE_boolean(
    'cuda_install_demo_suite', False, 'Install CUDA demo suite.')
FLAGS = flags.FLAGS

# apt pin file that raises the NVIDIA repo's priority over distro packages.
CUDA_PIN = 'https://developer.download.nvidia.com/compute/cuda/repos/{os}/x86_64/cuda-{os}.pin'
# Local-installer repo packages per CUDA release; '{os}' is the VM's OS_TYPE.
CUDA_11_0_TOOLKIT = 'http://developer.download.nvidia.com/compute/cuda/11.0.3/local_installers/cuda-repo-{os}-11-0-local_11.0.3-450.51.06-1_amd64.deb'
CUDA_11_1_TOOLKIT = 'http://developer.download.nvidia.com/compute/cuda/11.1.1/local_installers/cuda-repo-{os}-11-1-local_11.1.1-455.32.00-1_amd64.deb'
CUDA_11_2_TOOLKIT = 'http://developer.download.nvidia.com/compute/cuda/11.2.2/local_installers/cuda-repo-{os}-11-2-local_11.2.2-460.32.03-1_amd64.deb'
CUDA_11_3_TOOLKIT = 'http://developer.download.nvidia.com/compute/cuda/11.3.1/local_installers/cuda-repo-{os}-11-3-local_11.3.1-465.19.01-1_amd64.deb'
CUDA_10_2_TOOLKIT = 'http://developer.download.nvidia.com/compute/cuda/10.2/Prod/local_installers/cuda-repo-{os}-10-2-local-10.2.89-440.33.01_1.0-1_amd64.deb'
CUDA_10_1_TOOLKIT = 'https://developer.download.nvidia.com/compute/cuda/10.1/Prod/local_installers/cuda-repo-{os}-10-1-local-10.1.243-418.87.00_1.0-1_amd64.deb'
# NOTE: the 10.0/9.0 download names lack a proper extension; installers below
# append/adjust '.deb' when fetching them.
CUDA_10_0_TOOLKIT = 'https://developer.nvidia.com/compute/cuda/10.0/Prod/local_installers/cuda-repo-{os}-10-0-local-10.0.130-410.48_1.0-1_amd64'
CUDA_9_0_TOOLKIT = 'https://developer.nvidia.com/compute/cuda/9.0/Prod/local_installers/cuda-repo-{os}-9-0-local_9.0.176-1_amd64-deb'
CUDA_9_0_PATCH = 'https://developer.nvidia.com/compute/cuda/9.0/Prod/patches/1/cuda-repo-{os}-9-0-local-cublas-performance-update_1.0-1_amd64-deb'
class UnsupportedCudaVersionError(Exception):
  """Raised when asked to install a CUDA version with no installer here."""
  pass


class NvccParseOutputError(Exception):
  """Raised when `nvcc --version` output cannot be parsed."""
  pass
def GetMetadata(vm):
  """Returns gpu-specific metadata as a dict.

  Args:
    vm: virtual machine to operate on

  Returns:
    A dict of gpu- and CUDA- specific metadata.
  """
  # Start from the driver metadata and layer the CUDA details on top.
  metadata = nvidia_driver.GetMetadata(vm)
  metadata.update({
      'cuda_toolkit_version': FLAGS.cuda_toolkit_version,
      'cuda_toolkit_home': CUDA_HOME,
      'vm_name': vm.name,
  })
  return metadata
def DoPostInstallActions(vm):
  """Perform post NVIDIA driver install action on the vm.

  Delegates to the nvidia_driver package helper of the same name.

  Args:
    vm: the virtual machine to operate on
  """
  nvidia_driver.DoPostInstallActions(vm)
def GetCudaToolkitVersion(vm):
  """Get the CUDA toolkit version on the vm, based on nvcc.

  Args:
    vm: the virtual machine to query

  Returns:
    A string containing the active CUDA toolkit version,
    None if nvcc could not be found

  Raises:
    NvccParseOutputError: On can not parse nvcc output
  """
  nvcc = posixpath.join(CUDA_HOME, 'bin/nvcc')
  stdout, _ = vm.RemoteCommand(
      f'{nvcc} --version', ignore_failure=True, suppress_warning=True)
  if not stdout.rstrip():
    # nvcc is absent (or printed nothing): no toolkit installed.
    return None
  match = re.search(r'release (\S+),', stdout)
  if match is None:
    raise NvccParseOutputError('Unable to parse nvcc version output from {}'
                               .format(stdout))
  return str(match.group(1))
def _InstallCudaPatch(vm, patch_url):
  """Installs a CUDA Toolkit patch from NVIDIA.

  Args:
    vm: VM to install patch on
    patch_url: url of the CUDA patch to install
  """
  # NVIDIA's download is missing the .deb extension; restore it locally.
  basename = f'{posixpath.basename(patch_url)}.deb'
  vm.RemoteCommand(f'wget -q {patch_url} -O {basename}')
  vm.RemoteCommand(f'sudo dpkg -i {basename}')
  vm.RemoteCommand('sudo apt-get update')
  # Non-interactive upgrade: grub's menu.lst prompt would otherwise hang the
  # RemoteCommand (seen on AWS Ubuntu 16.04).
  vm.RemoteCommand(
      'sudo DEBIAN_FRONTEND=noninteractive apt-get upgrade -yq cuda')
def _InstallCuda9Point0(vm):
  """Installs CUDA Toolkit 9.0 from NVIDIA.

  Args:
    vm: VM to install CUDA on
  """
  toolkit_url = CUDA_9_0_TOOLKIT.format(os=vm.OS_TYPE)
  # The download name lacks the .deb extension; add it locally.
  basename = f'{posixpath.basename(toolkit_url)}.deb'
  vm.RemoteCommand(f'wget -q {toolkit_url} -O {basename}')
  vm.RemoteCommand(f'sudo dpkg -i {basename}')
  vm.RemoteCommand('sudo apt-key add /var/cuda-repo-9-0-local/7fa2af80.pub')
  vm.RemoteCommand('sudo apt-get update')
  vm.InstallPackages('cuda-toolkit-9-0 cuda-tools-9-0 cuda-libraries-9-0 '
                     'cuda-libraries-dev-9-0')
  # 9.0 additionally needs the cuBLAS performance-update patch.
  _InstallCudaPatch(vm, CUDA_9_0_PATCH.format(os=vm.OS_TYPE))
def _InstallCuda10Point0(vm):
  """Installs CUDA Toolkit 10.0 from NVIDIA.

  Args:
    vm: VM to install CUDA on
  """
  toolkit_url = CUDA_10_0_TOOLKIT.format(os=vm.OS_TYPE)
  # The download name lacks the .deb extension; add it locally.
  basename = posixpath.basename(toolkit_url) + '.deb'
  vm.RemoteCommand('wget -q %s -O %s' % (toolkit_url, basename))
  vm.RemoteCommand('sudo dpkg -i %s' % basename)
  vm.RemoteCommand('sudo apt-key add '
                   '/var/cuda-repo-10-0-local-10.0.130-410.48/7fa2af80.pub')
  vm.RemoteCommand('sudo apt-get update')
  vm.InstallPackages('cuda-toolkit-10-0 cuda-tools-10-0 cuda-libraries-10-0 '
                     'cuda-libraries-dev-10-0')
def _InstallCuda10Point1(vm):
  """Installs CUDA Toolkit 10.1 from NVIDIA.

  Args:
    vm: VM to install CUDA on
  """
  toolkit_url = CUDA_10_1_TOOLKIT.format(os=vm.OS_TYPE)
  # Pin the NVIDIA repo so its packages win over the distro's.
  vm.RemoteCommand(f'wget -q {CUDA_PIN.format(os=vm.OS_TYPE)}')
  vm.RemoteCommand(f'sudo mv cuda-{vm.OS_TYPE}.pin '
                   '/etc/apt/preferences.d/cuda-repository-pin-600')
  vm.RemoteCommand(f'wget -q {toolkit_url}')
  vm.RemoteCommand(f'sudo dpkg -i {posixpath.basename(toolkit_url)}')
  vm.RemoteCommand('sudo apt-key add '
                   '/var/cuda-repo-10-1-local-10.1.243-418.87.00/7fa2af80.pub')
  vm.RemoteCommand('sudo apt-get update')
  vm.InstallPackages('cuda-toolkit-10-1 cuda-tools-10-1 cuda-libraries-10-1 '
                     'cuda-libraries-dev-10-1')
def _InstallCuda10Point2(vm):
  """Installs CUDA Toolkit 10.2 from NVIDIA.

  Args:
    vm: VM to install CUDA on
  """
  toolkit_url = CUDA_10_2_TOOLKIT.format(os=vm.OS_TYPE)
  # Pin the NVIDIA repo so its packages win over the distro's.
  vm.RemoteCommand(f'wget -q {CUDA_PIN.format(os=vm.OS_TYPE)}')
  vm.RemoteCommand(f'sudo mv cuda-{vm.OS_TYPE}.pin '
                   '/etc/apt/preferences.d/cuda-repository-pin-600')
  vm.RemoteCommand(f'wget -q {toolkit_url}')
  vm.RemoteCommand(f'sudo dpkg -i {posixpath.basename(toolkit_url)}')
  vm.RemoteCommand('sudo apt-key add '
                   '/var/cuda-repo-10-2-local-10.2.89-440.33.01/7fa2af80.pub')
  vm.RemoteCommand('sudo apt-get update')
  vm.InstallPackages('cuda-toolkit-10-2 cuda-tools-10-2 cuda-libraries-10-2 '
                     'cuda-libraries-dev-10-2')
def _InstallCuda11Generic(vm, toolkit_fmt, version_dash):
  """Installs a CUDA Toolkit 11.x release from NVIDIA.

  Args:
    vm: VM to install CUDA on
    toolkit_fmt: format string to use for the toolkit name
    version_dash: Version (ie 11-1) to install
  """
  toolkit = toolkit_fmt.format(os=vm.OS_TYPE)
  # Pin the NVIDIA repo so its packages win over the distro's.
  vm.RemoteCommand(f'wget -q {CUDA_PIN.format(os=vm.OS_TYPE)}')
  vm.RemoteCommand(f'sudo mv cuda-{vm.OS_TYPE}.pin '
                   '/etc/apt/preferences.d/cuda-repository-pin-600')
  vm.RemoteCommand(f'wget -q {toolkit}')
  vm.RemoteCommand(f'sudo dpkg -i {posixpath.basename(toolkit)}')
  vm.RemoteCommand(
      'sudo apt-key add '
      f'/var/cuda-repo-{vm.OS_TYPE}-{version_dash}-local/7fa2af80.pub')
  vm.RemoteCommand('sudo apt-get update')
  vm.InstallPackages(' '.join(
      f'cuda-{component}-{version_dash}'
      for component in ('toolkit', 'tools', 'libraries', 'libraries-dev')))
def _InstallCuda11Point0(vm):
  """Installs CUDA Toolkit 11.0 via the generic 11.x installer."""
  _InstallCuda11Generic(vm, CUDA_11_0_TOOLKIT, '11-0')


def _InstallCuda11Point1(vm):
  """Installs CUDA Toolkit 11.1 via the generic 11.x installer."""
  _InstallCuda11Generic(vm, CUDA_11_1_TOOLKIT, '11-1')


def _InstallCuda11Point2(vm):
  """Installs CUDA Toolkit 11.2 via the generic 11.x installer."""
  _InstallCuda11Generic(vm, CUDA_11_2_TOOLKIT, '11-2')


def _InstallCuda11Point3(vm):
  """Installs CUDA Toolkit 11.3 via the generic 11.x installer."""
  _InstallCuda11Generic(vm, CUDA_11_3_TOOLKIT, '11-3')
def AptInstall(vm):
  """Installs CUDA toolkit on the VM if not already installed."""
  version_to_install = FLAGS.cuda_toolkit_version
  if version_to_install == 'None' or not version_to_install:
    return
  if GetCudaToolkitVersion(vm) == version_to_install:
    # Requested version is already active; nothing to do.
    return
  cuda_path = f'/usr/local/cuda-{FLAGS.cuda_toolkit_version}'
  if vm.TryRemoteCommand(f'stat {cuda_path}'):
    # The toolkit is already on disk; just repoint the /usr/local/cuda symlink.
    vm.RemoteCommand('sudo rm -rf /usr/local/cuda', ignore_failure=True)
    vm.RemoteCommand(f'sudo ln -s {cuda_path} /usr/local/cuda')
    return
  vm.Install('build_tools')
  vm.Install('wget')
  vm.Install('nvidia_driver')
  # Dispatch table instead of an if/elif chain; unknown versions still raise.
  installers = {
      '9.0': _InstallCuda9Point0,
      '10.0': _InstallCuda10Point0,
      '10.1': _InstallCuda10Point1,
      '10.2': _InstallCuda10Point2,
      '11.0': _InstallCuda11Point0,
      '11.1': _InstallCuda11Point1,
      '11.2': _InstallCuda11Point2,
      '11.3': _InstallCuda11Point3,
  }
  installer = installers.get(version_to_install)
  if installer is None:
    raise UnsupportedCudaVersionError()
  installer(vm)
  if _INSTALL_DEMO_SUITE.value:
    vm.InstallPackages(
        f'cuda-demo-suite-{FLAGS.cuda_toolkit_version.replace(".", "-")}')
  DoPostInstallActions(vm)
  # NVIDIA CUDA Profile Tools Interface: advanced profiling support.
  if version_to_install in ('9.0', '10.0'):
    # cupti is part of cuda>=10.1, and installed as cuda-cupti-10-1/2
    vm.RemoteCommand('sudo apt-get install -y libcupti-dev')
def YumInstall(vm):
  """Unsupported: CUDA can only be installed via apt in PKB.

  TODO: PKB currently only supports the installation of CUDA toolkit on Ubuntu.

  Args:
    vm: VM to install CUDA on
  """
  del vm  # unused
  raise NotImplementedError()
def CheckPrerequisites():
  """Verifies that the required resources are present.

  Raises:
    perfkitbenchmarker.data.ResourceNotFound: On missing resource.
  """
  # Nothing to verify for this package.
def Uninstall(vm):
  """Removes the CUDA toolkit.

  Note that reinstallation does not work correctly, i.e. you cannot reinstall
  CUDA by calling _Install() again.

  Args:
    vm: VM that installed CUDA
  """
  # Drop the downloaded repo package(s) and the whole toolkit tree.
  vm.RemoteCommand('rm -f cuda-repo-%s*' % vm.OS_TYPE)
  vm.RemoteCommand('sudo rm -rf %s' % CUDA_HOME)
|
# -*- coding: utf-8 -*-
#The MIT License (MIT)
#
#Copyright (c) 2015,2018 Sami Salkosuo
#
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is
#furnished to do so, subject to the following conditions:
#
#The above copyright notice and this permission notice shall be included in
#all copies or substantial portions of the Software.
#
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
#THE SOFTWARE.
#
#edit command
#
from ..utils.utils import *
from ..database.database import *
from ..utils.functions import *
from .SuperCommand import *
from ..globals import *
from ..globals import GlobalVariables
class EditCommand(SuperCommand):
    """Interactive `edit` command: find matching accounts and update them field by field."""

    def __init__(self,cmd_handler):
        super().__init__(cmd_handler)

    def parseCommandArgs(self,userInputList):
        """Parse `edit NAME [-id]` into self.cmd_args and self.help_text."""
        cmd_parser = ThrowingArgumentParser(prog="edit",description='Edit account(s) that match given string.')
        cmd_parser.add_argument('name', metavar='NAME', type=str, nargs=1,
                    help='Start of name.')
        cmd_parser.add_argument('-id', required=False, action='store_true', help='Treat account name as account unique ID.')
        (self.cmd_args,self.help_text)=parseCommandArgs(cmd_parser,userInputList)

    def execute(self):
        """For each matching account, prompt for new field values and persist them."""
        loadAccounts(GlobalVariables.KEY)
        arg=self.cmd_args.name[0]
        useID=False
        if self.cmd_args.id:
            useID=True
        #put results in list so that update cursor doesn't interfere with select cursor when updating account
        #there is a note about this here: http://apidoc.apsw.googlecode.com/hg/cursor.html
        rows=list(executeSelect(COLUMNS_TO_SELECT_ORDERED_FOR_DISPLAY,arg,useID=useID))
        for row in rows:
            printAccountRow(row)
            if boolValue(prompt("Edit this account (yes/no)? ")):
                # New column values, in the same order as the UPDATE placeholders below.
                values=[]
                name=modPrompt("Name",row[COLUMN_NAME])
                values.append(name)
                URL=modPrompt("URL",row[COLUMN_URL])
                values.append(URL)
                oldUserName=row[COLUMN_USERNAME]
                promptStr="User name OLD: (%s) NEW" % oldUserName
                username=askAccountUsername(promptStr,oldUserName)
                values.append(username)
                email=modPrompt("Email",row[COLUMN_EMAIL])
                values.append(email)
                originalPassword=row[COLUMN_PASSWORD]
                pwd=askAccountPassword("Password OLD: (%s) NEW:" % (originalPassword),"Password generator is available. Type your password or type 'p'/'ps' to generate password or 'c' to use original password.",originalPassword)
                values.append(pwd)
                comment=modPrompt("Comment",row[COLUMN_COMMENT])
                values.append(comment)
                updated=formatTimestamp(currentTimestamp())
                values.append(updated)
                id=row[COLUMN_ID]
                #if ID is 0, generate new ID
                if int(id) == 0:
                    id=generateNewID()
                values.append(id)
                created=row[COLUMN_CREATED]
                values.append(created)
                # NOTE(review): the row is matched on CREATED (the WHERE column),
                # not ID -- presumably because ID may have just been regenerated;
                # confirm CREATED is unique per account.
                sql="update accounts set %s=?,%s=?,%s=?,%s=?,%s=?,%s=?,%s=?,%s=? where %s=?" % (
                    COLUMN_NAME,
                    COLUMN_URL,
                    COLUMN_USERNAME,
                    COLUMN_EMAIL,
                    COLUMN_PASSWORD,
                    COLUMN_COMMENT,
                    COLUMN_UPDATED,
                    COLUMN_ID,
                    COLUMN_CREATED
                    )
                executeSql(sql,tuple(values),commit=True)
                saveAccounts()
                print("Account updated.")
After editing an account, its password is copied to the clipboard.
# -*- coding: utf-8 -*-
#The MIT License (MIT)
#
#Copyright (c) 2015,2018 Sami Salkosuo
#
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is
#furnished to do so, subject to the following conditions:
#
#The above copyright notice and this permission notice shall be included in
#all copies or substantial portions of the Software.
#
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
#THE SOFTWARE.
#
#edit command
#
from ..utils.utils import *
from ..database.database import *
from ..utils.functions import *
from .SuperCommand import *
from ..globals import *
from ..globals import GlobalVariables
class EditCommand(SuperCommand):
    """Interactive `edit` command: find matching accounts, update them, and copy the new password to the clipboard."""

    def __init__(self,cmd_handler):
        super().__init__(cmd_handler)

    def parseCommandArgs(self,userInputList):
        """Parse `edit NAME [-id]` into self.cmd_args and self.help_text."""
        cmd_parser = ThrowingArgumentParser(prog="edit",description='Edit account(s) that match given string.')
        cmd_parser.add_argument('name', metavar='NAME', type=str, nargs=1,
                    help='Start of name.')
        cmd_parser.add_argument('-id', required=False, action='store_true', help='Treat account name as account unique ID.')
        (self.cmd_args,self.help_text)=parseCommandArgs(cmd_parser,userInputList)

    def execute(self):
        """For each matching account, prompt for new field values, persist them, and copy the result to the clipboard."""
        loadAccounts(GlobalVariables.KEY)
        arg=self.cmd_args.name[0]
        useID=False
        if self.cmd_args.id:
            useID=True
        #put results in list so that update cursor doesn't interfere with select cursor when updating account
        #there is a note about this here: http://apidoc.apsw.googlecode.com/hg/cursor.html
        rows=list(executeSelect(COLUMNS_TO_SELECT_ORDERED_FOR_DISPLAY,arg,useID=useID))
        for row in rows:
            printAccountRow(row)
            if boolValue(prompt("Edit this account (yes/no)? ")):
                # New column values, in the same order as the UPDATE placeholders below.
                values=[]
                name=modPrompt("Name",row[COLUMN_NAME])
                values.append(name)
                URL=modPrompt("URL",row[COLUMN_URL])
                values.append(URL)
                oldUserName=row[COLUMN_USERNAME]
                promptStr="User name OLD: (%s) NEW" % oldUserName
                username=askAccountUsername(promptStr,oldUserName)
                values.append(username)
                email=modPrompt("Email",row[COLUMN_EMAIL])
                values.append(email)
                originalPassword=row[COLUMN_PASSWORD]
                pwd=askAccountPassword("Password OLD: (%s) NEW:" % (originalPassword),"Password generator is available. Type your password or type 'p'/'ps' to generate password or 'c' to use original password.",originalPassword)
                values.append(pwd)
                comment=modPrompt("Comment",row[COLUMN_COMMENT])
                values.append(comment)
                updated=formatTimestamp(currentTimestamp())
                values.append(updated)
                id=row[COLUMN_ID]
                #if ID is 0, generate new ID
                if int(id) == 0:
                    id=generateNewID()
                values.append(id)
                created=row[COLUMN_CREATED]
                values.append(created)
                # NOTE(review): the row is matched on CREATED (the WHERE column),
                # not ID -- presumably because ID may have just been regenerated;
                # confirm CREATED is unique per account.
                sql="update accounts set %s=?,%s=?,%s=?,%s=?,%s=?,%s=?,%s=?,%s=? where %s=?" % (
                    COLUMN_NAME,
                    COLUMN_URL,
                    COLUMN_USERNAME,
                    COLUMN_EMAIL,
                    COLUMN_PASSWORD,
                    COLUMN_COMMENT,
                    COLUMN_UPDATED,
                    COLUMN_ID,
                    COLUMN_CREATED
                    )
                executeSql(sql,tuple(values),commit=True)
                saveAccounts()
                print("Account updated.")
                #copy edited account password to clipboard
                where="where id = %s" % id
                arg=name
                # NOTE(review): this rebinds both `rows` and (via the inner loop)
                # `row` while the outer loop is still iterating; the already-created
                # list iterator is unaffected, but confirm the shadowing is intended.
                _newrows=rows=executeSelect(COLUMNS_TO_SELECT_ORDERED_FOR_DISPLAY,arg,whereClause=where)
                for row in _newrows:
                    setAccountFieldsToClipboard(row)
|
# -*- coding: utf-8 -*-
from django.conf import settings
from django.contrib.sessions.models import Session
from django.core.urlresolvers import reverse
from django.db.models import get_model
from django.forms import ModelForm
from django.http import HttpResponse, HttpResponseRedirect
from django.shortcuts import render_to_response, get_object_or_404
from django.template import RequestContext
from models import Factor, TechGroup, Technology, Relevancy, Answer, Criterion, TechChoice
from utils import pretty_name
class HttpResponseNoContent(HttpResponse):
    """HTTP 204 response: the request succeeded but there is no body to render."""
    status_code = 204
def get_session(request):
    """Return the Session model row backing the current request's session."""
    return Session.objects.get(pk=request.session.session_key)
def render_to(template):
    """
    Decorator for Django views: render a returned dict with *template*.

    The wrapped view may return:
      * a dict -- rendered with *template* and a RequestContext;
      * a (context_dict, template_name) two-tuple -- the tuple's template
        name overrides the decorator argument;
      * anything else (e.g. an HttpResponse) -- passed through untouched.

    From: http://www.djangosnippets.org/snippets/821/

    Parameters:
     - template: template name to use
    """
    def decorator(view_func):
        def wrapped(request, *args, **kw):
            result = view_func(request, *args, **kw)
            if isinstance(result, (list, tuple)):
                # (context, template) pair: the view chose its own template.
                return render_to_response(result[1], result[0], RequestContext(request))
            if isinstance(result, dict):
                return render_to_response(template, result, RequestContext(request))
            return result
        return wrapped
    return decorator
@render_to('dst/index.html')
def index(request):
    """Redirect to settings.START_URL, or render dst/index.html if it is unset.

    Returning {} lets the render_to decorator render the default template.
    """
    try:
        return HttpResponseRedirect(settings.START_URL)
    except AttributeError:
        # Only a missing START_URL setting should trigger the fallback.
        # The original bare `except:` also swallowed unrelated errors.
        return {}
def get_or_create_answers(session):
    """Return this session's Answer rows, creating one per Criterion on first use.

    New answers start as not applicable (False). The result is ordered for
    display: by factor order, then criterion order.
    """
    answers = Answer.objects.filter(session=session)
    if not answers.count():
        criteria = Criterion.objects.all()
        for criterion in criteria:
            Answer.objects.create(session=session, criterion=criterion, applicable=False)
    return Answer.objects.filter(session=session).order_by('criterion__factor__order', 'criterion__order')
from django.forms.models import modelformset_factory
from django.forms.widgets import HiddenInput, CheckboxInput
class AnswerForm(ModelForm):
    """Form for a single Answer; the criterion is fixed and rendered hidden."""

    def __init__(self, *args, **kwargs):
        super(AnswerForm, self).__init__(*args, **kwargs)
        # The original mutated self.base_fields, a class-level dict shared by
        # every form built from this model; per-instance self.fields (available
        # after super().__init__) is the safe place to swap the widget.
        self.fields['criterion'].widget = HiddenInput()

    class Meta:
        model = Answer
        fields = ['id', 'criterion', 'applicable',]
@render_to('dst/factors.html')
def factors(request, model=None, id=None):
    """Questionnaire view: one 'applicable' checkbox per criterion, grouped by factor.

    POST saves the formset and redirects to the technologies page (or back
    here on validation failure). GET builds the formset from this session's
    answers. `model`/`id` optionally select a help item to display.
    """
    request.session['init'] = 'init'  # touch the session so a session row exists
    AnswerFormSet = modelformset_factory(
        Answer,
        form = AnswerForm,
        extra = 0,
    )
    if request.method == 'POST':
        formset = AnswerFormSet(request.POST, request.FILES)
        if formset.is_valid():
            formset.save()
            return HttpResponseRedirect(reverse('technologies'))
        else:
            return HttpResponseRedirect(reverse('factors'))
    else:
        qs = get_or_create_answers(get_session(request))
        formset = AnswerFormSet(queryset=qs)
        form_list = [form for form in formset.forms]
        change_list = []
        factor_list = []
        old_factor = ''
        # Record each form's factor and whether it starts a new factor group
        # (used by the template to emit factor headings).
        for form in formset.forms:
            new_factor = form.instance.criterion.factor
            factor_list.append(new_factor)
            change_list.append(new_factor != old_factor)
            form.fields['applicable'].label = pretty_name(str(form.instance.criterion))
            old_factor = new_factor
        zipped_formlist = zip(form_list, factor_list, change_list)
        if model:
            help_item = get_model('dst', model).objects.get(id=id)
        else:
            help_item = None
        return {
            'formset' : formset,
            'zipped_formlist' : zipped_formlist,
            'help_item' : help_item,
            'session' : request.session,
        }
@render_to('dst/factor_help.html')
def factor_help(request, model=None, id=None):
    """Render the help panel for an item on the factors page.

    Args:
        request: current HttpRequest (only needed by the decorator).
        model: lowercase model name within the 'dst' app, or None.
        id: primary key of the help item to show.
    """
    # Removed an unused `factors = Factor.objects.all()` local that the
    # original fetched and never referenced.
    if model:
        help_item = get_model('dst', model).objects.get(id=id)
    else:
        help_item = None
    return {'help_item': help_item}
@render_to('dst/technologies.html')
def technologies(request):
    """List every technology per TechGroup, annotated for this session."""
    groups = TechGroup.objects.all()
    group_techs = []
    for group in groups:
        techs = Technology.objects.filter(group=group)
        for tech in techs:
            # Annotate instances for the template; computed per session.
            tech.usable = tech.usability(get_session(request))
            tech.available = tech.availability(get_session(request))
        group_techs.append(techs)
    # if we want to transpose the data:
    #all_techs = map(None, *group_techs)
    all_techs = zip(groups, group_techs)
    return {
        'techgroups' : groups,
        'all_techs' : all_techs,
    }
def tech_choice(request, tech_id):
    """Toggle a TechChoice for this session: create it, or delete it if it already exists."""
    choice, created = TechChoice.objects.get_or_create(session=get_session(request), technology=Technology.objects.get(pk=tech_id))
    if not created:
        choice.delete()
    return HttpResponseRedirect(reverse('technologies'))
def toggle_button(request, btn_name=''):
    """Invert the boolean session flag *btn_name*; a missing flag starts as False.

    Returns an empty 204 response, so no page content changes server-side.
    """
    if btn_name:
        # setdefault seeds the flag to False on first use, then we flip it.
        request.session[btn_name] = not request.session.setdefault(btn_name, False)
    return HttpResponseNoContent()
def reset_all(request):
    """Wipe the entire session (answers, choices, toggles) and restart at the factors page."""
    request.session.flush()
    return HttpResponseRedirect(reverse('factors'))
def reset_techs(request):
    """Delete only this session's technology choices, keeping the answers."""
    TechChoice.objects.filter(session=get_session(request)).delete()
    return HttpResponseRedirect(reverse('technologies'))
@render_to('dst/technologies_help.html')
def technologies_help(request,id=None):
    """Help panel for one technology: why it is (or is not) applicable this session."""
    # Needs to be refined to filter on selection
    #
    #user_id = request.session['auth_user_id']
    session = get_session(request)
    technology = get_object_or_404(Technology, pk=id)
    applicable = technology.applicable(session)
    relevancy_objects = []
    # Only MAYBE/NO outcomes come with explanatory relevancy objects.
    if applicable == technology.TECH_USE_MAYBE:
        relevancy_objects = technology.maybe_relevant(session)
    elif applicable == technology.TECH_USE_NO:
        relevancy_objects = technology.not_relevant(session)
    return { 'technology': technology, 'relevancy_objects':relevancy_objects, 'settings': settings}
@render_to('dst/solution.html')
def solution(request):
    """Summary view: only the technologies the user chose, grouped by TechGroup."""
    groups = TechGroup.objects.all()
    group_techs = []
    for group in groups:
        chosen_techs = Technology.objects.filter(group=group).filter(tech_choices__session=get_session(request))
        for tech in chosen_techs:
            # Annotate instances for the template; computed per session.
            tech.usable = tech.usability(get_session(request))
            tech.available = tech.availability(get_session(request))
        group_techs.append(chosen_techs)
    # if we want to transpose the data:
    #all_techs = map(None, *group_techs)
    all_techs = zip(groups, group_techs)
    return {
        'techgroups' : groups,
        'all_techs' : all_techs,
    }
Add initialisation of button state session variables
# -*- coding: utf-8 -*-
from django.conf import settings
from django.contrib.sessions.models import Session
from django.core.urlresolvers import reverse
from django.db.models import get_model
from django.forms import ModelForm
from django.http import HttpResponse, HttpResponseRedirect
from django.shortcuts import render_to_response, get_object_or_404
from django.template import RequestContext
from models import Factor, TechGroup, Technology, Relevancy, Answer, Criterion, TechChoice
from utils import pretty_name
class HttpResponseNoContent(HttpResponse):
    """HTTP 204 response: the request succeeded but there is no body to render."""
    status_code = 204
def get_session(request):
    """Return the Session model row backing the current request's session."""
    return Session.objects.get(pk=request.session.session_key)
def render_to(template):
    """
    Decorator for Django views: render a returned dict with *template*.

    The wrapped view may return:
      * a dict -- rendered with *template* and a RequestContext;
      * a (context_dict, template_name) two-tuple -- the tuple's template
        name overrides the decorator argument;
      * anything else (e.g. an HttpResponse) -- passed through untouched.

    From: http://www.djangosnippets.org/snippets/821/

    Parameters:
     - template: template name to use
    """
    def decorator(view_func):
        def wrapped(request, *args, **kw):
            result = view_func(request, *args, **kw)
            if isinstance(result, (list, tuple)):
                # (context, template) pair: the view chose its own template.
                return render_to_response(result[1], result[0], RequestContext(request))
            if isinstance(result, dict):
                return render_to_response(template, result, RequestContext(request))
            return result
        return wrapped
    return decorator
@render_to('dst/index.html')
def index(request):
    """Redirect to settings.START_URL, or render dst/index.html if it is unset.

    Returning {} lets the render_to decorator render the default template.
    """
    try:
        return HttpResponseRedirect(settings.START_URL)
    except AttributeError:
        # Only a missing START_URL setting should trigger the fallback.
        # The original bare `except:` also swallowed unrelated errors.
        return {}
def get_or_create_answers(session):
    """Return this session's Answer rows, creating one per Criterion on first use.

    New answers start as not applicable (False). The result is ordered for
    display: by factor order, then criterion order.
    """
    answers = Answer.objects.filter(session=session)
    if not answers.count():
        criteria = Criterion.objects.all()
        for criterion in criteria:
            Answer.objects.create(session=session, criterion=criterion, applicable=False)
    return Answer.objects.filter(session=session).order_by('criterion__factor__order', 'criterion__order')
from django.forms.models import modelformset_factory
from django.forms.widgets import HiddenInput, CheckboxInput
class AnswerForm(ModelForm):
    """Form for a single Answer; the criterion is fixed and rendered hidden."""

    def __init__(self, *args, **kwargs):
        super(AnswerForm, self).__init__(*args, **kwargs)
        # The original mutated self.base_fields, a class-level dict shared by
        # every form built from this model; per-instance self.fields (available
        # after super().__init__) is the safe place to swap the widget.
        self.fields['criterion'].widget = HiddenInput()

    class Meta:
        model = Answer
        fields = ['id', 'criterion', 'applicable',]
def init_session(session):
    """Ensure a boolean 'toggle_<state>' entry exists for every tech-use state.

    Existing entries are left untouched so user toggles survive.

    Args:
        session: dict-like session object to seed.
    """
    uses = 'TECH_USE_NO', 'TECH_USE_MAYBE', 'TECH_USE_YES', 'TECH_USE_NOT_ALLOWED'
    for use in uses:
        # setdefault only writes when the key is absent, matching the original
        # `not in session.keys()` membership test (without the .keys() detour).
        session.setdefault("toggle_%s" % getattr(Technology, use), False)
@render_to('dst/factors.html')
def factors(request, model=None, id=None):
    """Questionnaire view: one 'applicable' checkbox per criterion, grouped by factor.

    POST saves the formset and redirects to the technologies page (or back
    here on validation failure). GET builds the formset from this session's
    answers. `model`/`id` optionally select a help item to display.
    """
    #request.session['button_yes'] = 'init'
    AnswerFormSet = modelformset_factory(
        Answer,
        form = AnswerForm,
        extra = 0,
    )
    # Seed the toggle_* session flags before any template reads them.
    init_session(request.session)
    if request.method == 'POST':
        formset = AnswerFormSet(request.POST, request.FILES)
        if formset.is_valid():
            formset.save()
            return HttpResponseRedirect(reverse('technologies'))
        else:
            return HttpResponseRedirect(reverse('factors'))
    else:
        qs = get_or_create_answers(get_session(request))
        formset = AnswerFormSet(queryset=qs)
        form_list = [form for form in formset.forms]
        change_list = []
        factor_list = []
        old_factor = ''
        # Record each form's factor and whether it starts a new factor group
        # (used by the template to emit factor headings).
        for form in formset.forms:
            new_factor = form.instance.criterion.factor
            factor_list.append(new_factor)
            change_list.append(new_factor != old_factor)
            form.fields['applicable'].label = pretty_name(str(form.instance.criterion))
            old_factor = new_factor
        zipped_formlist = zip(form_list, factor_list, change_list)
        if model:
            help_item = get_model('dst', model).objects.get(id=id)
        else:
            help_item = None
        return {
            'formset' : formset,
            'zipped_formlist' : zipped_formlist,
            'help_item' : help_item,
            'session' : request.session,
        }
@render_to('dst/factor_help.html')
def factor_help(request, model=None, id=None):
    """Render the help panel for an item on the factors page.

    Args:
        request: current HttpRequest (only needed by the decorator).
        model: lowercase model name within the 'dst' app, or None.
        id: primary key of the help item to show.
    """
    # Removed an unused `factors = Factor.objects.all()` local that the
    # original fetched and never referenced.
    if model:
        help_item = get_model('dst', model).objects.get(id=id)
    else:
        help_item = None
    return {'help_item': help_item}
@render_to('dst/technologies.html')
def technologies(request):
    """List every technology per TechGroup, annotated for this session."""
    groups = TechGroup.objects.all()
    group_techs = []
    for group in groups:
        techs = Technology.objects.filter(group=group)
        for tech in techs:
            # Annotate instances for the template; computed per session.
            tech.usable = tech.usability(get_session(request))
            tech.available = tech.availability(get_session(request))
        group_techs.append(techs)
    # if we want to transpose the data:
    #all_techs = map(None, *group_techs)
    all_techs = zip(groups, group_techs)
    return {
        'techgroups' : groups,
        'all_techs' : all_techs,
    }
def tech_choice(request, tech_id):
    """Toggle a TechChoice for this session: create it, or delete it if it already exists."""
    choice, created = TechChoice.objects.get_or_create(session=get_session(request), technology=Technology.objects.get(pk=tech_id))
    if not created:
        choice.delete()
    return HttpResponseRedirect(reverse('technologies'))
def toggle_button(request, btn_name=''):
    """Invert the boolean session flag *btn_name*; a missing flag starts as False.

    Returns an empty 204 response, so no page content changes server-side.
    """
    if btn_name:
        # setdefault seeds the flag to False on first use, then we flip it.
        request.session[btn_name] = not request.session.setdefault(btn_name, False)
    return HttpResponseNoContent()
def reset_all(request):
    """Wipe the entire session (answers, choices, toggles) and restart at the factors page."""
    request.session.flush()
    return HttpResponseRedirect(reverse('factors'))
def reset_techs(request):
    """Delete only this session's technology choices, keeping the answers."""
    TechChoice.objects.filter(session=get_session(request)).delete()
    return HttpResponseRedirect(reverse('technologies'))
@render_to('dst/technologies_help.html')
def technologies_help(request,id=None):
    """Help panel for one technology: why it is (or is not) applicable this session."""
    # Needs to be refined to filter on selection
    #
    #user_id = request.session['auth_user_id']
    session = get_session(request)
    technology = get_object_or_404(Technology, pk=id)
    applicable = technology.applicable(session)
    relevancy_objects = []
    # Only MAYBE/NO outcomes come with explanatory relevancy objects.
    if applicable == technology.TECH_USE_MAYBE:
        relevancy_objects = technology.maybe_relevant(session)
    elif applicable == technology.TECH_USE_NO:
        relevancy_objects = technology.not_relevant(session)
    return { 'technology': technology, 'relevancy_objects':relevancy_objects, 'settings': settings}
@render_to('dst/solution.html')
def solution(request):
    """Summary view: only the technologies the user chose, grouped by TechGroup."""
    groups = TechGroup.objects.all()
    group_techs = []
    for group in groups:
        chosen_techs = Technology.objects.filter(group=group).filter(tech_choices__session=get_session(request))
        for tech in chosen_techs:
            # Annotate instances for the template; computed per session.
            tech.usable = tech.usability(get_session(request))
            tech.available = tech.availability(get_session(request))
        group_techs.append(chosen_techs)
    # if we want to transpose the data:
    #all_techs = map(None, *group_techs)
    all_techs = zip(groups, group_techs)
    return {
        'techgroups' : groups,
        'all_techs' : all_techs,
    }
# -*- Mode: Python; coding: iso-8859-1 -*-
# vi:si:et:sw=4:sts=4:ts=4
##
## Stoqdrivers
## Copyright (C) 2005 Async Open Source <http://www.async.com.br>
## All rights reserved
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307,
## USA.
##
## Author(s): Johan Dahlin <jdahlin@async.com.br>
##
from serial import Serial, EIGHTBITS, PARITY_NONE, STOPBITS_ONE
from stoqdrivers.log import Logger
class SerialBase(Serial, Logger):
    """A pyserial port that logs everything it sends and receives.

    Base class for the stoqdrivers device drivers; traffic is logged
    under the 'serial' log domain.
    """

    log_domain = 'serial'

    # All commands will have this prefixed
    CMD_PREFIX = '\x1b'
    CMD_SUFFIX = ''

    # used by readline()
    EOL_DELIMIT = '\r'

    # Set this attribute to avoid sending data to printer
    DEBUG_MODE = 0

    def __init__(self, device, baudrate=9600, bytesize=EIGHTBITS,
                 parity=PARITY_NONE, stopbits=STOPBITS_ONE):
        """Open *device* and prepare it for use.

        :param device: path to the serial device (e.g. /dev/ttyS0).
        :param baudrate: line speed, default 9600.
        :param bytesize: data bits per character.
        :param parity: parity setting.
        :param stopbits: number of stop bits.
        """
        Logger.__init__(self)
        self.info('opening device %s' % device)
        Serial.__init__(self, device, baudrate=baudrate,
                        bytesize=bytesize, parity=parity,
                        stopbits=stopbits)
        # Assert DTR and start from clean buffers before talking to the
        # device; reads give up after 3 seconds.
        self.setDTR(True)
        self.flushInput()
        self.flushOutput()
        self.setTimeout(3)

    def read_insist(self, n_bytes):
        """
        This is a more insistent read, that will try to read that many
        bytes a determined number of times.
        """
        # At most 12 attempts; returns whatever was collected, which may
        # be shorter than n_bytes if the device stays silent.
        number_of_tries = 12
        data = ""
        while True:
            data_left = n_bytes - len(data)
            if (data_left > 0) and (number_of_tries > 0):
                data += Serial.read(self, data_left)
                data_left = n_bytes - len(data)
                number_of_tries -= 1
            else:
                break
        return data

    def readline(self):
        """Read characters until EOL_DELIMIT and return them.

        The delimiter itself is not included in the result. Returns None
        when DEBUG_MODE is set (nothing is read).
        """
        if self.DEBUG_MODE:
            return
        c = ''
        out = ''
        while True:
            c = self.read(1)
            if c == self.EOL_DELIMIT:
                self.debug('<<< %r' % out)
                return out
            out += c

    def write(self, data):
        """Log *data* and send it over the port."""
        self.debug(">>> %r (%dbytes)" % (data, len(data)))
        Serial.write(self, data)

    def writeline(self, data):
        """Send *data* wrapped in CMD_PREFIX/CMD_SUFFIX and return the reply.

        No-op (returns None) when DEBUG_MODE is set.
        """
        if self.DEBUG_MODE:
            return
        self.write(self.CMD_PREFIX + data + self.CMD_SUFFIX)
        return self.readline()

    def handle_error(self, error, command):
        """
        Should be implemented by subclass
        @param error:
        @param command:
        """
        raise NotImplementedError
Removing a useless method
# -*- Mode: Python; coding: iso-8859-1 -*-
# vi:si:et:sw=4:sts=4:ts=4
##
## Stoqdrivers
## Copyright (C) 2005 Async Open Source <http://www.async.com.br>
## All rights reserved
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307,
## USA.
##
## Author(s): Johan Dahlin <jdahlin@async.com.br>
##
from serial import Serial, EIGHTBITS, PARITY_NONE, STOPBITS_ONE
from stoqdrivers.log import Logger
class SerialBase(Serial, Logger):
    """Serial port with traffic logging, base class for device drivers.

    All bytes written and read are logged under the 'serial' domain.
    """

    log_domain = 'serial'

    # Every command sent through writeline() is wrapped in these.
    CMD_PREFIX = '\x1b'
    CMD_SUFFIX = ''

    # End-of-line marker consumed by readline().
    EOL_DELIMIT = '\r'

    # When set, writeline()/readline() become no-ops so nothing reaches
    # the printer.
    DEBUG_MODE = 0

    def __init__(self, device, baudrate=9600, bytesize=EIGHTBITS,
                 parity=PARITY_NONE, stopbits=STOPBITS_ONE):
        """Open *device* with the given line settings and prepare it."""
        Logger.__init__(self)
        self.info('opening device %s' % device)
        Serial.__init__(self, device, baudrate=baudrate,
                        bytesize=bytesize, parity=parity,
                        stopbits=stopbits)
        # Raise DTR and start from clean buffers; reads time out after
        # 3 seconds.
        self.setDTR(True)
        self.flushInput()
        self.flushOutput()
        self.setTimeout(3)

    def read_insist(self, n_bytes):
        """
        This is a more insistent read, that will try to read that many
        bytes a determined number of times.
        """
        attempts_left = 12
        collected = ""
        while attempts_left > 0:
            missing = n_bytes - len(collected)
            if missing <= 0:
                break
            collected += Serial.read(self, missing)
            attempts_left -= 1
        return collected

    def readline(self):
        """Read until EOL_DELIMIT; return the line without the delimiter.

        Returns None in debug mode.
        """
        if self.DEBUG_MODE:
            return
        pieces = []
        while True:
            ch = self.read(1)
            if ch == self.EOL_DELIMIT:
                line = ''.join(pieces)
                self.debug('<<< %r' % line)
                return line
            pieces.append(ch)

    def write(self, data):
        """Log *data* and push it out over the port."""
        self.debug(">>> %r (%dbytes)" % (data, len(data)))
        Serial.write(self, data)

    def writeline(self, data):
        """Wrap *data* in the command prefix/suffix, send it, return the reply.

        No-op in debug mode.
        """
        if self.DEBUG_MODE:
            return
        self.write('%s%s%s' % (self.CMD_PREFIX, data, self.CMD_SUFFIX))
        return self.readline()
|
# coding: utf-8
from __future__ import unicode_literals
import base64
import itertools
import random
import re
import string
import time
from .common import InfoExtractor
from ..compat import (
compat_urllib_parse_urlencode,
compat_ord,
)
from ..utils import (
ExtractorError,
get_element_by_attribute,
)
class YoukuIE(InfoExtractor):
    """Extractor for single videos on youku.com.

    Videos are served as a sequence of segments; each segment becomes
    one entry of a multi_video result.
    """
    IE_NAME = 'youku'
    IE_DESC = '优酷'
    _VALID_URL = r'''(?x)
        (?:
            http://(?:v|player)\.youku\.com/(?:v_show/id_|player\.php/sid/)|
            youku:)
        (?P<id>[A-Za-z0-9]+)(?:\.html|/v\.swf|)
    '''

    _TESTS = [{
        # MD5 is unstable
        'url': 'http://v.youku.com/v_show/id_XMTc1ODE5Njcy.html',
        'info_dict': {
            'id': 'XMTc1ODE5Njcy_part1',
            'title': '★Smile﹗♡ Git Fresh -Booty Music舞蹈.',
            'ext': 'flv'
        }
    }, {
        'url': 'http://player.youku.com/player.php/sid/XNDgyMDQ2NTQw/v.swf',
        'only_matching': True,
    }, {
        'url': 'http://v.youku.com/v_show/id_XODgxNjg1Mzk2_ev_1.html',
        'info_dict': {
            'id': 'XODgxNjg1Mzk2',
            'title': '武媚娘传奇 85',
        },
        'playlist_count': 11,
        'skip': 'Available in China only',
    }, {
        'url': 'http://v.youku.com/v_show/id_XMTI1OTczNDM5Mg==.html',
        'info_dict': {
            'id': 'XMTI1OTczNDM5Mg',
            'title': '花千骨 04',
        },
        'playlist_count': 13,
    }, {
        'url': 'http://v.youku.com/v_show/id_XNjA1NzA2Njgw.html',
        'note': 'Video protected with password',
        'info_dict': {
            'id': 'XNjA1NzA2Njgw',
            'title': '邢義田复旦讲座之想象中的胡人—从“左衽孔子”说起',
        },
        'playlist_count': 19,
        'params': {
            'videopassword': '100600',
        },
    }, {
        # /play/get.json contains streams with "channel_type":"tail"
        'url': 'http://v.youku.com/v_show/id_XOTUxMzg4NDMy.html',
        'info_dict': {
            'id': 'XOTUxMzg4NDMy',
            'title': '我的世界☆明月庄主☆车震猎杀☆杀人艺术Minecraft',
        },
        'playlist_count': 6,
    }]

    def construct_video_urls(self, data):
        """Build a {stream_type: [segment_url, ...]} map from /play/get.json data."""

        # RC4-style keystream mixer Youku uses for the sid/token pair
        # and for the 'ep' parameter.
        def yk_t(s1, s2):
            ls = list(range(256))
            t = 0
            for i in range(256):
                t = (t + ls[i] + compat_ord(s1[i % len(s1)])) % 256
                ls[i], ls[t] = ls[t], ls[i]
            s = bytearray()
            x, y = 0, 0
            for i in range(len(s2)):
                y = (y + 1) % 256
                x = (x + ls[y]) % 256
                ls[x], ls[y] = ls[y], ls[x]
                s.append(compat_ord(s2[i]) ^ ls[(ls[x] + ls[y]) % 256])
            return bytes(s)

        # get sid, token
        sid, token = yk_t(
            b'becaf9be', base64.b64decode(data['security']['encrypt_string'].encode('ascii'))
        ).decode('ascii').split('_')

        # get oip
        oip = data['security']['ip']

        fileid_dict = {}
        for stream in data['stream']:
            if stream.get('channel_type') == 'tail':
                continue
            format = stream.get('stream_type')
            # Prefer the per-segment fileid; fall back to the stream-level
            # one so a stream whose first segment carries no 'fileid' does
            # not abort the whole extraction with a KeyError.
            fileid = None
            segs = stream.get('segs')
            if segs:
                fileid = segs[0].get('fileid')
            if not fileid:
                fileid = stream['stream_fileid']
            fileid_dict[format] = fileid

        def get_fileid(format, n):
            # The segment number is spliced into the fileid as a two-digit
            # uppercase hex value at offset 8.
            number = hex(int(str(n), 10))[2:].upper()
            if len(number) == 1:
                number = '0' + number
            streamfileids = fileid_dict[format]
            fileid = streamfileids[0:8] + number + streamfileids[10:]
            return fileid

        # get ep
        def generate_ep(format, n):
            fileid = get_fileid(format, n)
            ep_t = yk_t(
                b'bf7e5f01',
                ('%s_%s_%s' % (sid, fileid, token)).encode('ascii')
            )
            ep = base64.b64encode(ep_t).decode('ascii')
            return ep

        # generate video_urls
        video_urls_dict = {}
        for stream in data['stream']:
            if stream.get('channel_type') == 'tail':
                continue
            format = stream.get('stream_type')
            video_urls = []
            # enumerate() gives the true position of each segment;
            # list.index() returned the first equal element (wrong for
            # duplicate segments) and was O(n) per lookup.
            for seg_num, dt in enumerate(stream['segs']):
                n = str(seg_num)
                param = {
                    'K': dt['key'],
                    'hd': self.get_hd(format),
                    'myp': 0,
                    'ypp': 0,
                    'ctype': 12,
                    'ev': 1,
                    'token': token,
                    'oip': oip,
                    'ep': generate_ep(format, n)
                }
                video_url = \
                    'http://k.youku.com/player/getFlvPath/' + \
                    'sid/' + sid + \
                    '_00' + \
                    '/st/' + self.parse_ext_l(format) + \
                    '/fileid/' + get_fileid(format, n) + '?' + \
                    compat_urllib_parse_urlencode(param)
                video_urls.append(video_url)
            video_urls_dict[format] = video_urls

        return video_urls_dict

    @staticmethod
    def get_ysuid():
        """Generate a session cookie value: unix time plus three random letters."""
        return '%d%s' % (int(time.time()), ''.join([
            random.choice(string.ascii_letters) for i in range(3)]))

    def get_hd(self, fm):
        """Map a stream_type to Youku's 'hd' quality parameter."""
        hd_id_dict = {
            '3gp': '0',
            '3gphd': '1',
            'flv': '0',
            'flvhd': '0',
            'mp4': '1',
            'mp4hd': '1',
            'mp4hd2': '1',
            'mp4hd3': '1',
            'hd2': '2',
            'hd3': '3',
        }
        return hd_id_dict[fm]

    def parse_ext_l(self, fm):
        """Map a stream_type to the container extension used in the URL."""
        ext_dict = {
            '3gp': 'flv',
            '3gphd': 'mp4',
            'flv': 'flv',
            'flvhd': 'flv',
            'mp4': 'mp4',
            'mp4hd': 'mp4',
            'mp4hd2': 'flv',
            'mp4hd3': 'flv',
            'hd2': 'flv',
            'hd3': 'flv',
        }
        return ext_dict[fm]

    def get_format_name(self, fm):
        """Map a stream_type to the format_id exposed to the user."""
        _dict = {
            '3gp': 'h6',
            '3gphd': 'h5',
            'flv': 'h4',
            'flvhd': 'h4',
            'mp4': 'h3',
            'mp4hd': 'h3',
            'mp4hd2': 'h4',
            'mp4hd3': 'h4',
            'hd2': 'h2',
            'hd3': 'h1',
        }
        return _dict[fm]

    def _real_extract(self, url):
        video_id = self._match_id(url)

        self._set_cookie('youku.com', '__ysuid', self.get_ysuid())

        def retrieve_data(req_url, note):
            headers = {
                'Referer': req_url,
            }
            headers.update(self.geo_verification_headers())
            self._set_cookie('youku.com', 'xreferrer', 'http://www.youku.com')
            raw_data = self._download_json(req_url, video_id, note=note, headers=headers)
            return raw_data['data']

        video_password = self._downloader.params.get('videopassword')

        # request basic data
        basic_data_url = 'http://play.youku.com/play/get.json?vid=%s&ct=12' % video_id
        if video_password:
            basic_data_url += '&pwd=%s' % video_password

        data = retrieve_data(basic_data_url, 'Downloading JSON metadata')

        error = data.get('error')
        if error:
            error_note = error.get('note')
            if error_note is not None and '因版权原因无法观看此视频' in error_note:
                raise ExtractorError(
                    'Youku said: Sorry, this video is available in China only', expected=True)
            elif error_note and '该视频被设为私密' in error_note:
                raise ExtractorError(
                    'Youku said: Sorry, this video is private', expected=True)
            else:
                msg = 'Youku server reported error %i' % error.get('code')
                if error_note is not None:
                    msg += ': ' + error_note
                raise ExtractorError(msg)

        # get video title
        title = data['video']['title']

        # generate video_urls_dict
        video_urls_dict = self.construct_video_urls(data)

        # construct info
        # 'or []' guards against streams without a 'segs' list, which would
        # otherwise make len() raise TypeError.
        entries = [{
            'id': '%s_part%d' % (video_id, i + 1),
            'title': title,
            'formats': [],
            # some formats are not available for all parts, we have to detect
            # which one has all
        } for i in range(max(len(v.get('segs') or []) for v in data['stream']))]
        for stream in data['stream']:
            if stream.get('channel_type') == 'tail':
                continue
            fm = stream.get('stream_type')
            video_urls = video_urls_dict[fm]
            for video_url, seg, entry in zip(video_urls, stream['segs'], entries):
                entry['formats'].append({
                    'url': video_url,
                    'format_id': self.get_format_name(fm),
                    'ext': self.parse_ext_l(fm),
                    'filesize': int(seg['size']),
                    'width': stream.get('width'),
                    'height': stream.get('height'),
                })

        return {
            '_type': 'multi_video',
            'id': video_id,
            'title': title,
            'entries': entries,
        }
class YoukuShowIE(InfoExtractor):
    """Extractor for youku.com show pages (playlists of episodes)."""

    _VALID_URL = r'https?://(?:www\.)?youku\.com/show_page/id_(?P<id>[0-9a-z]+)\.html'
    IE_NAME = 'youku:show'

    _TEST = {
        'url': 'http://www.youku.com/show_page/id_zc7c670be07ff11e48b3f.html',
        'info_dict': {
            'id': 'zc7c670be07ff11e48b3f',
            'title': '花千骨 未删减版',
            'description': 'md5:578d4f2145ae3f9128d9d4d863312910',
        },
        'playlist_count': 50,
    }

    _PAGE_SIZE = 40

    def _find_videos_in_page(self, webpage):
        """Return url_result entries for every episode link in *webpage*."""
        matches = re.findall(
            r'<li><a[^>]+href="(?P<url>https?://v\.youku\.com/[^"]+)"[^>]+title="(?P<title>[^"]+)"', webpage)
        results = []
        for video_url, title in matches:
            results.append(self.url_result(video_url, YoukuIE.ie_key(), title))
        return results

    def _real_extract(self, url):
        show_id = self._match_id(url)
        webpage = self._download_webpage(url, show_id)

        # Title lives in the page header; the description is hidden markup
        # inside the 'detail' element.
        playlist_title = self._html_search_regex(
            r'<span[^>]+class="name">([^<]+)</span>', webpage, 'playlist title', fatal=False)
        detail_div = get_element_by_attribute('class', 'detail', webpage) or ''
        playlist_description = self._html_search_regex(
            r'<span[^>]+style="display:none"[^>]*>([^<]+)</span>',
            detail_div, 'playlist description', fatal=False)

        entries = self._find_videos_in_page(webpage)
        # Page through the episode listing until a page comes back short.
        page_num = 0
        while True:
            page_num += 1
            episodes_page = self._download_webpage(
                'http://www.youku.com/show_episode/id_%s.html' % show_id,
                show_id, query={'divid': 'reload_%d' % (page_num * self._PAGE_SIZE + 1)},
                note='Downloading episodes page %d' % page_num)
            found = self._find_videos_in_page(episodes_page)
            entries.extend(found)
            if len(found) < self._PAGE_SIZE:
                break

        return self.playlist_result(entries, show_id, playlist_title, playlist_description)
[youku] Keep old fileid extraction code as fallback (#12741)
# coding: utf-8
from __future__ import unicode_literals
import base64
import itertools
import random
import re
import string
import time
from .common import InfoExtractor
from ..compat import (
compat_ord,
compat_str,
compat_urllib_parse_urlencode,
)
from ..utils import (
ExtractorError,
get_element_by_attribute,
try_get,
)
class YoukuIE(InfoExtractor):
    """Extractor for single videos on youku.com.

    Videos are served as a sequence of segments; each segment becomes one
    entry of a multi_video result.
    """
    IE_NAME = 'youku'
    IE_DESC = '优酷'
    _VALID_URL = r'''(?x)
        (?:
            http://(?:v|player)\.youku\.com/(?:v_show/id_|player\.php/sid/)|
            youku:)
        (?P<id>[A-Za-z0-9]+)(?:\.html|/v\.swf|)
    '''

    _TESTS = [{
        # MD5 is unstable
        'url': 'http://v.youku.com/v_show/id_XMTc1ODE5Njcy.html',
        'info_dict': {
            'id': 'XMTc1ODE5Njcy_part1',
            'title': '★Smile﹗♡ Git Fresh -Booty Music舞蹈.',
            'ext': 'flv'
        }
    }, {
        'url': 'http://player.youku.com/player.php/sid/XNDgyMDQ2NTQw/v.swf',
        'only_matching': True,
    }, {
        'url': 'http://v.youku.com/v_show/id_XODgxNjg1Mzk2_ev_1.html',
        'info_dict': {
            'id': 'XODgxNjg1Mzk2',
            'title': '武媚娘传奇 85',
        },
        'playlist_count': 11,
        'skip': 'Available in China only',
    }, {
        'url': 'http://v.youku.com/v_show/id_XMTI1OTczNDM5Mg==.html',
        'info_dict': {
            'id': 'XMTI1OTczNDM5Mg',
            'title': '花千骨 04',
        },
        'playlist_count': 13,
    }, {
        'url': 'http://v.youku.com/v_show/id_XNjA1NzA2Njgw.html',
        'note': 'Video protected with password',
        'info_dict': {
            'id': 'XNjA1NzA2Njgw',
            'title': '邢義田复旦讲座之想象中的胡人—从“左衽孔子”说起',
        },
        'playlist_count': 19,
        'params': {
            'videopassword': '100600',
        },
    }, {
        # /play/get.json contains streams with "channel_type":"tail"
        'url': 'http://v.youku.com/v_show/id_XOTUxMzg4NDMy.html',
        'info_dict': {
            'id': 'XOTUxMzg4NDMy',
            'title': '我的世界☆明月庄主☆车震猎杀☆杀人艺术Minecraft',
        },
        'playlist_count': 6,
    }]

    def construct_video_urls(self, data):
        """Build a {stream_type: [segment_url, ...]} map from /play/get.json data."""

        # get sid, token
        # yk_t is an RC4-style keystream mixer used both to decrypt the
        # sid/token pair and to produce the 'ep' parameter.
        def yk_t(s1, s2):
            ls = list(range(256))
            t = 0
            for i in range(256):
                t = (t + ls[i] + compat_ord(s1[i % len(s1)])) % 256
                ls[i], ls[t] = ls[t], ls[i]
            s = bytearray()
            x, y = 0, 0
            for i in range(len(s2)):
                y = (y + 1) % 256
                x = (x + ls[y]) % 256
                ls[x], ls[y] = ls[y], ls[x]
                s.append(compat_ord(s2[i]) ^ ls[(ls[x] + ls[y]) % 256])
            return bytes(s)

        sid, token = yk_t(
            b'becaf9be', base64.b64decode(data['security']['encrypt_string'].encode('ascii'))
        ).decode('ascii').split('_')

        # get oip
        oip = data['security']['ip']

        fileid_dict = {}
        for stream in data['stream']:
            if stream.get('channel_type') == 'tail':
                continue
            format = stream.get('stream_type')
            # Prefer the per-segment fileid; fall back to the stream-level
            # 'stream_fileid' when the first segment does not carry one.
            fileid = try_get(
                stream, lambda x: x['segs'][0]['fileid'],
                compat_str) or stream['stream_fileid']
            fileid_dict[format] = fileid

        def get_fileid(format, n):
            # Segment number is spliced into the fileid as a two-digit
            # uppercase hex value at offset 8.
            number = hex(int(str(n), 10))[2:].upper()
            if len(number) == 1:
                number = '0' + number
            streamfileids = fileid_dict[format]
            fileid = streamfileids[0:8] + number + streamfileids[10:]
            return fileid

        # get ep
        def generate_ep(format, n):
            fileid = get_fileid(format, n)
            ep_t = yk_t(
                b'bf7e5f01',
                ('%s_%s_%s' % (sid, fileid, token)).encode('ascii')
            )
            ep = base64.b64encode(ep_t).decode('ascii')
            return ep

        # generate video_urls
        video_urls_dict = {}
        for stream in data['stream']:
            if stream.get('channel_type') == 'tail':
                continue
            format = stream.get('stream_type')
            video_urls = []
            for dt in stream['segs']:
                n = str(stream['segs'].index(dt))
                param = {
                    'K': dt['key'],
                    'hd': self.get_hd(format),
                    'myp': 0,
                    'ypp': 0,
                    'ctype': 12,
                    'ev': 1,
                    'token': token,
                    'oip': oip,
                    'ep': generate_ep(format, n)
                }
                video_url = \
                    'http://k.youku.com/player/getFlvPath/' + \
                    'sid/' + sid + \
                    '_00' + \
                    '/st/' + self.parse_ext_l(format) + \
                    '/fileid/' + get_fileid(format, n) + '?' + \
                    compat_urllib_parse_urlencode(param)
                video_urls.append(video_url)
            video_urls_dict[format] = video_urls

        return video_urls_dict

    @staticmethod
    def get_ysuid():
        """Generate a session cookie value: unix time plus three random letters."""
        return '%d%s' % (int(time.time()), ''.join([
            random.choice(string.ascii_letters) for i in range(3)]))

    def get_hd(self, fm):
        """Map a stream_type to Youku's 'hd' quality parameter."""
        hd_id_dict = {
            '3gp': '0',
            '3gphd': '1',
            'flv': '0',
            'flvhd': '0',
            'mp4': '1',
            'mp4hd': '1',
            'mp4hd2': '1',
            'mp4hd3': '1',
            'hd2': '2',
            'hd3': '3',
        }
        return hd_id_dict[fm]

    def parse_ext_l(self, fm):
        """Map a stream_type to the container extension used in the URL."""
        ext_dict = {
            '3gp': 'flv',
            '3gphd': 'mp4',
            'flv': 'flv',
            'flvhd': 'flv',
            'mp4': 'mp4',
            'mp4hd': 'mp4',
            'mp4hd2': 'flv',
            'mp4hd3': 'flv',
            'hd2': 'flv',
            'hd3': 'flv',
        }
        return ext_dict[fm]

    def get_format_name(self, fm):
        """Map a stream_type to the format_id exposed to the user."""
        _dict = {
            '3gp': 'h6',
            '3gphd': 'h5',
            'flv': 'h4',
            'flvhd': 'h4',
            'mp4': 'h3',
            'mp4hd': 'h3',
            'mp4hd2': 'h4',
            'mp4hd3': 'h4',
            'hd2': 'h2',
            'hd3': 'h1',
        }
        return _dict[fm]

    def _real_extract(self, url):
        video_id = self._match_id(url)

        self._set_cookie('youku.com', '__ysuid', self.get_ysuid())

        def retrieve_data(req_url, note):
            headers = {
                'Referer': req_url,
            }
            headers.update(self.geo_verification_headers())
            self._set_cookie('youku.com', 'xreferrer', 'http://www.youku.com')

            raw_data = self._download_json(req_url, video_id, note=note, headers=headers)

            return raw_data['data']

        video_password = self._downloader.params.get('videopassword')

        # request basic data
        basic_data_url = 'http://play.youku.com/play/get.json?vid=%s&ct=12' % video_id
        if video_password:
            basic_data_url += '&pwd=%s' % video_password

        data = retrieve_data(basic_data_url, 'Downloading JSON metadata')

        error = data.get('error')
        if error:
            error_note = error.get('note')
            if error_note is not None and '因版权原因无法观看此视频' in error_note:
                raise ExtractorError(
                    'Youku said: Sorry, this video is available in China only', expected=True)
            elif error_note and '该视频被设为私密' in error_note:
                raise ExtractorError(
                    'Youku said: Sorry, this video is private', expected=True)
            else:
                msg = 'Youku server reported error %i' % error.get('code')
                if error_note is not None:
                    msg += ': ' + error_note
                raise ExtractorError(msg)

        # get video title
        title = data['video']['title']

        # generate video_urls_dict
        video_urls_dict = self.construct_video_urls(data)

        # construct info
        entries = [{
            'id': '%s_part%d' % (video_id, i + 1),
            'title': title,
            'formats': [],
            # some formats are not available for all parts, we have to detect
            # which one has all
        } for i in range(max(len(v.get('segs')) for v in data['stream']))]
        for stream in data['stream']:
            if stream.get('channel_type') == 'tail':
                continue
            fm = stream.get('stream_type')
            video_urls = video_urls_dict[fm]
            for video_url, seg, entry in zip(video_urls, stream['segs'], entries):
                entry['formats'].append({
                    'url': video_url,
                    'format_id': self.get_format_name(fm),
                    'ext': self.parse_ext_l(fm),
                    'filesize': int(seg['size']),
                    'width': stream.get('width'),
                    'height': stream.get('height'),
                })

        return {
            '_type': 'multi_video',
            'id': video_id,
            'title': title,
            'entries': entries,
        }
class YoukuShowIE(InfoExtractor):
    """Extractor for youku.com show pages (playlists of episodes)."""

    _VALID_URL = r'https?://(?:www\.)?youku\.com/show_page/id_(?P<id>[0-9a-z]+)\.html'
    IE_NAME = 'youku:show'

    _TEST = {
        'url': 'http://www.youku.com/show_page/id_zc7c670be07ff11e48b3f.html',
        'info_dict': {
            'id': 'zc7c670be07ff11e48b3f',
            'title': '花千骨 未删减版',
            'description': 'md5:578d4f2145ae3f9128d9d4d863312910',
        },
        'playlist_count': 50,
    }

    # Episodes are listed this many per page by the show_episode endpoint.
    _PAGE_SIZE = 40

    def _find_videos_in_page(self, webpage):
        """Return url_result entries for every episode link in *webpage*."""
        videos = re.findall(
            r'<li><a[^>]+href="(?P<url>https?://v\.youku\.com/[^"]+)"[^>]+title="(?P<title>[^"]+)"', webpage)
        return [
            self.url_result(video_url, YoukuIE.ie_key(), title)
            for video_url, title in videos]

    def _real_extract(self, url):
        show_id = self._match_id(url)
        webpage = self._download_webpage(url, show_id)

        entries = self._find_videos_in_page(webpage)

        playlist_title = self._html_search_regex(
            r'<span[^>]+class="name">([^<]+)</span>', webpage, 'playlist title', fatal=False)
        # The description is hidden markup inside the 'detail' element.
        detail_div = get_element_by_attribute('class', 'detail', webpage) or ''
        playlist_description = self._html_search_regex(
            r'<span[^>]+style="display:none"[^>]*>([^<]+)</span>',
            detail_div, 'playlist description', fatal=False)

        # Keep fetching episode pages until one comes back short.
        for idx in itertools.count(1):
            episodes_page = self._download_webpage(
                'http://www.youku.com/show_episode/id_%s.html' % show_id,
                show_id, query={'divid': 'reload_%d' % (idx * self._PAGE_SIZE + 1)},
                note='Downloading episodes page %d' % idx)
            new_entries = self._find_videos_in_page(episodes_page)
            entries.extend(new_entries)
            if len(new_entries) < self._PAGE_SIZE:
                break

        return self.playlist_result(entries, show_id, playlist_title, playlist_description)
|
#!/usr/bin/env python
"""`This`_ is what an ideal Python script might look like.
.. _This: https://github.com/wcmaier/python-script/blob/master/script.py
The script can be used as a template for real scripts that will have several
important properties; namely, they will be:
* readable (:PEP:`8`);
* portable (by using things like :class:`subprocess.Popen` and :mod:`os`);
* testable (thanks to a simple unit and functional testing framework that is
itself tested); and
* configurable (users can change logging verbosity and other runtime details
on the command line).
All of this is accomplished using only modules from the Python standard library
that are available in every installation.
`Git`_ and `Mercurial`_ repositories for this script can be found at `Github`_ and
`Bitbucket`_, respectively::
$ git clone git://github.com/wcmaier/python-script.git
$ hg clone http://bitbucket.org/wcmaier/python-script
.. _Git: http://git-scm.com/
.. _Mercurial: http://mercurial.selenic.com/
.. _Github: http://github.com/wcmaier/python-script
.. _Bitbucket: http://bitbucket.org/wcmaier/python-script
"""
__license__ = """\
Copyright (c) 2010 Will Maier <willmaier@ml1.net>
Permission to use, copy, modify, and distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.\
"""
import logging
import optparse
import sys
# NullHandler was added in Python 3.1 (and 2.7); provide a fallback for
# older interpreters.
try:
    NullHandler = logging.NullHandler
except AttributeError:
    class NullHandler(logging.Handler):
        # Swallow every record.
        def emit(self, record): pass

# Add a do-nothing NullHandler to the module logger to prevent "No handlers
# could be found" errors. The calling code can still add other, more useful
# handlers, or otherwise configure logging.
log = logging.getLogger(__name__)
log.addHandler(NullHandler())
def parseargs(argv):
    """Parse command line arguments.

    Returns a tuple (*opts*, *args*), where *opts* is an
    :class:`optparse.Values` instance and *args* is the list of arguments left
    over after processing.

    :param argv: a list of command line arguments, usually :data:`sys.argv`.
    """
    parser = optparse.OptionParser(prog=argv[0])
    # Stop option processing at the first positional argument.
    parser.allow_interspersed_args = False

    # Global options.
    parser.add_option("-q", "--quiet", dest="quiet",
                      default=0, action="count",
                      help="decrease the logging verbosity")
    parser.add_option("-s", "--silent", dest="silent",
                      default=False, action="store_true",
                      help="silence the logger")
    parser.add_option("-v", "--verbose", dest="verbose",
                      default=0, action="count",
                      help="increase the logging verbosity")

    return parser.parse_args(args=argv[1:])
def main(argv, out=None, err=None):
    """Main entry point.

    Returns a value that can be understood by :func:`sys.exit`.

    :param argv: a list of command line arguments, usually :data:`sys.argv`.
    :param out: stream to write messages; :data:`sys.stdout` if None.
    :param err: stream to write error messages; :data:`sys.stderr` if None.
    """
    if out is None:  # pragma: nocover
        out = sys.stdout
    if err is None:  # pragma: nocover
        err = sys.stderr

    opts, args = parseargs(argv)

    # Each -v lowers the threshold one level, each -q raises it; -s
    # silences everything (above CRITICAL).
    if opts.silent:
        level = logging.CRITICAL + 1
    else:
        level = logging.WARNING - ((opts.verbose - opts.quiet) * 10)

    handler = logging.StreamHandler(err)
    handler.setFormatter(logging.Formatter("%(message)s"))
    log.addHandler(handler)
    log.setLevel(level)

    log.debug("Ready to run")
# Script entry point; skipped when the file is imported (e.g. by the tests).
if __name__ == "__main__": # pragma: nocover
    sys.exit(main(sys.argv))

# Script unit and functional tests. These tests are defined after the '__name__
# == "__main__"' idiom so that they aren't loaded when the script is executed.
# If the script (or a symlink to the script) has the usual .py filename
# extension, these tests may be run as follows:
#
#   $ python -m unittest path/to/script.py  (Python 3.X/unittest2)
#   $ nosetests path/to/script.py
#
# If the script does not have the .py extension, the scriptloader nose plugin
# can be used instead:
#
#   $ pip install scriptloader
#   $ nosetests --with-scriptloader path/to/script

# Override the global logger instance with one from a special "tests" namespace.
name = log.name
log = logging.getLogger("%s.tests" % name)
import os
import shutil
import subprocess
import tempfile
import unittest
def getpyfile(filename, split=os.path.splitext, exists=os.path.exists):
    """Return the .py file for a filename.

    Resolves things like .pyo and .pyc files to the original .py. If *filename*
    doesn't have a .py extension, it will be returned as-is.

    :param filename: the path to a file.
    :param split: a function to split extensions from basenames, usually :func:`os.path.splitext`.
    :param exists: a function to determine whether a file exists, usually :func:`os.path.exists`.
    """
    base, ext = split(filename)
    if ext[:3] != ".py":
        return filename
    candidate = base + ".py"
    # Fall back to the original name if no .py sibling is on disk.
    return candidate if exists(candidate) else filename
class TestMain(unittest.TestCase):
    """Unit tests for the script's functions."""

    def test_aunittest(self):
        """This is a dummy unit test."""
        self.assertTrue(1 + 1 == 2)
class TestFunctional(unittest.TestCase):
    """Functional tests.

    These tests build a temporary environment and run the script in it.
    """

    def setUp(self):
        """Prepare for a test.

        This method builds an artificial runtime environment, creates a
        temporary directory and sets it as the working directory.
        """
        unittest.TestCase.setUp(self)
        self.processes = []
        self.env = {
            "PATH": os.environ["PATH"],
            "LANG": "C",
        }
        self.tmpdir = tempfile.mkdtemp(prefix=name + "-test-")
        self.oldcwd = os.getcwd()
        log.debug("Initializing test directory %r", self.tmpdir)
        os.chdir(self.tmpdir)

    def tearDown(self):
        """Clean up after a test.

        This method destroys the temporary directory, resets the working
        directory and reaps any leftover subprocesses.
        """
        unittest.TestCase.tearDown(self)
        log.debug("Cleaning up test directory %r", self.tmpdir)
        shutil.rmtree(self.tmpdir)
        os.chdir(self.oldcwd)
        while self.processes:
            process = self.processes.pop()
            log.debug("Reaping test process with PID %d", process.pid)
            try:
                process.kill()
            # 'except E, e' was Python-2-only syntax; the 'as' form works
            # on Python 2.6+ and Python 3 alike.
            except OSError as e:
                # errno 3 is ESRCH: the process already exited, which is
                # fine; anything else is a real failure.
                if e.errno != 3:
                    raise

    def sub(self, *args, **kwargs):
        """Run a subprocess.

        Returns a tuple (*process*, *stdout*, *stderr*). If the *communicate*
        keyword argument is True, *stdout* and *stderr* will be strings.
        Otherwise, they will be None. *process* is a :class:`subprocess.Popen`
        instance. By default, the path to the script itself will be used as the
        executable and *args* will be passed as arguments to it.

        .. note::
            The value of *executable* will be prepended to *args*.

        :param args: arguments to be passed to :class:`subprocess.Popen`.
        :param kwargs: keyword arguments to be passed to :class:`subprocess.Popen`.
        :param communicate: if True, call :meth:`subprocess.Popen.communicate` after creating the subprocess.
        :param executable: if present, the path to a program to execute instead of this script.
        """
        _kwargs = {
            "executable": os.path.abspath(getpyfile(__file__)),
            "stdin": subprocess.PIPE,
            "stdout": subprocess.PIPE,
            "stderr": subprocess.PIPE,
            "env": self.env,
        }
        communicate = kwargs.pop("communicate", True)
        # Caller-supplied kwargs win over the defaults above.
        _kwargs.update(kwargs)
        kwargs = _kwargs

        args = [kwargs["executable"]] + list(args)
        log.debug("Creating test process %r, %r", args, kwargs)
        process = subprocess.Popen(args, **kwargs)

        if communicate is True:
            stdout, stderr = process.communicate()
        else:
            stdout, stderr = None, None

        self.processes.append(process)

        return process, stdout, stderr

    def test_functionaltest(self):
        """This is a dummy functional test."""
        proc, stdout, stderr = self.sub("-h")

        self.assertEqual(proc.returncode, 0)
        # NOTE(review): on Python 3, communicate() yields bytes here, so
        # this membership test assumes a Python 2 runtime — confirm.
        self.assertTrue("script.py" in stdout)
Replace hardcoded name with reference to new global value.
#!/usr/bin/env python
"""`This`_ is what an ideal Python script might look like.
.. _This: https://github.com/wcmaier/python-script/blob/master/script.py
The script can be used as a template for real scripts that will have several
important properties; namely, they will be:
* readable (:PEP:`8`);
* portable (by using things like :class:`subprocess.Popen` and :mod:`os`);
* testable (thanks to a simple unit and functional testing framework that is
itself tested); and
* configurable (users can change logging verbosity and other runtime details
on the command line).
All of this is accomplished using only modules from the Python standard library
that are available in every installation.
`Git`_ and `Mercurial`_ repositories for this script can be found at `Github`_ and
`Bitbucket`_, respectively::
$ git clone git://github.com/wcmaier/python-script.git
$ hg clone http://bitbucket.org/wcmaier/python-script
.. _Git: http://git-scm.com/
.. _Mercurial: http://mercurial.selenic.com/
.. _Github: http://github.com/wcmaier/python-script
.. _Bitbucket: http://bitbucket.org/wcmaier/python-script
"""
__license__ = """\
Copyright (c) 2010 Will Maier <willmaier@ml1.net>
Permission to use, copy, modify, and distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.\
"""
import logging
import optparse
import sys
# NullHandler was added in Python 3.1 (and 2.7); provide a fallback for
# older interpreters.
try:
    NullHandler = logging.NullHandler
except AttributeError:
    class NullHandler(logging.Handler):
        # Swallow every record.
        def emit(self, record): pass

# Add a do-nothing NullHandler to the module logger to prevent "No handlers
# could be found" errors. The calling code can still add other, more useful
# handlers, or otherwise configure logging.
log = logging.getLogger(__name__)
log.addHandler(NullHandler())
def parseargs(argv):
    """Parse command line arguments.

    Returns a tuple (*opts*, *args*), where *opts* is an
    :class:`optparse.Values` instance and *args* is the list of arguments left
    over after processing.

    :param argv: a list of command line arguments, usually :data:`sys.argv`.
    """
    prog = argv[0]
    parser = optparse.OptionParser(prog=prog)
    # Stop option processing at the first positional argument.
    parser.allow_interspersed_args = False

    defaults = {
        "quiet": 0,
        "silent": False,
        "verbose": 0,
    }

    # Global options.
    parser.add_option("-q", "--quiet", dest="quiet",
                      default=defaults["quiet"], action="count",
                      help="decrease the logging verbosity")
    parser.add_option("-s", "--silent", dest="silent",
                      default=defaults["silent"], action="store_true",
                      help="silence the logger")
    parser.add_option("-v", "--verbose", dest="verbose",
                      default=defaults["verbose"], action="count",
                      help="increase the logging verbosity")

    (opts, args) = parser.parse_args(args=argv[1:])
    return (opts, args)
def main(argv, out=None, err=None):
    """Main entry point.
    Returns a value that can be understood by :func:`sys.exit`.
    :param argv: a list of command line arguments, usually :data:`sys.argv`.
    :param out: stream to write messages; :data:`sys.stdout` if None.
    :param err: stream to write error messages; :data:`sys.stderr` if None.
    """
    out = sys.stdout if out is None else out  # pragma: nocover
    err = sys.stderr if err is None else err  # pragma: nocover
    (opts, args) = parseargs(argv)
    if opts.silent:
        # One step above CRITICAL: effectively disables all output.
        level = logging.CRITICAL + 1
    else:
        # Each -v lowers the threshold by 10; each -q raises it by 10.
        level = logging.WARNING - ((opts.verbose - opts.quiet) * 10)
    handler = logging.StreamHandler(err)
    handler.setFormatter(logging.Formatter("%(message)s"))
    log.addHandler(handler)
    log.setLevel(level)
    log.debug("Ready to run")
if __name__ == "__main__": # pragma: nocover
    sys.exit(main(sys.argv))
# Script unit and functional tests. These tests are defined after the '__name__
# == "__main__"' idiom so that they aren't loaded when the script is executed.
# If the script (or a symlink to the script) has the usual .py filename
# extension, these tests may be run as follows:
#
# $ python -m unittest path/to/script.py (Python 3.X/unittest2)
# $ nosetests path/to/script.py
#
# If the script does not have the .py extension, the scriptloader nose plugin
# can be used instead:
#
# $ pip install scriptloader
# $ nosetests --with-scriptloader path/to/script
# Override the global logger instance with one from a special "tests" namespace.
# 'name' preserves the original logger name; the tests below reuse it (e.g. as
# the tmpdir prefix in TestFunctional.setUp and in output assertions).
name = log.name
log = logging.getLogger("%s.tests" % name)
import os
import shutil
import subprocess
import tempfile
import unittest
def getpyfile(filename, split=os.path.splitext, exists=os.path.exists):
    """Return the .py file for a filename.
    Resolves things like .pyo and .pyc files to the original .py. If *filename*
    doesn't have a .py extension, it will be returned as-is.
    :param filename: the path to a file.
    :param split: a function to split extensions from basenames, usually :func:`os.path.splitext`.
    :param exists: a function to determine whether a file exists, usually :func:`os.path.exists`.
    """
    base, ext = split(filename)
    # Anything whose extension starts with ".py" (.pyc, .pyo, ...) maps back
    # to the ".py" source file; other extensions are left untouched.
    candidate = base + ".py" if ext.startswith(".py") else filename
    # Fall back to the original name when the resolved file doesn't exist.
    return candidate if exists(candidate) else filename
class TestMain(unittest.TestCase):
    def test_aunittest(self):
        """This is a dummy unit test."""
        self.assertEqual(2, 1 + 1)
class TestFunctional(unittest.TestCase):
    """Functional tests.
    These tests build a temporary environment and run the script in it.
    """
    def setUp(self):
        """Prepare for a test.
        This method builds an artificial runtime environment, creates a
        temporary directory and sets it as the working directory.
        """
        unittest.TestCase.setUp(self)
        self.processes = []
        # Minimal, deterministic child environment: keep PATH so executables
        # resolve; force the C locale for stable program output.
        self.env = {
            "PATH": os.environ["PATH"],
            "LANG": "C",
        }
        self.tmpdir = tempfile.mkdtemp(prefix=name + "-test-")
        self.oldcwd = os.getcwd()
        log.debug("Initializing test directory %r", self.tmpdir)
        os.chdir(self.tmpdir)
    def tearDown(self):
        """Clean up after a test.
        This method destroys the temporary directory, resets the working
        directory and reaps any leftover subprocesses.
        """
        unittest.TestCase.tearDown(self)
        log.debug("Cleaning up test directory %r", self.tmpdir)
        shutil.rmtree(self.tmpdir)
        os.chdir(self.oldcwd)
        while self.processes:
            process = self.processes.pop()
            log.debug("Reaping test process with PID %d", process.pid)
            try:
                process.kill()
            # Fixed: 'except OSError, e' is Python 2-only syntax; the
            # 'as' spelling is valid on Python 2.6+ and Python 3.
            except OSError as e:
                # errno 3 (ESRCH): the process is already gone, which is
                # fine here. Anything else is a real error and propagates.
                if e.errno != 3:
                    raise
    def sub(self, *args, **kwargs):
        """Run a subprocess.
        Returns a tuple (*process*, *stdout*, *stderr*). If the *communicate*
        keyword argument is True, *stdout* and *stderr* will be strings.
        Otherwise, they will be None. *process* is a :class:`subprocess.Popen`
        instance. By default, the path to the script itself will be used as the
        executable and *args* will be passed as arguments to it.
        .. note::
            The value of *executable* will be prepended to *args*.
        :param args: arguments to be passed to :class:`subprocess.Popen`.
        :param kwargs: keyword arguments to be passed to :class:`subprocess.Popen`.
        :param communicate: if True, call :meth:`subprocess.Popen.communicate` after creating the subprocess.
        :param executable: if present, the path to a program to execute instead of this script.
        """
        _kwargs = {
            "executable": os.path.abspath(getpyfile(__file__)),
            "stdin": subprocess.PIPE,
            "stdout": subprocess.PIPE,
            "stderr": subprocess.PIPE,
            "env": self.env,
        }
        communicate = kwargs.pop("communicate", True)
        # Caller-supplied kwargs override the defaults above.
        _kwargs.update(kwargs)
        kwargs = _kwargs
        args = [kwargs["executable"]] + list(args)
        log.debug("Creating test process %r, %r", args, kwargs)
        process = subprocess.Popen(args, **kwargs)
        if communicate is True:
            stdout, stderr = process.communicate()
        else:
            stdout, stderr = None, None
        # Track the process so tearDown() can reap it.
        self.processes.append(process)
        return process, stdout, stderr
    def test_functionaltest(self):
        """This is a dummy functional test."""
        proc, stdout, stderr = self.sub("-h")
        self.assertEqual(proc.returncode, 0)
        # NOTE(review): on Python 3 *stdout* is bytes; decode before the
        # 'in' check if this script is ever ported.
        self.assertTrue(name in stdout)
|
# coding=utf-8
import re
from lxml import etree
from lxml.html import HtmlElement
from six.moves.urllib.parse import urljoin
from .utils import make_doc1_url, is_pdf
from ..lib.html_utils import (
set_response_encoding, clean_html, fix_links_in_lxml_tree,
get_html5_parsed_text, get_html_parsed_text,
)
from ..lib.log_tools import make_default_logger
logger = make_default_logger()
# Patch the HtmlElement class to add a function that can handle regular
# expressions within XPath queries. See usages throughout AppellateDocketReport.
def re_xpath(self, path):
    """Evaluate *path* as an XPath expression with the EXSLT ``re:``
    (regular expressions) namespace pre-registered.
    :param path: an XPath expression; may use ``re:`` functions.
    :return: whatever the element's ``xpath()`` method returns.
    """
    return self.xpath(path, namespaces={
        're': 'http://exslt.org/regular-expressions'})
# Attach as a method so every parsed HtmlElement gains .re_xpath().
HtmlElement.re_xpath = re_xpath
class BaseReport(object):
    """A base report for working with pages on PACER."""
    # Matches JavaScript redirects of the form: window.location = "url";
    REDIRECT_REGEX = re.compile('window\.\s*?location\s*=\s*"(.*)"\s*;')
    # Subclasses should override PATH
    PATH = ''
    # Strings below (and in subclasses) are used to identify HTML that should
    # not be parsed or processed for a variety of reasons. Spaces in the strings
    # below are converted to \s whitespace searches using regexes.
    ERROR_STRINGS = [
        "MetaMask.*web3",
        'console\.log\(".*CloudMask',
    ]
    def __init__(self, court_id, pacer_session=None):
        # court_id: the court identifier used to build the ECF host name;
        #     'psc' selects the PACER Service Center host (see .url).
        # pacer_session: session object used by .query()/.download_pdf();
        #     presumably an authenticated requests session — verify at caller.
        self.court_id = court_id
        self.session = pacer_session
        self.tree = None        # lxml tree, set by _parse_text()
        self.response = None    # set by query() in subclasses
        self.is_valid = None    # set by check_validity(); None = unchecked
        super(BaseReport, self).__init__()
    @property
    def url(self):
        # The 'psc' pseudo-court lives on a dedicated host.
        if self.court_id == 'psc':
            return "https://dcecf.psc.uscourts.gov/%s" % self.PATH
        else:
            return "https://ecf.%s.uscourts.gov/%s" % (self.court_id, self.PATH)
    def query(self, *args, **kwargs):
        """Query PACER and set self.response with the response."""
        raise NotImplementedError(".query() must be overridden")
    def parse(self):
        """Parse the data provided in a requests.response object and set
        self.tree to be an lxml etree. In most cases, you won't need to call
        this since it will be automatically called by self.query, if needed.
        :return: None
        """
        self.response.raise_for_status()
        set_response_encoding(self.response)
        self._parse_text(self.response.text)
    def _parse_text(self, text):
        """Parse the HTML as unicode text and set self.tree
        This is a particularly critical method when running tests, which pull
        from local disk instead of from a query response. This is also used
        when data comes from a source other than self.query() (such as a user
        upload). This method should probably be made public as .parse_text().
        :param text: A unicode object
        :return: None
        """
        # NOTE(review): 'unicode' is a Python 2-only builtin; use
        # six.text_type (or str on Python 3) when porting.
        assert isinstance(text, unicode), \
            "Input must be unicode, not %s" % type(text)
        text = clean_html(text)
        self.check_validity(text)
        if self.is_valid:
            self.tree = get_html5_parsed_text(text)
            # Scripts are noise for scraping and can break later processing.
            etree.strip_elements(self.tree, u'script')
            self.tree.rewrite_links(fix_links_in_lxml_tree, base_href=self.url)
    def check_validity(self, text):
        """Place sanity checks here to make sure that the returned text is
        valid and not an error page or some other kind of problem.
        Set self.is_valid flag to True or False
        """
        for error_string in self.ERROR_STRINGS:
            # Spaces in the error string match any run of whitespace.
            error_string_re = re.compile('\s+'.join(error_string.split()),
                                         flags=re.I)
            if error_string_re.search(text):
                self.is_valid = False
                return
        self.is_valid = True
    @property
    def data(self):
        """Extract the data from the tree and return it."""
        raise NotImplementedError('.data() must be overridden.')
    def download_pdf(self, pacer_case_id, pacer_document_number):
        """Download a PDF from PACER.
        Note that this doesn't support attachments yet.
        :returns: request.Response object containing a PDF, if one can be found
        (is not sealed, gone, etc.). Else, returns None.
        """
        # requests-style (connect, read) timeout; PDFs can be slow to serve.
        timeout = (60, 300)
        url = make_doc1_url(self.court_id, pacer_document_number, True)
        data = {
            'case_id': pacer_case_id,
            'got_receipt': '1',
        }
        logger.info("GETting PDF at URL: %s with params: %s" % (url, data))
        r = self.session.get(url, params=data, timeout=timeout)
        if u'This document is not available' in r.text:
            logger.error("Document not available in case: %s at %s" %
                         (url, pacer_case_id))
            return None
        # Some pacer sites use window.location in their JS, so we have to look
        # for that. See: oknd, 13-cv-00357-JED-FHM, doc #24. But, be warned, you
        # can only catch the redirection with JS off.
        m = self.REDIRECT_REGEX.search(r.text)
        if m is not None:
            r = self.session.get(urljoin(url, m.group(1)))
            r.raise_for_status()
        # The request above sometimes generates an HTML page with an iframe
        # containing the PDF, and other times returns the PDF directly. ∴ either
        # get the src of the iframe and download the PDF or just return the pdf.
        r.raise_for_status()
        if is_pdf(r):
            logger.info('Got PDF binary data for case %s at: %s' % (url, data))
            return r
        text = clean_html(r.text)
        tree = get_html_parsed_text(text)
        tree.rewrite_links(fix_links_in_lxml_tree,
                           base_href=r.url)
        try:
            iframe_src = tree.xpath('//iframe/@src')[0]
        except IndexError:
            # No iframe: distinguish PDF-embedded-in-HTML from plain failure.
            if 'pdf:Producer' in text:
                logger.error("Unable to download PDF. PDF content was placed "
                             "directly in HTML. URL: %s, caseid: %s" %
                             (url, pacer_case_id))
            else:
                logger.error("Unable to download PDF. PDF not served as binary "
                             "data and unable to find iframe src attribute. "
                             "URL: %s, caseid: %s" % (url, pacer_case_id))
            return None
        r = self.session.get(iframe_src, timeout=timeout)
        if is_pdf(r):
            logger.info('Got iframed PDF data for case %s at: %s' %
                        (url, iframe_src))
            return r
Use six.text_type instead of the Python 2-only `unicode` builtin
# coding=utf-8
import re
from lxml import etree
from lxml.html import HtmlElement
import six
from six.moves.urllib.parse import urljoin
from .utils import make_doc1_url, is_pdf
from ..lib.html_utils import (
set_response_encoding, clean_html, fix_links_in_lxml_tree,
get_html5_parsed_text, get_html_parsed_text,
)
from ..lib.log_tools import make_default_logger
logger = make_default_logger()
# Patch the HtmlElement class to add a function that can handle regular
# expressions within XPath queries. See usages throughout AppellateDocketReport.
def re_xpath(self, path):
    """Evaluate *path* as an XPath expression with the EXSLT ``re:``
    (regular expressions) namespace pre-registered.
    :param path: an XPath expression; may use ``re:`` functions.
    :return: whatever the element's ``xpath()`` method returns.
    """
    return self.xpath(path, namespaces={
        're': 'http://exslt.org/regular-expressions'})
# Attach as a method so every parsed HtmlElement gains .re_xpath().
HtmlElement.re_xpath = re_xpath
class BaseReport(object):
    """A base report for working with pages on PACER."""
    # Matches JavaScript redirects of the form: window.location = "url";
    # Raw string literal so regex escapes like \. and \s are not (deprecated)
    # string escape sequences. The pattern bytes are unchanged.
    REDIRECT_REGEX = re.compile(r'window\.\s*?location\s*=\s*"(.*)"\s*;')
    # Subclasses should override PATH
    PATH = ''
    # Strings below (and in subclasses) are used to identify HTML that should
    # not be parsed or processed for a variety of reasons. Spaces in the strings
    # below are converted to \s whitespace searches using regexes.
    ERROR_STRINGS = [
        r"MetaMask.*web3",
        r'console\.log\(".*CloudMask',
    ]
    def __init__(self, court_id, pacer_session=None):
        """
        :param court_id: the court identifier used to build the ECF host
            name; 'psc' selects the PACER Service Center host (see .url).
        :param pacer_session: session object used by .query() and
            .download_pdf(); may be None until queries are made.
        """
        self.court_id = court_id
        self.session = pacer_session
        self.tree = None        # lxml tree, set by _parse_text()
        self.response = None    # set by query() in subclasses
        self.is_valid = None    # set by check_validity(); None = unchecked
        super(BaseReport, self).__init__()
    @property
    def url(self):
        """Base URL for this report on the appropriate ECF host."""
        # The 'psc' pseudo-court lives on a dedicated host.
        if self.court_id == 'psc':
            return "https://dcecf.psc.uscourts.gov/%s" % self.PATH
        else:
            return "https://ecf.%s.uscourts.gov/%s" % (self.court_id, self.PATH)
    def query(self, *args, **kwargs):
        """Query PACER and set self.response with the response."""
        raise NotImplementedError(".query() must be overridden")
    def parse(self):
        """Parse the data provided in a requests.response object and set
        self.tree to be an lxml etree. In most cases, you won't need to call
        this since it will be automatically called by self.query, if needed.
        :return: None
        """
        self.response.raise_for_status()
        set_response_encoding(self.response)
        self._parse_text(self.response.text)
    def _parse_text(self, text):
        """Parse the HTML as unicode text and set self.tree
        This is a particularly critical method when running tests, which pull
        from local disk instead of from a query response. This is also used
        when data comes from a source other than self.query() (such as a user
        upload). This method should probably be made public as .parse_text().
        :param text: A unicode object
        :return: None
        """
        assert isinstance(text, six.text_type), \
            "Input must be unicode, not %s" % type(text)
        text = clean_html(text)
        self.check_validity(text)
        if self.is_valid:
            self.tree = get_html5_parsed_text(text)
            # Scripts are noise for scraping and can break later processing.
            etree.strip_elements(self.tree, u'script')
            self.tree.rewrite_links(fix_links_in_lxml_tree, base_href=self.url)
    def check_validity(self, text):
        """Place sanity checks here to make sure that the returned text is
        valid and not an error page or some other kind of problem.
        Set self.is_valid flag to True or False
        """
        for error_string in self.ERROR_STRINGS:
            # Spaces in the error string match any run of whitespace.
            error_string_re = re.compile(r'\s+'.join(error_string.split()),
                                         flags=re.I)
            if error_string_re.search(text):
                self.is_valid = False
                return
        self.is_valid = True
    @property
    def data(self):
        """Extract the data from the tree and return it."""
        raise NotImplementedError('.data() must be overridden.')
    def download_pdf(self, pacer_case_id, pacer_document_number):
        """Download a PDF from PACER.
        Note that this doesn't support attachments yet.
        :param pacer_case_id: the internal PACER id of the case.
        :param pacer_document_number: the document number to fetch.
        :returns: request.Response object containing a PDF, if one can be found
        (is not sealed, gone, etc.). Else, returns None.
        """
        # requests-style (connect, read) timeout; PDFs can be slow to serve.
        timeout = (60, 300)
        url = make_doc1_url(self.court_id, pacer_document_number, True)
        data = {
            'case_id': pacer_case_id,
            'got_receipt': '1',
        }
        logger.info("GETting PDF at URL: %s with params: %s" % (url, data))
        r = self.session.get(url, params=data, timeout=timeout)
        if u'This document is not available' in r.text:
            logger.error("Document not available in case: %s at %s" %
                         (url, pacer_case_id))
            return None
        # Some pacer sites use window.location in their JS, so we have to look
        # for that. See: oknd, 13-cv-00357-JED-FHM, doc #24. But, be warned, you
        # can only catch the redirection with JS off.
        m = self.REDIRECT_REGEX.search(r.text)
        if m is not None:
            r = self.session.get(urljoin(url, m.group(1)))
            r.raise_for_status()
        # The request above sometimes generates an HTML page with an iframe
        # containing the PDF, and other times returns the PDF directly. ∴ either
        # get the src of the iframe and download the PDF or just return the pdf.
        r.raise_for_status()
        if is_pdf(r):
            logger.info('Got PDF binary data for case %s at: %s' % (url, data))
            return r
        text = clean_html(r.text)
        tree = get_html_parsed_text(text)
        tree.rewrite_links(fix_links_in_lxml_tree,
                           base_href=r.url)
        try:
            iframe_src = tree.xpath('//iframe/@src')[0]
        except IndexError:
            # No iframe: distinguish PDF-embedded-in-HTML from plain failure.
            if 'pdf:Producer' in text:
                logger.error("Unable to download PDF. PDF content was placed "
                             "directly in HTML. URL: %s, caseid: %s" %
                             (url, pacer_case_id))
            else:
                logger.error("Unable to download PDF. PDF not served as binary "
                             "data and unable to find iframe src attribute. "
                             "URL: %s, caseid: %s" % (url, pacer_case_id))
            return None
        r = self.session.get(iframe_src, timeout=timeout)
        if is_pdf(r):
            logger.info('Got iframed PDF data for case %s at: %s' %
                        (url, iframe_src))
            return r
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>). All Rights Reserved
# $Id$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from mx.DateTime import now
import wizard
import netsvc
import ir
import pooler
import time
from tools.translate import _
class opportunity2phonecall(wizard.interface):
    """Wizard: schedule a phone call from an open CRM opportunity."""
    case_form = """<?xml version="1.0"?>
    <form string="Schedule Phone Call">
    <separator string="Phone Call Description" colspan="4" />
    <newline />
    <field name='user_id' />
    <field name='deadline' />
    <newline />
    <field name='note' colspan="4"/>
    <newline />
    <field name='section_id' />
    <field name='category_id' domain="[('section_id','=',section_id),('object_id.model', '=', 'crm.phonecall')]"/>
    </form>"""
    case_fields = {
        'user_id' : {'string' : 'Assign To', 'type' : 'many2one', 'relation' : 'res.users'},
        'deadline' : {'string' : 'Planned Date', 'type' : 'datetime' ,'required' :True},
        'note' : {'string' : 'Goals', 'type' : 'text'},
        'category_id' : {'string' : 'Category', 'type' : 'many2one', 'relation' : 'crm.case.categ', 'required' :True},
        'section_id' : {'string' : 'Section', 'type' : 'many2one', 'relation' : 'crm.case.section'},
    }
    def _default_values(self, cr, uid, data, context):
        # Provide form defaults from the opportunity; the opportunity must be
        # in the 'open' state. (An unreachable 'return {}' that followed the
        # raise has been removed.)
        case_obj = pooler.get_pool(cr.dbname).get('crm.opportunity')
        categ_id = pooler.get_pool(cr.dbname).get('crm.case.categ').search(cr, uid, [('name','=','Outbound')])
        case = case_obj.browse(cr, uid, data['id'])
        if case.state != 'open':
            raise wizard.except_wizard(_('Warning !'),
                _('Opportunity should be in \'Open\' state before converting to Phone Call.'))
        return {
            'user_id' : case.user_id and case.user_id.id,
            'category_id' : categ_id and categ_id[0] or case.categ_id and case.categ_id.id,
            'section_id' : case.section_id and case.section_id.id or False,
            'note' : case.description
        }
    def _doIt(self, cr, uid, data, context):
        # Create and open one phone call per selected opportunity, then
        # return the action that shows the last one created.
        form = data['form']
        pool = pooler.get_pool(cr.dbname)
        mod_obj = pool.get('ir.model.data')
        result = mod_obj._get_id(cr, uid, 'crm', 'view_crm_case_phonecalls_filter')
        res = mod_obj.read(cr, uid, result, ['res_id'])
        phonecall_case_obj = pool.get('crm.phonecall')
        opportunity_case_obj = pool.get('crm.opportunity')
        # Select the view
        data_obj = pool.get('ir.model.data')
        id2 = data_obj._get_id(cr, uid, 'crm', 'crm_case_phone_tree_view')
        id3 = data_obj._get_id(cr, uid, 'crm', 'crm_case_phone_form_view')
        if id2:
            id2 = data_obj.browse(cr, uid, id2, context=context).res_id
        if id3:
            id3 = data_obj.browse(cr, uid, id3, context=context).res_id
        opportunites = opportunity_case_obj.browse(cr, uid, data['ids'])
        for opportunity in opportunites:
            new_case = phonecall_case_obj.create(cr, uid, {
                'name' : opportunity.name,
                'case_id' : opportunity.id,
                'user_id' : form['user_id'],
                'categ_id' : form['category_id'],
                # Single 'description' key (the dict previously listed it
                # twice; this is the value that actually took effect).
                'description': data['form']['note'] or opportunity.description,
                'date' : form['deadline'],
                'section_id' : form['section_id'],
                'partner_id': opportunity.partner_id and opportunity.partner_id.id or False,
                'partner_address_id':opportunity.partner_address_id and opportunity.partner_address_id.id or False,
                'partner_phone' : opportunity.phone or (opportunity.partner_address_id and opportunity.partner_address_id.phone or False),
                'partner_mobile' : opportunity.partner_address_id and opportunity.partner_address_id.mobile or False,
                'priority': opportunity.priority,
                'opportunity_id':opportunity.id
            }, context=context)
            phonecall_case_obj.case_open(cr, uid, [new_case])
        value = {
            'name': _('Phone Call'),
            'view_type': 'form',
            'view_mode': 'tree,form',
            'res_model': 'crm.phonecall',
            'res_id' : new_case,
            'views': [(id3,'form'),(id2,'tree'),(False,'calendar'),(False,'graph')],
            'type': 'ir.actions.act_window',
            'search_view_id': res['res_id']
        }
        return value
    states = {
        'init': {
            'actions': [_default_values],
            'result': {'type': 'form', 'arch': case_form, 'fields': case_fields,
                       'state' : [('end', 'Cancel','gtk-cancel'),('order', 'Schedule Phone Call','gtk-go-forward')]}
        },
        'order': {
            'actions': [],
            'result': {'type': 'action', 'action': _doIt, 'state': 'end'}
        }
    }
opportunity2phonecall('crm.opportunity.reschedule_phone_call')
class opportunity2meeting(wizard.interface):
    """Wizard: open the meetings calendar pre-filled from an opportunity."""
    def _makeMeeting(self, cr, uid, data, context):
        # Build and return the act_window action for crm.meeting, with
        # defaults taken from the selected opportunity.
        pool = pooler.get_pool(cr.dbname)
        opportunity_case_obj = pool.get('crm.opportunity')
        data_obj = pool.get('ir.model.data')
        result = data_obj._get_id(cr, uid, 'crm', 'view_crm_case_meetings_filter')
        id = data_obj.read(cr, uid, result, ['res_id'])
        id1 = data_obj._get_id(cr, uid, 'crm', 'crm_case_calendar_view_meet')
        id2 = data_obj._get_id(cr, uid, 'crm', 'crm_case_form_view_meet')
        id3 = data_obj._get_id(cr, uid, 'crm', 'crm_case_tree_view_meet')
        # _get_id returns the xml-id record; resolve each to the view's res_id.
        if id1:
            id1 = data_obj.browse(cr, uid, id1, context=context).res_id
        if id2:
            id2 = data_obj.browse(cr, uid, id2, context=context).res_id
        if id3:
            id3 = data_obj.browse(cr, uid, id3, context=context).res_id
        opportunity = opportunity_case_obj.browse(cr, uid, data['id'], context=context)
        partner_id = opportunity.partner_id and opportunity.partner_id.id or False
        name = opportunity.name
        email = opportunity.email_from
        section_id = opportunity.section_id and opportunity.section_id.id or False
        return {
            'name': _('Meetings'),
            'domain' : "[('user_id','=',%s)]"%(uid),
            'context': {'default_partner_id': partner_id, 'default_section_id': section_id, 'default_email_from': email, 'default_state':'open', 'default_name':name},
            'view_type': 'form',
            'view_mode': 'calendar,form,tree',
            'res_model': 'crm.meeting',
            'view_id': False,
            'views': [(id1,'calendar'),(id2,'form'),(id3,'tree')],
            'type': 'ir.actions.act_window',
            'search_view_id': id['res_id']
        }
    states = {
        'init': {
            'actions': [],
            'result': {'type': 'action', 'action': _makeMeeting, 'state': 'order'}
        },
        'order': {
            'actions': [],
            'result': {'type': 'state', 'state': 'end'}
        }
    }
opportunity2meeting('crm.opportunity.meeting_set')
class partner_opportunity(wizard.interface):
    """Wizard: create a CRM opportunity pre-filled from a partner record."""
    case_form = """<?xml version="1.0"?>
    <form string="Create Opportunity">
    <field name="name"/>
    <field name="partner_id" readonly="1"/>
    <newline/>
    <field name="planned_revenue"/>
    <field name="probability"/>
    </form>"""
    case_fields = {
        'name' : {'type' :'char', 'size' :64, 'string' :'Opportunity Name', 'required' :True},
        'planned_revenue' : {'type' :'float', 'digits' :(16, 2), 'string' : 'Expected Revenue'},
        'probability' : {'type' :'float', 'digits' :(16, 2), 'string' : 'Success Probability'},
        'partner_id' : {'type' :'many2one', 'relation' :'res.partner', 'string' :'Partner'},
    }
    def _select_data(self, cr, uid, data, context):
        # Pre-fill the form with the partner's id and name.
        pool = pooler.get_pool(cr.dbname)
        part_obj = pool.get('res.partner')
        part = part_obj.read(cr, uid, data['id' ], ['name'])
        return {'partner_id' : data['id'], 'name' : part['name'] }
    def _make_opportunity(self, cr, uid, data, context):
        # Create the crm.opportunity record and return the action opening it.
        pool = pooler.get_pool(cr.dbname)
        data_obj = pool.get('ir.model.data')
        result = data_obj._get_id(cr, uid, 'crm', 'view_crm_case_opportunities_filter')
        res = data_obj.read(cr, uid, result, ['res_id'])
        id2 = data_obj._get_id(cr, uid, 'crm', 'crm_case_form_view_oppor')
        id3 = data_obj._get_id(cr, uid, 'crm', 'crm_case_tree_view_oppor')
        if id2:
            id2 = data_obj.browse(cr, uid, id2, context=context).res_id
        if id3:
            id3 = data_obj.browse(cr, uid, id3, context=context).res_id
        part_obj = pool.get('res.partner')
        address = part_obj.address_get(cr, uid, data['ids' ])
        categ_obj = pool.get('crm.case.categ')
        # NOTE(review): assumes a category matching 'Part%' exists;
        # categ_ids[0] raises IndexError otherwise — TODO confirm.
        categ_ids = categ_obj.search(cr, uid, [('name','ilike','Part%')])
        case_obj = pool.get('crm.opportunity')
        opp_id = case_obj.create(cr, uid, {
            'name' : data['form']['name'],
            'planned_revenue' : data['form']['planned_revenue'],
            'probability' : data['form']['probability'],
            'partner_id' : data['form']['partner_id'],
            'partner_address_id' : address['default'],
            'categ_id' : categ_ids[0],
            'state' :'draft',
        })
        value = {
            'name' : _('Opportunity'),
            'view_type' : 'form',
            'view_mode' : 'form,tree',
            'res_model' : 'crm.opportunity',
            'res_id' : opp_id,
            'view_id' : False,
            'views' : [(id2, 'form'), (id3, 'tree'), (False, 'calendar'), (False, 'graph')],
            'type' : 'ir.actions.act_window',
            'search_view_id' : res['res_id']
        }
        return value
    states = {
        'init' : {
            'actions' : [_select_data],
            'result' : {'type' : 'form', 'arch' : case_form, 'fields' : case_fields,
                        'state' : [('end', 'Cancel', 'gtk-cancel'), ('confirm', 'Create Opportunity', 'gtk-go-forward')]}
        },
        'confirm' : {
            'actions' : [],
            'result' : {'type' : 'action', 'action' : _make_opportunity, 'state' : 'end'}
        }
    }
partner_opportunity('crm.case.opportunity.partner_opportunity')
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
[IMP]: crm(Opportunity): Added domain for Current Opportunity in schedule a meeting wizard
bzr revid: rpa@openerp.co.in-20100305100309-xy1we9khjuy5tcqx
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>). All Rights Reserved
# $Id$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from mx.DateTime import now
import wizard
import netsvc
import ir
import pooler
import time
from tools.translate import _
class opportunity2phonecall(wizard.interface):
    """Wizard: schedule a phone call from an open CRM opportunity."""
    case_form = """<?xml version="1.0"?>
    <form string="Schedule Phone Call">
    <separator string="Phone Call Description" colspan="4" />
    <newline />
    <field name='user_id' />
    <field name='deadline' />
    <newline />
    <field name='note' colspan="4"/>
    <newline />
    <field name='section_id' />
    <field name='category_id' domain="[('section_id','=',section_id),('object_id.model', '=', 'crm.phonecall')]"/>
    </form>"""
    case_fields = {
        'user_id' : {'string' : 'Assign To', 'type' : 'many2one', 'relation' : 'res.users'},
        'deadline' : {'string' : 'Planned Date', 'type' : 'datetime' ,'required' :True},
        'note' : {'string' : 'Goals', 'type' : 'text'},
        'category_id' : {'string' : 'Category', 'type' : 'many2one', 'relation' : 'crm.case.categ', 'required' :True},
        'section_id' : {'string' : 'Section', 'type' : 'many2one', 'relation' : 'crm.case.section'},
    }
    def _default_values(self, cr, uid, data, context):
        # Provide form defaults from the opportunity; the opportunity must be
        # in the 'open' state. (An unreachable 'return {}' that followed the
        # raise has been removed.)
        case_obj = pooler.get_pool(cr.dbname).get('crm.opportunity')
        categ_id = pooler.get_pool(cr.dbname).get('crm.case.categ').search(cr, uid, [('name','=','Outbound')])
        case = case_obj.browse(cr, uid, data['id'])
        if case.state != 'open':
            raise wizard.except_wizard(_('Warning !'),
                _('Opportunity should be in \'Open\' state before converting to Phone Call.'))
        return {
            'user_id' : case.user_id and case.user_id.id,
            'category_id' : categ_id and categ_id[0] or case.categ_id and case.categ_id.id,
            'section_id' : case.section_id and case.section_id.id or False,
            'note' : case.description
        }
    def _doIt(self, cr, uid, data, context):
        # Create and open one phone call per selected opportunity, then
        # return the action that shows the last one created.
        form = data['form']
        pool = pooler.get_pool(cr.dbname)
        mod_obj = pool.get('ir.model.data')
        result = mod_obj._get_id(cr, uid, 'crm', 'view_crm_case_phonecalls_filter')
        res = mod_obj.read(cr, uid, result, ['res_id'])
        phonecall_case_obj = pool.get('crm.phonecall')
        opportunity_case_obj = pool.get('crm.opportunity')
        # Select the view
        data_obj = pool.get('ir.model.data')
        id2 = data_obj._get_id(cr, uid, 'crm', 'crm_case_phone_tree_view')
        id3 = data_obj._get_id(cr, uid, 'crm', 'crm_case_phone_form_view')
        if id2:
            id2 = data_obj.browse(cr, uid, id2, context=context).res_id
        if id3:
            id3 = data_obj.browse(cr, uid, id3, context=context).res_id
        opportunites = opportunity_case_obj.browse(cr, uid, data['ids'])
        for opportunity in opportunites:
            new_case = phonecall_case_obj.create(cr, uid, {
                'name' : opportunity.name,
                'case_id' : opportunity.id,
                'user_id' : form['user_id'],
                'categ_id' : form['category_id'],
                # Single 'description' key (the dict previously listed it
                # twice; this is the value that actually took effect).
                'description': data['form']['note'] or opportunity.description,
                'date' : form['deadline'],
                'section_id' : form['section_id'],
                'partner_id': opportunity.partner_id and opportunity.partner_id.id or False,
                'partner_address_id':opportunity.partner_address_id and opportunity.partner_address_id.id or False,
                'partner_phone' : opportunity.phone or (opportunity.partner_address_id and opportunity.partner_address_id.phone or False),
                'partner_mobile' : opportunity.partner_address_id and opportunity.partner_address_id.mobile or False,
                'priority': opportunity.priority,
                'opportunity_id':opportunity.id
            }, context=context)
            phonecall_case_obj.case_open(cr, uid, [new_case])
        value = {
            'name': _('Phone Call'),
            'view_type': 'form',
            'view_mode': 'tree,form',
            'res_model': 'crm.phonecall',
            'res_id' : new_case,
            'views': [(id3,'form'),(id2,'tree'),(False,'calendar'),(False,'graph')],
            'type': 'ir.actions.act_window',
            'search_view_id': res['res_id']
        }
        return value
    states = {
        'init': {
            'actions': [_default_values],
            'result': {'type': 'form', 'arch': case_form, 'fields': case_fields,
                       'state' : [('end', 'Cancel','gtk-cancel'),('order', 'Schedule Phone Call','gtk-go-forward')]}
        },
        'order': {
            'actions': [],
            'result': {'type': 'action', 'action': _doIt, 'state': 'end'}
        }
    }
opportunity2phonecall('crm.opportunity.reschedule_phone_call')
class opportunity2meeting(wizard.interface):
    """Wizard: open the meetings view filtered to the current opportunity."""
    def _makeMeeting(self, cr, uid, data, context):
        # Build and return the act_window action for crm.meeting, filtered to
        # this user's meetings for this opportunity, with defaults taken from
        # the selected opportunity.
        pool = pooler.get_pool(cr.dbname)
        opportunity_case_obj = pool.get('crm.opportunity')
        data_obj = pool.get('ir.model.data')
        result = data_obj._get_id(cr, uid, 'crm', 'view_crm_case_meetings_filter')
        id = data_obj.read(cr, uid, result, ['res_id'])
        id1 = data_obj._get_id(cr, uid, 'crm', 'crm_case_calendar_view_meet')
        id2 = data_obj._get_id(cr, uid, 'crm', 'crm_case_form_view_meet')
        id3 = data_obj._get_id(cr, uid, 'crm', 'crm_case_tree_view_meet')
        # _get_id returns the xml-id record; resolve each to the view's res_id.
        if id1:
            id1 = data_obj.browse(cr, uid, id1, context=context).res_id
        if id2:
            id2 = data_obj.browse(cr, uid, id2, context=context).res_id
        if id3:
            id3 = data_obj.browse(cr, uid, id3, context=context).res_id
        opportunity = opportunity_case_obj.browse(cr, uid, data['id'], context=context)
        partner_id = opportunity.partner_id and opportunity.partner_id.id or False
        name = opportunity.name
        email = opportunity.email_from
        section_id = opportunity.section_id and opportunity.section_id.id or False
        return {
            'name': _('Meetings'),
            'domain' : "[('user_id','=',%s), ('opportunity_id', '=', %s)]"%(uid, data['id']),
            'context': {'default_partner_id': partner_id,
                        'default_opportunity_id': data['id'],
                        'default_section_id': section_id,
                        'default_email_from': email,
                        'default_state':'open', 'default_name':name},
            'view_type': 'form',
            'view_mode': 'tree,form,calendar',
            'res_model': 'crm.meeting',
            'view_id': False,
            'views': [(id3, 'tree'), (id2, 'form'), (id1, 'calendar')],
            'type': 'ir.actions.act_window',
            'search_view_id': id['res_id']
        }
    states = {
        'init': {
            'actions': [],
            'result': {'type': 'action', 'action': _makeMeeting, 'state': 'order'}
        },
        'order': {
            'actions': [],
            'result': {'type': 'state', 'state': 'end'}
        }
    }
opportunity2meeting('crm.opportunity.meeting_set')
class partner_opportunity(wizard.interface):
    """Wizard that creates a crm.opportunity for a given partner.

    Step 'init' shows a form pre-filled with the partner and its name
    (via _select_data); step 'confirm' creates the opportunity record and
    returns an act_window opening it (via _make_opportunity).
    """

    # Form shown in the 'init' state.
    case_form = """<?xml version="1.0"?>
<form string="Create Opportunity">
    <field name="name"/>
    <field name="partner_id" readonly="1"/>
    <newline/>
    <field name="planned_revenue"/>
    <field name="probability"/>
</form>"""

    # Field definitions backing case_form.
    case_fields = {
        'name': {'type': 'char', 'size': 64, 'string': 'Opportunity Name', 'required': True},
        'planned_revenue': {'type': 'float', 'digits': (16, 2), 'string': 'Expected Revenue'},
        'probability': {'type': 'float', 'digits': (16, 2), 'string': 'Success Probability'},
        'partner_id': {'type': 'many2one', 'relation': 'res.partner', 'string': 'Partner'},
    }

    def _select_data(self, cr, uid, data, context):
        """Default values for the 'init' form: the partner id and its name."""
        pool = pooler.get_pool(cr.dbname)
        part_obj = pool.get('res.partner')
        part = part_obj.read(cr, uid, data['id'], ['name'])
        return {'partner_id': data['id'], 'name': part['name']}

    def _make_opportunity(self, cr, uid, data, context):
        """Create the opportunity record and return an act_window opening it."""
        pool = pooler.get_pool(cr.dbname)
        data_obj = pool.get('ir.model.data')
        # Resolve the search view and the form/tree views for opportunities.
        result = data_obj._get_id(cr, uid, 'crm', 'view_crm_case_opportunities_filter')
        res = data_obj.read(cr, uid, result, ['res_id'])
        id2 = data_obj._get_id(cr, uid, 'crm', 'crm_case_form_view_oppor')
        id3 = data_obj._get_id(cr, uid, 'crm', 'crm_case_tree_view_oppor')
        if id2:
            id2 = data_obj.browse(cr, uid, id2, context=context).res_id
        if id3:
            id3 = data_obj.browse(cr, uid, id3, context=context).res_id
        part_obj = pool.get('res.partner')
        address = part_obj.address_get(cr, uid, data['ids'])
        categ_obj = pool.get('crm.case.categ')
        categ_ids = categ_obj.search(cr, uid, [('name', 'ilike', 'Part%')])
        case_obj = pool.get('crm.opportunity')
        opp_id = case_obj.create(cr, uid, {
            'name': data['form']['name'],
            'planned_revenue': data['form']['planned_revenue'],
            'probability': data['form']['probability'],
            'partner_id': data['form']['partner_id'],
            'partner_address_id': address['default'],
            # BUGFIX: original indexed categ_ids[0] unconditionally, raising
            # IndexError when no category matches 'Part%'. Fall back to False
            # (no category) as is conventional for optional many2one fields.
            'categ_id': categ_ids and categ_ids[0] or False,
            'state': 'draft',
        })
        return {
            'name': _('Opportunity'),
            'view_type': 'form',
            'view_mode': 'form,tree',
            'res_model': 'crm.opportunity',
            'res_id': opp_id,
            'view_id': False,
            'views': [(id2, 'form'), (id3, 'tree'), (False, 'calendar'), (False, 'graph')],
            'type': 'ir.actions.act_window',
            'search_view_id': res['res_id']
        }

    # Wizard state machine: form in 'init', record creation in 'confirm'.
    states = {
        'init': {
            'actions': [_select_data],
            'result': {'type': 'form', 'arch': case_form, 'fields': case_fields,
                       'state': [('end', 'Cancel', 'gtk-cancel'),
                                 ('confirm', 'Create Opportunity', 'gtk-go-forward')]}
        },
        'confirm': {
            'actions': [],
            'result': {'type': 'action', 'action': _make_opportunity, 'state': 'end'}
        }
    }
# Register the wizard under its service name so it can be invoked from views.
partner_opportunity('crm.case.opportunity.partner_opportunity')
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
"""This is an example of a local_settings.py module needed by Emocracy Change as
needed and rename it to local_settings.py """
SECRET_KEY = 'SERVER SPECIFIC DO NOT SHARE!'
CONSUMER_KEY = "emocracy consumer key"
CONSUMER_SECRET = "emocracy consumer secret"
DATABASE_ENGINE = 'postgresql_psycopg2' # 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.
DATABASE_NAME = 'thirdparty'
DATABASE_USER = 'thirdparty' # Not used with sqlite3.
DATABASE_PASSWORD = 'thirdparty' # Not used with sqlite3.
DATABASE_HOST = 'localhost' # Set to empty string for localhost. Not used with sqlite3.
DATABASE_PORT = '' # Set to empty string for default. Not used with sqlite3.
# added REALM and EMOCRACY_API_SERVER
"""This is an example of a local_settings.py module needed by Emocracy Change as
needed and rename it to local_settings.py """
SECRET_KEY = 'SERVER SPECIFIC DO NOT SHARE!'
CONSUMER_KEY = "emocracy consumer key"
CONSUMER_SECRET = "emocracy consumer secret"
REALM = 'emo.preeker.net'
EMOCRACY_API_SERVER = "http://emo.preeker.net/"
DATABASE_ENGINE = 'postgresql_psycopg2' # 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.
DATABASE_NAME = 'thirdparty'
DATABASE_USER = 'thirdparty' # Not used with sqlite3.
DATABASE_PASSWORD = 'thirdparty' # Not used with sqlite3.
DATABASE_HOST = 'localhost' # Set to empty string for localhost. Not used with sqlite3.
DATABASE_PORT = '' # Set to empty string for default. Not used with sqlite3.
|
""" The document module provides the Document class, which is a container
for all Bokeh objects that mustbe reflected to the client side BokehJS
library.
"""
from __future__ import absolute_import
import logging

# Use the module's import path (__name__) rather than its file path
# (__file__) so the logger participates in the standard dotted
# "package.module" logger hierarchy and can be filtered by package.
logger = logging.getLogger(__name__)
from bokeh.util.callback_manager import _check_callback
from bokeh._json_encoder import serialize_json
from .plot_object import PlotObject
from .validation import check_integrity
from json import loads
from bokeh.properties import ContainerProperty
class DocumentChangedEvent(object):
    """Base type for all notifications of a change to a Document."""

    def __init__(self, document):
        # The Document instance this event pertains to.
        self.document = document
class ModelChangedEvent(DocumentChangedEvent):
    """Event: attribute ``attr`` of ``model`` changed from ``old`` to ``new``."""

    def __init__(self, document, model, attr, old, new):
        super(ModelChangedEvent, self).__init__(document)
        # Record which model/attribute changed and both sides of the change.
        self.model, self.attr = model, attr
        self.old, self.new = old, new
class RootAddedEvent(DocumentChangedEvent):
    """Event: ``model`` was added as a root of ``document``."""

    def __init__(self, document, model):
        # Attribute assignments are order-independent here.
        self.model = model
        super(RootAddedEvent, self).__init__(document)
class RootRemovedEvent(DocumentChangedEvent):
    """Event: ``model`` was removed from the document's roots."""

    def __init__(self, document, model):
        # Attribute assignments are order-independent here.
        self.model = model
        super(RootRemovedEvent, self).__init__(document)
class Document(object):
    """Container for a coherent collection of Bokeh models.

    A document holds a set of *root* models plus every model reachable
    from them (tracked in ``_all_models``), notifies registered callbacks
    of changes, and supports JSON serialization and JSON patching for
    client/server synchronization.
    """

    def __init__(self):
        # Models explicitly added as roots via add_root()/add().
        self._roots = set()
        # TODO (bev) add vars, stores
        # While > 0, recomputation of _all_models is deferred; see
        # _push_all_models_freeze()/_pop_all_models_freeze().
        self._all_models_freeze_count = 0
        # id -> model for every model reachable from the roots.
        self._all_models = dict()
        # Callbacks registered with on_change().
        self._callbacks = []

    def clear(self):
        ''' Remove all content from the document (including roots, vars, stores) '''
        self._push_all_models_freeze()
        try:
            while len(self._roots) > 0:
                r = next(iter(self._roots))
                self.remove_root(r)
        finally:
            self._pop_all_models_freeze()

    def _destructively_move(self, dest_doc):
        '''Move all fields in this doc to the dest_doc, leaving this doc empty'''
        if dest_doc is self:
            raise RuntimeError("Attempted to overwrite a document with itself")
        dest_doc.clear()
        # we have to remove ALL roots before adding any
        # to the new doc or else models referenced from multiple
        # roots could be in both docs at once, which isn't allowed.
        roots = []
        self._push_all_models_freeze()
        try:
            while self.roots:
                r = next(iter(self.roots))
                self.remove_root(r)
                roots.append(r)
        finally:
            self._pop_all_models_freeze()
        # Sanity checks: every root must be fully detached before re-homing.
        for r in roots:
            if r.document is not None:
                raise RuntimeError("Somehow we didn't detach %r" % (r))
        if len(self._all_models) != 0:
            raise RuntimeError("_all_models still had stuff in it: %r" % (self._all_models))
        for r in roots:
            dest_doc.add_root(r)
        # TODO other fields of doc

    def _push_all_models_freeze(self):
        ''' Defer recomputation of _all_models until the matching pop. '''
        self._all_models_freeze_count += 1

    def _pop_all_models_freeze(self):
        ''' Undo one freeze level; recompute once the last level is popped. '''
        self._all_models_freeze_count -= 1
        if self._all_models_freeze_count == 0:
            self._recompute_all_models()

    def _invalidate_all_models(self):
        ''' Recompute _all_models now, unless a freeze is in effect. '''
        # if freeze count is > 0, we'll recompute on unfreeze
        if self._all_models_freeze_count == 0:
            self._recompute_all_models()

    def _recompute_all_models(self):
        ''' Rebuild the id->model map from the current roots, attaching
        newly-reachable models to this document and detaching the ones
        that are no longer reachable. '''
        new_all_models_set = set()
        for r in self.roots:
            new_all_models_set = new_all_models_set.union(r.references())
        old_all_models_set = set(self._all_models.values())
        to_detach = old_all_models_set - new_all_models_set
        to_attach = new_all_models_set - old_all_models_set
        recomputed = {}
        for m in new_all_models_set:
            recomputed[m._id] = m
        for d in to_detach:
            d._detach_document()
        for a in to_attach:
            a._attach_document(self)
        self._all_models = recomputed

    @property
    def roots(self):
        ''' A copy of the root set (safe to iterate while mutating roots). '''
        return set(self._roots)

    def add_root(self, model):
        ''' Add a model as a root model to this Document.
        Any changes to this model (including to other models referred to
        by it) will trigger "on_change" callbacks registered on this
        Document.
        '''
        if model in self._roots:
            return
        self._push_all_models_freeze()
        try:
            self._roots.add(model)
        finally:
            self._pop_all_models_freeze()
        self._trigger_on_change(RootAddedEvent(self, model))

    # TODO (havocp) should probably drop either this or add_root.
    # this is the backward compatible one but perhaps a tad unclear
    # if we also allow adding other things besides roots.
    def add(self, *objects):
        """ Call add_root() on each object.
        .. warning::
            This function should only be called on top level objects such
            as Plot, and Layout containers.
        Args:
            *objects (PlotObject) : objects to add to the Document
        Returns:
            None
        """
        for obj in objects:
            self.add_root(obj)

    def remove_root(self, model):
        ''' Remove a model as root model from this Document.
        Changes to this model may still trigger "on_change" callbacks
        on this Document, if the model is still referred to by other
        root models.
        '''
        if model not in self._roots:
            return # TODO (bev) ValueError?
        self._push_all_models_freeze()
        try:
            self._roots.remove(model)
        finally:
            self._pop_all_models_freeze()
        self._trigger_on_change(RootRemovedEvent(self, model))

    def get_model_by_id(self, model_id):
        ''' Get the model object for the given ID or None if not found'''
        return self._all_models.get(model_id, None)

    def on_change(self, *callbacks):
        ''' Invoke callback if the document or any PlotObject reachable from its roots changes.
        '''
        for callback in callbacks:
            # Registering the same callback twice is a silent no-op.
            if callback in self._callbacks: continue
            _check_callback(callback, ('event',))
            self._callbacks.append(callback)

    def remove_on_change(self, *callbacks):
        ''' Remove a callback added earlier with on_change()
        Throws an error if the callback wasn't added
        '''
        for callback in callbacks:
            self._callbacks.remove(callback)

    def _trigger_on_change(self, event):
        ''' Dispatch ``event`` to every registered change callback. '''
        for cb in self._callbacks:
            cb(event)

    def _notify_change(self, model, attr, old, new):
        ''' Called by PlotObject when it changes
        '''
        self._trigger_on_change(ModelChangedEvent(self, model, attr, old, new))

    @classmethod
    def _references_json(cls, references):
        '''Given a list of all models in a graph, return JSON representing them and their properties.'''
        references_json = []
        for r in references:
            ref = r.ref
            ref['attributes'] = r.vm_serialize(changed_only=False)
            # 'id' is in 'ref' already
            # TODO (havocp) don't put this id here in the first place,
            # by fixing vm_serialize once we establish that other
            # users of it don't exist anymore or whatever
            del ref['attributes']['id']
            references_json.append(ref)
        return references_json

    @classmethod
    def _instantiate_references_json(cls, references_json):
        '''Given a JSON representation of all the models in a graph, return a dict of new model objects.'''
        # Create all instances, but without setting their props
        references = {}
        for obj in references_json:
            obj_id = obj['id']
            obj_type = obj.get('subtype', obj['type'])
            # NOTE(review): this rebinding shadows the classmethod's `cls`
            # parameter for the rest of the loop body.
            cls = PlotObject.get_class(obj_type)
            instance = cls(id=obj_id, _block_events=True)
            # NOTE(review): a constructor call cannot return None, so this
            # branch looks unreachable -- presumably defensive; confirm.
            if instance is None:
                raise RuntimeError('Error loading model from JSON (type: %s, id: %s)' % (obj_type, obj_id))
            references[instance._id] = instance
        return references

    @classmethod
    def _initialize_references_json(cls, references_json, references):
        '''Given a JSON representation of the models in a graph and new model objects, set the properties on the models from the JSON'''
        for obj in references_json:
            obj_id = obj['id']
            obj_attrs = obj['attributes']
            instance = references[obj_id]
            # replace references with actual instances in obj_attrs
            for p in instance.properties_with_refs():
                if p in obj_attrs:
                    prop = instance.lookup(p)
                    obj_attrs[p] = prop.from_json(obj_attrs[p], models=references)
            # set all properties on the instance
            remove = []
            for key in obj_attrs:
                if key not in instance.properties():
                    # NOTE(review): logger.warn is deprecated in favor of
                    # logger.warning in modern Python.
                    logger.warn("Client sent attr %r for instance %r, which is a client-only or invalid attribute that shouldn't have been sent", key, instance)
                    remove.append(key)
            for key in remove:
                del obj_attrs[key]
            instance.update(**obj_attrs)

    def to_json_string(self):
        ''' Convert the document to a JSON string. '''
        root_ids = []
        for r in self._roots:
            root_ids.append(r._id)
        root_references = self._all_models.values()
        json = {
            'roots' : {
                'root_ids' : root_ids,
                'references' : self._references_json(root_references)
            }
        }
        return serialize_json(json)

    def to_json(self):
        ''' Convert the document to a JSON object. '''
        # this is a total hack to go via a string, needed because
        # our BokehJSONEncoder goes straight to a string.
        doc_json = self.to_json_string()
        return loads(doc_json)

    @classmethod
    def from_json_string(cls, json):
        ''' Load a document from JSON. '''
        json_parsed = loads(json)
        return cls.from_json(json_parsed)

    @classmethod
    def from_json(cls, json):
        ''' Load a document from JSON. '''
        roots_json = json['roots']
        root_ids = roots_json['root_ids']
        references_json = roots_json['references']
        # Two-phase load: instantiate all models first, then set their
        # properties so cross-references can be resolved to instances.
        references = cls._instantiate_references_json(references_json)
        cls._initialize_references_json(references_json, references)
        doc = Document()
        for r in root_ids:
            doc.add_root(references[r])
        return doc

    def replace_with_json(self, json):
        ''' Overwrite everything in this document with the JSON-encoded document '''
        replacement = self.from_json(json)
        replacement._destructively_move(self)

    def create_json_patch_string(self, events):
        ''' Create a JSON string describing a patch to be applied with apply_json_patch_string()
        Args:
            events : list of events to be translated into patches
        Returns:
            str : JSON string which can be applied to make the given updates to obj
        '''
        references = set()
        json_events = []
        for event in events:
            if event.document is not self:
                raise ValueError("Cannot create a patch using events from a different document " + repr(event))
            if isinstance(event, ModelChangedEvent):
                value = event.new
                # the new value is an object that may have
                # not-yet-in-the-remote-doc references, and may also
                # itself not be in the remote doc yet. the remote may
                # already have some of the references, but
                # unfortunately we don't have an easy way to know
                # unless we were to check BEFORE the attr gets changed
                # (we need the old _all_models before setting the
                # property). So we have to send all the references the
                # remote could need, even though it could be inefficient.
                # If it turns out we need to fix this we could probably
                # do it by adding some complexity.
                value_refs = set(PlotObject.collect_plot_objects(value))
                # we know we don't want a whole new copy of the obj we're patching
                # unless it's also the new value
                if event.model != value:
                    value_refs.discard(event.model)
                references = references.union(value_refs)
                json_events.append({ 'kind' : 'ModelChanged',
                                     'model' : event.model.ref,
                                     'attr' : event.attr,
                                     'new' : value })
            elif isinstance(event, RootAddedEvent):
                references = references.union(event.model.references())
                json_events.append({ 'kind' : 'RootAdded',
                                     'model' : event.model.ref })
            elif isinstance(event, RootRemovedEvent):
                json_events.append({ 'kind' : 'RootRemoved',
                                     'model' : event.model.ref })
        json = {
            'events' : json_events,
            'references' : self._references_json(references)
        }
        return serialize_json(json)

    def apply_json_patch_string(self, patch):
        ''' Apply a JSON patch string created by create_json_patch_string() '''
        json_parsed = loads(patch)
        self.apply_json_patch(json_parsed)

    def apply_json_patch(self, patch):
        ''' Apply a JSON patch object created by parsing the result of create_json_patch_string() '''
        references_json = patch['references']
        events_json = patch['events']
        references = self._instantiate_references_json(references_json)
        # Use our existing model instances whenever we have them
        for obj in references.values():
            if obj._id in self._all_models:
                references[obj._id] = self._all_models[obj._id]
        # The model being changed isn't always in references so add it in
        for event_json in events_json:
            if 'model' in event_json:
                model_id = event_json['model']['id']
                if model_id in self._all_models:
                    references[model_id] = self._all_models[model_id]
        self._initialize_references_json(references_json, references)
        for event_json in events_json:
            if event_json['kind'] == 'ModelChanged':
                patched_id = event_json['model']['id']
                if patched_id not in self._all_models:
                    raise RuntimeError("Cannot apply patch to %s which is not in the document" % (str(patched_id)))
                patched_obj = self._all_models[patched_id]
                attr = event_json['attr']
                value = event_json['new']
                if attr in patched_obj.properties_with_refs():
                    prop = patched_obj.lookup(attr)
                    value = prop.from_json(value, models=references)
                if attr in patched_obj.properties():
                    #logger.debug("Patching attribute %s of %r", attr, patched_obj)
                    patched_obj.update(** { attr : value })
                else:
                    # NOTE(review): logger.warn is deprecated in favor of
                    # logger.warning in modern Python.
                    logger.warn("Client sent attr %r on obj %r, which is a client-only or invalid attribute that shouldn't have been sent", attr, patched_obj)
            elif event_json['kind'] == 'RootAdded':
                root_id = event_json['model']['id']
                root_obj = references[root_id]
                self.add_root(root_obj)
            elif event_json['kind'] == 'RootRemoved':
                root_id = event_json['model']['id']
                root_obj = references[root_id]
                self.remove_root(root_obj)
            else:
                raise RuntimeError("Unknown patch event " + repr(event_json))

    def validate(self):
        ''' Run integrity checks on the models reachable from each root. '''
        # logging.basicConfig is a no-op if there's already
        # some logging configured. We want to make sure warnings
        # go somewhere so configure here if nobody has.
        logging.basicConfig(level=logging.INFO)
        root_sets = []
        for r in self.roots:
            refs = r.references()
            # NOTE(review): root_sets is accumulated but never read after
            # this loop.
            root_sets.append(refs)
            check_integrity(refs)
# Remove unused import
""" The document module provides the Document class, which is a container
for all Bokeh objects that mustbe reflected to the client side BokehJS
library.
"""
from __future__ import absolute_import
import logging

# Use the module's import path (__name__) rather than its file path
# (__file__) so the logger participates in the standard dotted
# "package.module" logger hierarchy and can be filtered by package.
logger = logging.getLogger(__name__)
from bokeh.util.callback_manager import _check_callback
from bokeh._json_encoder import serialize_json
from .plot_object import PlotObject
from .validation import check_integrity
from json import loads
class DocumentChangedEvent(object):
    """Base type for all notifications of a change to a Document."""

    def __init__(self, document):
        # The Document instance this event pertains to.
        self.document = document
class ModelChangedEvent(DocumentChangedEvent):
    """Event: attribute ``attr`` of ``model`` changed from ``old`` to ``new``."""

    def __init__(self, document, model, attr, old, new):
        super(ModelChangedEvent, self).__init__(document)
        # Record which model/attribute changed and both sides of the change.
        self.model, self.attr = model, attr
        self.old, self.new = old, new
class RootAddedEvent(DocumentChangedEvent):
    """Event: ``model`` was added as a root of ``document``."""

    def __init__(self, document, model):
        # Attribute assignments are order-independent here.
        self.model = model
        super(RootAddedEvent, self).__init__(document)
class RootRemovedEvent(DocumentChangedEvent):
    """Event: ``model`` was removed from the document's roots."""

    def __init__(self, document, model):
        # Attribute assignments are order-independent here.
        self.model = model
        super(RootRemovedEvent, self).__init__(document)
class Document(object):
    """Container for a coherent collection of Bokeh models.

    A document holds a set of *root* models plus every model reachable
    from them (tracked in ``_all_models``), notifies registered callbacks
    of changes, and supports JSON serialization and JSON patching for
    client/server synchronization.
    """

    def __init__(self):
        # Models explicitly added as roots via add_root()/add().
        self._roots = set()
        # TODO (bev) add vars, stores
        # While > 0, recomputation of _all_models is deferred; see
        # _push_all_models_freeze()/_pop_all_models_freeze().
        self._all_models_freeze_count = 0
        # id -> model for every model reachable from the roots.
        self._all_models = dict()
        # Callbacks registered with on_change().
        self._callbacks = []

    def clear(self):
        ''' Remove all content from the document (including roots, vars, stores) '''
        self._push_all_models_freeze()
        try:
            while len(self._roots) > 0:
                r = next(iter(self._roots))
                self.remove_root(r)
        finally:
            self._pop_all_models_freeze()

    def _destructively_move(self, dest_doc):
        '''Move all fields in this doc to the dest_doc, leaving this doc empty'''
        if dest_doc is self:
            raise RuntimeError("Attempted to overwrite a document with itself")
        dest_doc.clear()
        # we have to remove ALL roots before adding any
        # to the new doc or else models referenced from multiple
        # roots could be in both docs at once, which isn't allowed.
        roots = []
        self._push_all_models_freeze()
        try:
            while self.roots:
                r = next(iter(self.roots))
                self.remove_root(r)
                roots.append(r)
        finally:
            self._pop_all_models_freeze()
        # Sanity checks: every root must be fully detached before re-homing.
        for r in roots:
            if r.document is not None:
                raise RuntimeError("Somehow we didn't detach %r" % (r))
        if len(self._all_models) != 0:
            raise RuntimeError("_all_models still had stuff in it: %r" % (self._all_models))
        for r in roots:
            dest_doc.add_root(r)
        # TODO other fields of doc

    def _push_all_models_freeze(self):
        ''' Defer recomputation of _all_models until the matching pop. '''
        self._all_models_freeze_count += 1

    def _pop_all_models_freeze(self):
        ''' Undo one freeze level; recompute once the last level is popped. '''
        self._all_models_freeze_count -= 1
        if self._all_models_freeze_count == 0:
            self._recompute_all_models()

    def _invalidate_all_models(self):
        ''' Recompute _all_models now, unless a freeze is in effect. '''
        # if freeze count is > 0, we'll recompute on unfreeze
        if self._all_models_freeze_count == 0:
            self._recompute_all_models()

    def _recompute_all_models(self):
        ''' Rebuild the id->model map from the current roots, attaching
        newly-reachable models to this document and detaching the ones
        that are no longer reachable. '''
        new_all_models_set = set()
        for r in self.roots:
            new_all_models_set = new_all_models_set.union(r.references())
        old_all_models_set = set(self._all_models.values())
        to_detach = old_all_models_set - new_all_models_set
        to_attach = new_all_models_set - old_all_models_set
        recomputed = {}
        for m in new_all_models_set:
            recomputed[m._id] = m
        for d in to_detach:
            d._detach_document()
        for a in to_attach:
            a._attach_document(self)
        self._all_models = recomputed

    @property
    def roots(self):
        ''' A copy of the root set (safe to iterate while mutating roots). '''
        return set(self._roots)

    def add_root(self, model):
        ''' Add a model as a root model to this Document.
        Any changes to this model (including to other models referred to
        by it) will trigger "on_change" callbacks registered on this
        Document.
        '''
        if model in self._roots:
            return
        self._push_all_models_freeze()
        try:
            self._roots.add(model)
        finally:
            self._pop_all_models_freeze()
        self._trigger_on_change(RootAddedEvent(self, model))

    # TODO (havocp) should probably drop either this or add_root.
    # this is the backward compatible one but perhaps a tad unclear
    # if we also allow adding other things besides roots.
    def add(self, *objects):
        """ Call add_root() on each object.
        .. warning::
            This function should only be called on top level objects such
            as Plot, and Layout containers.
        Args:
            *objects (PlotObject) : objects to add to the Document
        Returns:
            None
        """
        for obj in objects:
            self.add_root(obj)

    def remove_root(self, model):
        ''' Remove a model as root model from this Document.
        Changes to this model may still trigger "on_change" callbacks
        on this Document, if the model is still referred to by other
        root models.
        '''
        if model not in self._roots:
            return # TODO (bev) ValueError?
        self._push_all_models_freeze()
        try:
            self._roots.remove(model)
        finally:
            self._pop_all_models_freeze()
        self._trigger_on_change(RootRemovedEvent(self, model))

    def get_model_by_id(self, model_id):
        ''' Get the model object for the given ID or None if not found'''
        return self._all_models.get(model_id, None)

    def on_change(self, *callbacks):
        ''' Invoke callback if the document or any PlotObject reachable from its roots changes.
        '''
        for callback in callbacks:
            # Registering the same callback twice is a silent no-op.
            if callback in self._callbacks: continue
            _check_callback(callback, ('event',))
            self._callbacks.append(callback)

    def remove_on_change(self, *callbacks):
        ''' Remove a callback added earlier with on_change()
        Throws an error if the callback wasn't added
        '''
        for callback in callbacks:
            self._callbacks.remove(callback)

    def _trigger_on_change(self, event):
        ''' Dispatch ``event`` to every registered change callback. '''
        for cb in self._callbacks:
            cb(event)

    def _notify_change(self, model, attr, old, new):
        ''' Called by PlotObject when it changes
        '''
        self._trigger_on_change(ModelChangedEvent(self, model, attr, old, new))

    @classmethod
    def _references_json(cls, references):
        '''Given a list of all models in a graph, return JSON representing them and their properties.'''
        references_json = []
        for r in references:
            ref = r.ref
            ref['attributes'] = r.vm_serialize(changed_only=False)
            # 'id' is in 'ref' already
            # TODO (havocp) don't put this id here in the first place,
            # by fixing vm_serialize once we establish that other
            # users of it don't exist anymore or whatever
            del ref['attributes']['id']
            references_json.append(ref)
        return references_json

    @classmethod
    def _instantiate_references_json(cls, references_json):
        '''Given a JSON representation of all the models in a graph, return a dict of new model objects.'''
        # Create all instances, but without setting their props
        references = {}
        for obj in references_json:
            obj_id = obj['id']
            obj_type = obj.get('subtype', obj['type'])
            # NOTE(review): this rebinding shadows the classmethod's `cls`
            # parameter for the rest of the loop body.
            cls = PlotObject.get_class(obj_type)
            instance = cls(id=obj_id, _block_events=True)
            # NOTE(review): a constructor call cannot return None, so this
            # branch looks unreachable -- presumably defensive; confirm.
            if instance is None:
                raise RuntimeError('Error loading model from JSON (type: %s, id: %s)' % (obj_type, obj_id))
            references[instance._id] = instance
        return references

    @classmethod
    def _initialize_references_json(cls, references_json, references):
        '''Given a JSON representation of the models in a graph and new model objects, set the properties on the models from the JSON'''
        for obj in references_json:
            obj_id = obj['id']
            obj_attrs = obj['attributes']
            instance = references[obj_id]
            # replace references with actual instances in obj_attrs
            for p in instance.properties_with_refs():
                if p in obj_attrs:
                    prop = instance.lookup(p)
                    obj_attrs[p] = prop.from_json(obj_attrs[p], models=references)
            # set all properties on the instance
            remove = []
            for key in obj_attrs:
                if key not in instance.properties():
                    # NOTE(review): logger.warn is deprecated in favor of
                    # logger.warning in modern Python.
                    logger.warn("Client sent attr %r for instance %r, which is a client-only or invalid attribute that shouldn't have been sent", key, instance)
                    remove.append(key)
            for key in remove:
                del obj_attrs[key]
            instance.update(**obj_attrs)

    def to_json_string(self):
        ''' Convert the document to a JSON string. '''
        root_ids = []
        for r in self._roots:
            root_ids.append(r._id)
        root_references = self._all_models.values()
        json = {
            'roots' : {
                'root_ids' : root_ids,
                'references' : self._references_json(root_references)
            }
        }
        return serialize_json(json)

    def to_json(self):
        ''' Convert the document to a JSON object. '''
        # this is a total hack to go via a string, needed because
        # our BokehJSONEncoder goes straight to a string.
        doc_json = self.to_json_string()
        return loads(doc_json)

    @classmethod
    def from_json_string(cls, json):
        ''' Load a document from JSON. '''
        json_parsed = loads(json)
        return cls.from_json(json_parsed)

    @classmethod
    def from_json(cls, json):
        ''' Load a document from JSON. '''
        roots_json = json['roots']
        root_ids = roots_json['root_ids']
        references_json = roots_json['references']
        # Two-phase load: instantiate all models first, then set their
        # properties so cross-references can be resolved to instances.
        references = cls._instantiate_references_json(references_json)
        cls._initialize_references_json(references_json, references)
        doc = Document()
        for r in root_ids:
            doc.add_root(references[r])
        return doc

    def replace_with_json(self, json):
        ''' Overwrite everything in this document with the JSON-encoded document '''
        replacement = self.from_json(json)
        replacement._destructively_move(self)

    def create_json_patch_string(self, events):
        ''' Create a JSON string describing a patch to be applied with apply_json_patch_string()
        Args:
            events : list of events to be translated into patches
        Returns:
            str : JSON string which can be applied to make the given updates to obj
        '''
        references = set()
        json_events = []
        for event in events:
            if event.document is not self:
                raise ValueError("Cannot create a patch using events from a different document " + repr(event))
            if isinstance(event, ModelChangedEvent):
                value = event.new
                # the new value is an object that may have
                # not-yet-in-the-remote-doc references, and may also
                # itself not be in the remote doc yet. the remote may
                # already have some of the references, but
                # unfortunately we don't have an easy way to know
                # unless we were to check BEFORE the attr gets changed
                # (we need the old _all_models before setting the
                # property). So we have to send all the references the
                # remote could need, even though it could be inefficient.
                # If it turns out we need to fix this we could probably
                # do it by adding some complexity.
                value_refs = set(PlotObject.collect_plot_objects(value))
                # we know we don't want a whole new copy of the obj we're patching
                # unless it's also the new value
                if event.model != value:
                    value_refs.discard(event.model)
                references = references.union(value_refs)
                json_events.append({ 'kind' : 'ModelChanged',
                                     'model' : event.model.ref,
                                     'attr' : event.attr,
                                     'new' : value })
            elif isinstance(event, RootAddedEvent):
                references = references.union(event.model.references())
                json_events.append({ 'kind' : 'RootAdded',
                                     'model' : event.model.ref })
            elif isinstance(event, RootRemovedEvent):
                json_events.append({ 'kind' : 'RootRemoved',
                                     'model' : event.model.ref })
        json = {
            'events' : json_events,
            'references' : self._references_json(references)
        }
        return serialize_json(json)

    def apply_json_patch_string(self, patch):
        ''' Apply a JSON patch string created by create_json_patch_string() '''
        json_parsed = loads(patch)
        self.apply_json_patch(json_parsed)

    def apply_json_patch(self, patch):
        ''' Apply a JSON patch object created by parsing the result of create_json_patch_string() '''
        references_json = patch['references']
        events_json = patch['events']
        references = self._instantiate_references_json(references_json)
        # Use our existing model instances whenever we have them
        for obj in references.values():
            if obj._id in self._all_models:
                references[obj._id] = self._all_models[obj._id]
        # The model being changed isn't always in references so add it in
        for event_json in events_json:
            if 'model' in event_json:
                model_id = event_json['model']['id']
                if model_id in self._all_models:
                    references[model_id] = self._all_models[model_id]
        self._initialize_references_json(references_json, references)
        for event_json in events_json:
            if event_json['kind'] == 'ModelChanged':
                patched_id = event_json['model']['id']
                if patched_id not in self._all_models:
                    raise RuntimeError("Cannot apply patch to %s which is not in the document" % (str(patched_id)))
                patched_obj = self._all_models[patched_id]
                attr = event_json['attr']
                value = event_json['new']
                if attr in patched_obj.properties_with_refs():
                    prop = patched_obj.lookup(attr)
                    value = prop.from_json(value, models=references)
                if attr in patched_obj.properties():
                    #logger.debug("Patching attribute %s of %r", attr, patched_obj)
                    patched_obj.update(** { attr : value })
                else:
                    # NOTE(review): logger.warn is deprecated in favor of
                    # logger.warning in modern Python.
                    logger.warn("Client sent attr %r on obj %r, which is a client-only or invalid attribute that shouldn't have been sent", attr, patched_obj)
            elif event_json['kind'] == 'RootAdded':
                root_id = event_json['model']['id']
                root_obj = references[root_id]
                self.add_root(root_obj)
            elif event_json['kind'] == 'RootRemoved':
                root_id = event_json['model']['id']
                root_obj = references[root_id]
                self.remove_root(root_obj)
            else:
                raise RuntimeError("Unknown patch event " + repr(event_json))

    def validate(self):
        ''' Run integrity checks on the models reachable from each root. '''
        # logging.basicConfig is a no-op if there's already
        # some logging configured. We want to make sure warnings
        # go somewhere so configure here if nobody has.
        logging.basicConfig(level=logging.INFO)
        root_sets = []
        for r in self.roots:
            refs = r.references()
            # NOTE(review): root_sets is accumulated but never read after
            # this loop.
            root_sets.append(refs)
            check_integrity(refs)
|
# This is a virtual instrument abstracting a homodyne
# source which controls RF and LO sources
import logging
import numpy as np
from time import time
from qcodes.instrument.base import Instrument
from qcodes.utils import validators as vals
from qcodes.instrument.parameter import ManualParameter
# Used for uploading the right AWG sequences
from pycqed.measurement.pulse_sequences import standard_sequences as st_seqs
import time
class HeterodyneInstrument(Instrument):
"""
This is a virtual instrument for a homodyne source
Instrument is CBox, UHFQC, ATS and DDM compatible
"""
shared_kwargs = ['RF', 'LO', 'AWG']
    def __init__(self, name, RF, LO, AWG=None, acquisition_instr=None,
                 acquisition_instr_controller=None,
                 single_sideband_demod=False, **kw):
        """Create the virtual heterodyne source.

        Args:
            name: name of this virtual instrument.
            RF: microwave source generating the readout (RF) tone.
            LO: local-oscillator microwave source (handled by common_init).
            AWG: AWG used for the acquisition-trigger marker sequence.
            acquisition_instr: name of the acquisition instrument.
            acquisition_instr_controller: optional controller instrument
                for the acquisition instrument.
            single_sideband_demod: use single-sideband demodulation if True.
        """
        # Store the RF source before common_init so parameter callbacks that
        # touch self.RF find it in place.
        self.RF = RF
        self.common_init(name, LO, AWG, acquisition_instr,
                         single_sideband_demod, **kw)
        self.add_parameter('RF_power', label='RF power',
                           unit='dBm', vals=vals.Numbers(),
                           set_cmd=self._set_RF_power,
                           get_cmd=self._get_RF_power)
        self.add_parameter('acquisition_instr_controller',
                           set_cmd=self._set_acquisition_instr_controller,
                           get_cmd=self._get_acquisition_instr_controller,
                           vals=vals.Anything())
        self.acquisition_instr_controller(acquisition_instr_controller)
        # Cache backing the RF_power parameter; no power has been set yet.
        self._RF_power = None
    def common_init(self, name, LO, AWG, acquisition_instr='CBox',
                    single_sideband_demod=False, **kw):
        """Shared initialization for heterodyne-style virtual instruments.

        Creates the qcodes parameters (frequency, modulation, averaging,
        trigger/readout timing, status, marker channels) and initializes
        the private caches backing them.
        """
        logging.info(__name__ + ' : Initializing instrument')
        Instrument.__init__(self, name, **kw)
        self.LO = LO
        self.AWG = AWG
        self.add_parameter('frequency', label='Heterodyne frequency',
                           unit='Hz', vals=vals.Numbers(9e3, 40e9),
                           get_cmd=self._get_frequency,
                           set_cmd=self._set_frequency)
        self.add_parameter('f_RO_mod', label='Intermodulation frequency',
                           unit='Hz', vals=vals.Numbers(-600e6, 600e6),
                           set_cmd=self._set_f_RO_mod,
                           get_cmd=self._get_f_RO_mod)
        self.add_parameter('single_sideband_demod', vals=vals.Bool(),
                           label='Single sideband demodulation',
                           parameter_class=ManualParameter,
                           initial_value=single_sideband_demod)
        self.add_parameter('acquisition_instr', vals=vals.Strings(),
                           label='Acquisition instrument',
                           set_cmd=self._set_acquisition_instr,
                           get_cmd=self._get_acquisition_instr)
        self.add_parameter('nr_averages', label='Number of averages',
                           vals=vals.Numbers(min_value=0, max_value=1e6),
                           parameter_class=ManualParameter,
                           initial_value=1024)
        self.add_parameter('status', vals=vals.Enum('On', 'Off'),
                           set_cmd=self._set_status,
                           get_cmd=self._get_status)
        self.add_parameter('trigger_separation', label='Trigger separation',
                           unit='s', vals=vals.Numbers(0),
                           set_cmd=self._set_trigger_separation,
                           get_cmd=self._get_trigger_separation)
        self.add_parameter('RO_length', label='Readout length',
                           unit='s', vals=vals.Numbers(0),
                           set_cmd=self._set_RO_length,
                           get_cmd=self._get_RO_length)
        self.add_parameter('auto_seq_loading', vals=vals.Bool(),
                           label='Automatic AWG sequence loading',
                           parameter_class=ManualParameter,
                           initial_value=True)
        self.add_parameter('acq_marker_channels', vals=vals.Strings(),
                           label='Acquisition trigger channels',
                           docstring='comma (,) separated string of marker channels',
                           set_cmd=self._set_acq_marker_channels,
                           get_cmd=self._get_acq_marker_channels)
        # Private caches backing the parameters above.
        self._trigger_separation = 10e-6
        self._RO_length = 2274e-9
        # Name of the last uploaded AWG sequence; '' forces a (re)upload.
        self._awg_seq_filename = ''
        self._awg_seq_parameters_changed = True
        self._UHFQC_awg_parameters_changed = True
        self.acquisition_instr(acquisition_instr)
        self.status('Off')
        self._f_RO_mod = 10e6
        self._frequency = 5e9
        self.frequency(5e9)
        self.f_RO_mod(10e6)
        self._eps = 0.01 # Hz slack for comparing frequencies
        self._acq_marker_channels = ('ch4_marker1,ch4_marker2,' +
                                     'ch3_marker1,ch3_marker2')
    def prepare(self, get_t_base=True):
        """Prepare for probing: upload the AWG marker sequence if it (or its
        parameters) changed, configure the selected acquisition instrument,
        then start the AWG and switch the generators on.

        Args:
            get_t_base: forwarded to the CBox/ATS preparation routines
                (ignored by UHFQC/DDM).

        Raises:
            ValueError: if acquisition_instr() matches none of the
                supported instrument names.
        """
        # Uploading the AWG sequence
        if (self._awg_seq_filename not in self.AWG.setup_filename() or
                self._awg_seq_parameters_changed) and self.auto_seq_loading():
            self._awg_seq_filename = \
                st_seqs.generate_and_upload_marker_sequence(
                    5e-9, self.trigger_separation(), RF_mod=False,
                    acq_marker_channels=self.acq_marker_channels())
            self._awg_seq_parameters_changed = False
        # Preparing the acquisition instruments; dispatch is by substring
        # match on the instrument name
        if 'CBox' in self.acquisition_instr():
            self.prepare_CBox(get_t_base)
        elif 'UHFQC' in self.acquisition_instr():
            self.prepare_UHFQC()
        elif 'ATS' in self.acquisition_instr():
            self.prepare_ATS(get_t_base)
        elif 'DDM' in self.acquisition_instr():
            self.prepare_DDM()
        else:
            raise ValueError("Invalid acquisition instrument {} in {}".format(
                self.acquisition_instr(), self.__class__.__name__))
        # turn on the AWG and the MWGs
        self.AWG.run()
        self.on()
    def prepare_CBox(self, get_t_base=True):
        """Configure the CBox for integrated-average acquisition.

        If get_t_base is True, (re)compute the integer cos/sin integration
        weights from the current f_RO_mod and upload them.
        """
        if get_t_base:
            trace_length = 512
            # 5 ns sample spacing of the CBox trace
            tbase = np.arange(0, 5*trace_length, 5)*1e-9
            # 7-bit signed integer weights expected by the CBox
            self.cosI = np.floor(
                127.*np.cos(2*np.pi*self.f_RO_mod()*tbase))
            self.sinI = np.floor(
                127.*np.sin(2*np.pi*self.f_RO_mod()*tbase))
            self._acquisition_instr.sig0_integration_weights(self.cosI)
            self._acquisition_instr.sig1_integration_weights(self.sinI)
        # because using integrated avg
        self._acquisition_instr.set('nr_samples', 1)
        self._acquisition_instr.nr_averages(int(self.nr_averages()))
    def prepare_UHFQC(self):
        """Configure the UHFQC result logger for single-point integrated
        averaging and compute the scale factor that converts raw results
        to volts (used by probe_UHFQC)."""
        # Upload the correct integration weights
        if self.single_sideband_demod():
            self._acquisition_instr.prepare_SSB_weight_and_rotation(
                IF=self.f_RO_mod(), weight_function_I=0, weight_function_Q=1)
        else:
            self._acquisition_instr.prepare_DSB_weight_and_rotation(
                IF=self.f_RO_mod(), weight_function_I=0, weight_function_Q=1)
        # this sets the result to integration and rotation outcome
        self._acquisition_instr.quex_rl_source(2)
        # only one sample to average over
        self._acquisition_instr.quex_rl_length(1)
        self._acquisition_instr.quex_rl_avgcnt(
            int(np.log2(self.nr_averages())))
        # integration window in samples at the 1.8 GSa/s UHFQC rate
        self._acquisition_instr.quex_wint_length(
            int(self.RO_length()*1.8e9))
        # Configure the result logger to not do any averaging
        # The AWG program uses userregs/0 to define the number of
        # iterations in the loop
        self._acquisition_instr.awgs_0_userregs_0(
            int(self.nr_averages()))
        self._acquisition_instr.awgs_0_userregs_1(0)  # 0 for rl, 1 for iavg
        self._acquisition_instr.acquisition_initialize([0, 1], 'rl')
        self.scale_factor_UHFQC = 1/(1.8e9*self.RO_length() *
                                     int(self.nr_averages()))
def prepare_ATS(self, get_t_base=True):
if self.AWG != None:
if (self._awg_seq_filename not in self.AWG.setup_filename() or
self._awg_seq_parameters_changed) and \
self.auto_seq_loading():
self._awg_seq_filename = \
st_seqs.generate_and_upload_marker_sequence(
self.RO_length(), self.trigger_separation(),
RF_mod=False,
acq_marker_channels=self.acq_marker_channels())
self._awg_seq_parameters_changed = False
if get_t_base:
self._acquisition_instr_controller.demodulation_frequency = \
self.f_RO_mod()
buffers_per_acquisition = 8
self._acquisition_instr_controller.update_acquisitionkwargs(
# mode='NPT',
samples_per_record=64*1000, # 4992,
records_per_buffer=int(
self.nr_averages()/buffers_per_acquisition), # 70 segments
buffers_per_acquisition=buffers_per_acquisition,
channel_selection='AB',
transfer_offset=0,
external_startcapture='ENABLED',
enable_record_headers='DISABLED',
alloc_buffers='DISABLED',
fifo_only_streaming='DISABLED',
interleave_samples='DISABLED',
get_processed_data='DISABLED',
allocated_buffers=buffers_per_acquisition,
buffer_timeout=1000)
def probe(self):
if 'CBox' in self.acquisition_instr():
return self.probe_CBox()
elif 'UHFQC' in self.acquisition_instr():
return self.probe_UHFQC()
elif 'ATS' in self.acquisition_instr():
return self.probe_ATS()
elif 'DDM' in self.acquisition_instr():
return self.probe_DDM()
else:
raise ValueError("Invalid acquisition instrument {} in {}".format(
self.acquisition_instr(), self.__class__.__name__))
    def probe_CBox(self):
        """Run one CBox integration-averaging acquisition and return the
        scaled result as a complex number I + 1j*Q (in V)."""
        if self.single_sideband_demod():
            demodulation_mode = 'single'
        else:
            demodulation_mode = 'double'
        # cycle through 'idle' to restart the acquisition cleanly
        self._acquisition_instr.acquisition_mode('idle')
        self._acquisition_instr.acquisition_mode('integration averaging')
        self._acquisition_instr.demodulation_mode(demodulation_mode)
        # d = self.CBox.get_integrated_avg_results()
        # quick fix for spec units. Need to properly implement it later
        # after this, output is in V
        scale_factor_dacV = 1.*0.75/128.
        # scale_factor_integration = 1./float(self.f_RO_mod() *
        # self.CBox.nr_samples()*5e-9)
        scale_factor_integration = \
            1 / (64.*self._acquisition_instr.integration_length())
        factor = scale_factor_dacV*scale_factor_integration
        d = np.double(self._acquisition_instr.get_integrated_avg_results()) \
            * np.double(factor)
        # print(np.size(d))
        return d[0][0]+1j*d[1][0]
    def probe_UHFQC(self):
        """Poll one sample from the UHFQC result logger and return the
        scaled complex IQ value. Re-runs prepare() first if any relevant
        parameter changed since the last preparation."""
        if self._awg_seq_parameters_changed or \
                self._UHFQC_awg_parameters_changed:
            self.prepare()
        dataset = self._acquisition_instr.acquisition_poll(
            samples=1, acquisition_time=0.001, timeout=10)
        # scale_factor_UHFQC is computed in prepare_UHFQC
        dat = (self.scale_factor_UHFQC*dataset[0][0] +
               self.scale_factor_UHFQC*1j*dataset[1][0])
        return dat
def probe_ATS(self):
dat = self._acquisition_instr_controller.acquisition()
return dat
    def finish(self):
        # NOTE(review): this definition is shadowed by a second `finish`
        # defined later in this class body; only that later definition is
        # effective, so self.AWG.stop() here is never executed.
        self.off()
        self.AWG.stop()
    def _set_frequency(self, val):
        """Set the heterodyne frequency: RF at val, LO at val - f_RO_mod."""
        self._frequency = val
        # this is the definition agreed upon in issue 131
        self.RF.frequency(val)
        self.LO.frequency(val-self._f_RO_mod)
    def _get_frequency(self):
        """Return the cached heterodyne frequency, warning if the hardware
        RF/LO settings are inconsistent with it."""
        freq = self.RF.frequency()
        LO_freq = self.LO.frequency()
        # LO should sit f_RO_mod below RF (within self._eps Hz slack)
        if abs(LO_freq - freq + self._f_RO_mod) > self._eps:
            logging.warning('f_RO_mod between RF and LO is not set correctly')
            logging.warning('\tf_RO_mod = {}, LO_freq = {}, RF_freq = {}'
                            .format(self._f_RO_mod, LO_freq, freq))
        if abs(self._frequency - freq) > self._eps:
            logging.warning('Heterodyne frequency does not match RF frequency')
        return self._frequency
    def _set_f_RO_mod(self, val):
        """Store the new intermodulation frequency and retune the LO so the
        RF frequency stays fixed."""
        self._f_RO_mod = val
        self.LO.frequency(self._frequency - val)
    def _get_f_RO_mod(self):
        """Return the cached IF, warning if RF/LO are inconsistent with it."""
        freq = self.RF.frequency()
        LO_freq = self.LO.frequency()
        if abs(LO_freq - freq + self._f_RO_mod) > self._eps:
            logging.warning('f_RO_mod between RF and LO is not set correctly')
            logging.warning('\tf_RO_mod = {}, LO_freq = {}, RF_freq = {}'
                            .format(self._f_RO_mod, LO_freq, freq))
        return self._f_RO_mod
    def _set_RF_power(self, val):
        """Set the RF generator output power (dBm) and cache the value."""
        self.RF.power(val)
        self._RF_power = val
        # internally stored to allow setting RF from stored setting
    def _get_RF_power(self):
        """Return the cached RF power (does not query the instrument)."""
        return self._RF_power
    def _set_status(self, val):
        """Turn both generators on ('On') or off (any other value)."""
        if val == 'On':
            self.on()
        else:
            self.off()
    def _get_status(self):
        """Return 'On'/'Off' when both generators agree, otherwise a
        descriptive mixed-state string."""
        if (self.LO.status().startswith('On') and
                self.RF.status().startswith('On')):
            return 'On'
        elif (self.LO.status().startswith('Off') and
                self.RF.status().startswith('Off')):
            return 'Off'
        else:
            return 'LO: {}, RF: {}'.format(self.LO.status(), self.RF.status())
    def on(self):
        """Turn on both generators, waiting 1 s if either was off so the
        outputs can settle."""
        if self.LO.status().startswith('Off') or \
                self.RF.status().startswith('Off'):
            wait = True
        else:
            wait = False
        self.LO.on()
        self.RF.on()
        if wait:
            # The R&S MWG-s take some time to stabilize their outputs
            time.sleep(1.0)
    def off(self):
        """Turn off both generators."""
        self.LO.off()
        self.RF.off()
    def _get_acquisition_instr(self):
        """Return the acquisition instrument's name, or None if unset."""
        # Specifying the int_avg det here should allow replacing it with ATS
        # or potential digitizer acquisition easily
        if self._acquisition_instr is None:
            return None
        else:
            return self._acquisition_instr.name
    def _set_acquisition_instr(self, acquisition_instr):
        """Resolve and store the acquisition instrument by name (None
        clears it); flags all sequences for re-upload."""
        # Specifying the int_avg det here should allow replacing it with ATS
        # or potential digitizer acquisition easily
        if acquisition_instr is None:
            self._acquisition_instr = None
        else:
            self._acquisition_instr = self.find_instrument(acquisition_instr)
        # a different instrument may need different sequences/weights
        self._awg_seq_parameters_changed = True
        self._UHFQC_awg_parameters_changed = True
def _get_acquisition_instr_controller(self):
# Specifying the int_avg det here should allow replacing it with ATS
# or potential digitizer acquisition easily
if self._acquisition_instr_controller == None:
return None
else:
return self._acquisition_instr_controller.name
def _set_acquisition_instr_controller(self, acquisition_instr_controller):
# Specifying the int_avg det here should allow replacing it with ATS
# or potential digitizer acquisition easily
if acquisition_instr_controller == None:
self._acquisition_instr_controller = None
else:
self._acquisition_instr_controller = \
self.find_instrument(acquisition_instr_controller)
print("controller initialized")
    def _set_trigger_separation(self, val):
        """Set the trigger separation; flags the AWG sequence for re-upload
        if the value changed."""
        if val != self._trigger_separation:
            self._awg_seq_parameters_changed = True
        self._trigger_separation = val
    def _get_trigger_separation(self):
        """Return the cached trigger separation (s)."""
        return self._trigger_separation
    def _set_RO_length(self, val):
        """Set the readout length; flags the AWG sequence for re-upload on
        change, except for UHFQC where the sequence is independent of it."""
        if val != self._RO_length and 'UHFQC' not in self.acquisition_instr():
            self._awg_seq_parameters_changed = True
        self._RO_length = val
    def _get_RO_length(self):
        """Return the cached readout length (s)."""
        return self._RO_length
    def _set_acq_marker_channels(self, channels):
        """Set the comma-separated acquisition marker channel string; flags
        the AWG sequence for re-upload if it changed."""
        if channels != self._acq_marker_channels:
            self._awg_seq_parameters_changed = True
        self._acq_marker_channels = channels
    def _get_acq_marker_channels(self):
        """Return the cached acquisition marker channel string."""
        return self._acq_marker_channels
def finish(self):
if 'UHFQC' in self.acquisition_instr():
self._acquisition_instr.acquisition_finalize()
    def get_demod_array(self):
        """Return the (cosI, sinI) demodulation weight arrays."""
        return self.cosI, self.sinI
def demodulate_data(self, dat):
"""
Returns a complex point in the IQ plane by integrating and demodulating
the data. Demodulation is done based on the 'f_RO_mod' and
'single_sideband_demod' parameters of the Homodyne instrument.
"""
if self._f_RO_mod != 0:
# self.cosI is based on the time_base and created in self.init()
if self._single_sideband_demod is True:
# this definition for demodulation is consistent with
# issue #131
I = np.average(self.cosI * dat[0] + self.sinI * dat[1])
Q = np.average(-self.sinI * dat[0] + self.cosI * dat[1])
else: # Single channel demodulation, defaults to using channel 1
I = 2*np.average(dat[0]*self.cosI)
Q = 2*np.average(dat[0]*self.sinI)
else:
I = np.average(dat[0])
Q = np.average(dat[1])
return I+1.j*Q
class LO_modulated_Heterodyne(HeterodyneInstrument):
    """
    Homodyne instrument for pulse modulated LO.
    Inherits functionality from the HeterodyneInstrument
    AWG is used for modulating signal and triggering the CBox for acquisition
    or AWG is used for triggering and UHFQC for modulation and acquisition
    """
    shared_kwargs = ['RF', 'LO', 'AWG']

    def __init__(self, name, LO, AWG, acquisition_instr='CBox',
                 single_sideband_demod=False, **kw):
        self.common_init(name, LO, AWG, acquisition_instr,
                         single_sideband_demod, **kw)
        self.add_parameter('mod_amp', label='Modulation amplitude',
                           unit='V', vals=vals.Numbers(0, 1),
                           set_cmd=self._set_mod_amp,
                           get_cmd=self._get_mod_amp)
        self.add_parameter('acquisition_delay', label='Acquisition delay',
                           unit='s', vals=vals.Numbers(0, 1e-3),
                           set_cmd=self._set_acquisition_delay,
                           get_cmd=self._get_acquisition_delay)
        self.add_parameter('I_channel', vals=vals.Strings(),
                           label='I channel',
                           set_cmd=self._set_I_channel,
                           get_cmd=self._get_I_channel)
        self.add_parameter('Q_channel', vals=vals.Strings(),
                           label='Q channel',
                           set_cmd=self._set_Q_channel,
                           get_cmd=self._get_Q_channel)
        self._f_RO_mod = 10e6
        self._frequency = 5e9
        self.f_RO_mod(10e6)
        self.frequency(5e9)
        self._mod_amp = 0
        self.mod_amp(.5)
        self._acquisition_delay = 0
        self.acquisition_delay(200e-9)
        self._I_channel = 'ch3'
        self._Q_channel = 'ch4'

    def prepare_DDM(self):
        """Configure DDM weighted-integration length, averaging and number
        of segments; compute the scale factor used by probe_DDM."""
        # bugfix: the original referenced an undefined bare name `RO_length`
        # (NameError at runtime); read the qcodes parameter instead
        ro_length = self.RO_length()
        for channel in (1, 2):
            # getattr instead of eval(): same dynamic attribute lookup
            # without executing generated source strings
            getattr(self._acquisition_instr,
                    'ch_pair1_weight{}_wint_intlength'.format(channel))(
                        ro_length*500e6)
        self._acquisition_instr.ch_pair1_tvmode_naverages(self.nr_averages())
        self._acquisition_instr.ch_pair1_tvmode_nsegments(1)
        self.scale_factor = 1/(500e6*ro_length)/127

    def prepare_CBox(self, get_t_base=True):
        """
        uses the AWG to generate the modulating signal and CBox for readout
        """
        # only uploads a seq to AWG if something changed
        if (self._awg_seq_filename not in self.AWG.setup_filename() or
                self._awg_seq_parameters_changed) and self.auto_seq_loading():
            self._awg_seq_filename = \
                st_seqs.generate_and_upload_marker_sequence(
                    self.RO_length(), self.trigger_separation(), RF_mod=True,
                    IF=self.f_RO_mod(), mod_amp=0.5,
                    acq_marker_channels=self.acq_marker_channels(),
                    I_channel=self.I_channel(), Q_channel=self.Q_channel())
            self._awg_seq_parameters_changed = False
        # amplitudes are set unconditionally here; the duplicate calls that
        # used to live inside the if-branch above were redundant and removed
        self.AWG.ch3_amp(self.mod_amp())
        self.AWG.ch4_amp(self.mod_amp())
        if get_t_base is True:
            # NOTE(review): self.CBox is not assigned anywhere in this
            # module; presumably injected externally — verify against callers
            trace_length = self.CBox.nr_samples()
            tbase = np.arange(0, 5*trace_length, 5)*1e-9
            self.cosI = np.cos(2*np.pi*self.f_RO_mod()*tbase)
            self.sinI = np.sin(2*np.pi*self.f_RO_mod()*tbase)
            self.CBox.nr_samples(1)  # because using integrated avg

    def probe_CBox(self):
        """Acquire one integrated-average IQ point from the CBox."""
        if self._awg_seq_parameters_changed:
            self.prepare()
        self.CBox.acquisition_mode(0)
        self.CBox.acquisition_mode(4)
        d = self.CBox.get_integrated_avg_results()
        return d[0][0]+1j*d[1][0]

    def probe_DDM(self):
        """Acquire one TV-mode IQ point from the DDM (scaled to V)."""
        # t0 = time.time()
        self._acquisition_instr.ch_pair1_tvmode_enable.set(1)
        self._acquisition_instr.ch_pair1_run.set(1)
        # direct attribute access instead of eval() on generated source
        dataI = self._acquisition_instr.ch_pair1_weight1_tvmode_data()
        dataQ = self._acquisition_instr.ch_pair1_weight2_tvmode_data()
        dat = (self.scale_factor*dataI+self.scale_factor*1j*dataQ)
        # t1 = time.time()
        # print("time for DDM polling", t1-t0)
        return dat

    def _set_frequency(self, val):
        """Set the signal frequency; only the LO is tuned, the AWG IF
        modulation shifts the output up to the target frequency."""
        self._frequency = val
        # this is the definition agreed upon in issue 131
        # AWG modulation ensures that signal ends up at RF-frequency
        self.LO.frequency(val-self._f_RO_mod)

    def _get_frequency(self):
        """Return the cached frequency, warning on LO inconsistency."""
        freq = self.LO.frequency() + self._f_RO_mod
        if abs(self._frequency - freq) > self._eps:
            logging.warning('Homodyne frequency does not match LO frequency'
                            ' + RO_mod frequency')
        return self._frequency

    def _set_f_RO_mod(self, val):
        """Change the IF: flag the relevant sequence for regeneration and
        retune the LO so the signal frequency stays fixed."""
        if val != self._f_RO_mod:
            if 'CBox' in self.acquisition_instr():
                self._awg_seq_parameters_changed = True
            elif 'UHFQC' in self.acquisition_instr():
                self._UHFQC_awg_parameters_changed = True
            # bugfix: store the new IF *before* retuning; previously the
            # stale IF was used and the LO was effectively left unchanged
            self._f_RO_mod = val
            self.frequency(self._frequency)
        else:
            self._f_RO_mod = val

    def _get_f_RO_mod(self):
        """Return the cached intermodulation frequency."""
        return self._f_RO_mod

    def _set_mod_amp(self, val):
        """Set modulation amplitude; flags UHFQC waveform regeneration."""
        if val != self._mod_amp:
            if 'UHFQC' in self.acquisition_instr():
                self._UHFQC_awg_parameters_changed = True
        self._mod_amp = val

    def _get_mod_amp(self):
        """Return the cached modulation amplitude (V)."""
        return self._mod_amp

    def _set_acquisition_delay(self, val):
        """Set the acquisition delay (UHFQC only).

        Raises:
            NotImplementedError: for non-UHFQC acquisition instruments.
        """
        if 'UHFQC' in self.acquisition_instr():
            # delay in units of 8 samples at 1.8 GSa/s
            self._acquisition_instr.awgs_0_userregs_2(int(val*1.8e9/8))
        else:
            raise NotImplementedError("CBox heterodyne driver does not "
                                      "implement acquisition delay")
        self._acquisition_delay = val

    def _get_acquisition_delay(self):
        """Return the cached acquisition delay (s)."""
        return self._acquisition_delay

    def on(self):
        """Turn on the LO (only generator used), settling 1 s if needed."""
        if self.LO.status().startswith('Off'):
            self.LO.on()
            time.sleep(1.0)

    def off(self):
        """Turn off the LO."""
        self.LO.off()

    def _get_status(self):
        """Return the LO status (the only generator in this scheme)."""
        return self.LO.get('status')

    def _set_I_channel(self, channel):
        """Set the AWG I channel; flags sequence re-upload for non-UHFQC."""
        if channel != self._I_channel and \
                'UHFQC' not in self.acquisition_instr():
            self._awg_seq_parameters_changed = True
        self._I_channel = channel

    def _get_I_channel(self):
        """Return the cached I channel name."""
        return self._I_channel

    def _set_Q_channel(self, channel):
        """Set the AWG Q channel; flags sequence re-upload for non-UHFQC."""
        if channel != self._Q_channel and \
                'UHFQC' not in self.acquisition_instr():
            self._awg_seq_parameters_changed = True
        self._Q_channel = channel

    def _get_Q_channel(self):
        """Return the cached Q channel name."""
        return self._Q_channel
Fixed some bugs a linter told me about
# This is a virtual instrument abstracting a homodyne
# source which controls RF and LO sources
import logging
import numpy as np
from time import time
from qcodes.instrument.base import Instrument
from qcodes.utils import validators as vals
from qcodes.instrument.parameter import ManualParameter
# Used for uploading the right AWG sequences
from pycqed.measurement.pulse_sequences import standard_sequences as st_seqs
import time
class HeterodyneInstrument(Instrument):
    """
    This is a virtual instrument for a homodyne source
    Instrument is CBox, UHFQC, ATS and DDM compatible
    """
    shared_kwargs = ['RF', 'LO', 'AWG']

    def __init__(self, name, RF, LO, AWG=None, acquisition_instr=None,
                 acquisition_instr_controller=None,
                 single_sideband_demod=False, **kw):
        self.RF = RF
        self.common_init(name, LO, AWG, acquisition_instr,
                         single_sideband_demod, **kw)
        self.add_parameter('RF_power', label='RF power',
                           unit='dBm', vals=vals.Numbers(),
                           set_cmd=self._set_RF_power,
                           get_cmd=self._get_RF_power)
        self.add_parameter('acquisition_instr_controller',
                           set_cmd=self._set_acquisition_instr_controller,
                           get_cmd=self._get_acquisition_instr_controller,
                           vals=vals.Anything())
        self.acquisition_instr_controller(acquisition_instr_controller)
        self._RF_power = None

    def common_init(self, name, LO, AWG, acquisition_instr='CBox',
                    single_sideband_demod=False, **kw):
        """Shared initialization: registers the qcodes parameters and sets
        internal defaults. Also used by LO_modulated_Heterodyne."""
        logging.info(__name__ + ' : Initializing instrument')
        Instrument.__init__(self, name, **kw)
        self.LO = LO
        self.AWG = AWG
        self.add_parameter('frequency', label='Heterodyne frequency',
                           unit='Hz', vals=vals.Numbers(9e3, 40e9),
                           get_cmd=self._get_frequency,
                           set_cmd=self._set_frequency)
        self.add_parameter('f_RO_mod', label='Intermodulation frequency',
                           unit='Hz', vals=vals.Numbers(-600e6, 600e6),
                           set_cmd=self._set_f_RO_mod,
                           get_cmd=self._get_f_RO_mod)
        self.add_parameter('single_sideband_demod', vals=vals.Bool(),
                           label='Single sideband demodulation',
                           parameter_class=ManualParameter,
                           initial_value=single_sideband_demod)
        self.add_parameter('acquisition_instr', vals=vals.Strings(),
                           label='Acquisition instrument',
                           set_cmd=self._set_acquisition_instr,
                           get_cmd=self._get_acquisition_instr)
        self.add_parameter('nr_averages', label='Number of averages',
                           vals=vals.Numbers(min_value=0, max_value=1e6),
                           parameter_class=ManualParameter,
                           initial_value=1024)
        self.add_parameter('status', vals=vals.Enum('On', 'Off'),
                           set_cmd=self._set_status,
                           get_cmd=self._get_status)
        self.add_parameter('trigger_separation', label='Trigger separation',
                           unit='s', vals=vals.Numbers(0),
                           set_cmd=self._set_trigger_separation,
                           get_cmd=self._get_trigger_separation)
        self.add_parameter('RO_length', label='Readout length',
                           unit='s', vals=vals.Numbers(0),
                           set_cmd=self._set_RO_length,
                           get_cmd=self._get_RO_length)
        self.add_parameter('auto_seq_loading', vals=vals.Bool(),
                           label='Automatic AWG sequence loading',
                           parameter_class=ManualParameter,
                           initial_value=True)
        self.add_parameter('acq_marker_channels', vals=vals.Strings(),
                           label='Acquisition trigger channels',
                           docstring='comma (,) separated string of marker '
                                     'channels',
                           set_cmd=self._set_acq_marker_channels,
                           get_cmd=self._get_acq_marker_channels)
        self._trigger_separation = 10e-6
        self._RO_length = 2274e-9
        self._awg_seq_filename = ''
        self._awg_seq_parameters_changed = True
        self._UHFQC_awg_parameters_changed = True
        self.acquisition_instr(acquisition_instr)
        self.status('Off')
        self._f_RO_mod = 10e6
        self._frequency = 5e9
        self.frequency(5e9)
        self.f_RO_mod(10e6)
        self._eps = 0.01  # Hz slack for comparing frequencies
        self._acq_marker_channels = ('ch4_marker1,ch4_marker2,' +
                                     'ch3_marker1,ch3_marker2')

    def prepare(self, get_t_base=True):
        """Upload the AWG marker sequence (if needed), configure the
        selected acquisition instrument, then start the AWG and switch the
        generators on.

        Raises:
            ValueError: for an unsupported acquisition instrument.
        """
        # Uploading the AWG sequence
        if (self._awg_seq_filename not in self.AWG.setup_filename() or
                self._awg_seq_parameters_changed) and self.auto_seq_loading():
            self._awg_seq_filename = \
                st_seqs.generate_and_upload_marker_sequence(
                    5e-9, self.trigger_separation(), RF_mod=False,
                    acq_marker_channels=self.acq_marker_channels())
            self._awg_seq_parameters_changed = False
        # Preparing the acquisition instruments
        if 'CBox' in self.acquisition_instr():
            self.prepare_CBox(get_t_base)
        elif 'UHFQC' in self.acquisition_instr():
            self.prepare_UHFQC()
        elif 'ATS' in self.acquisition_instr():
            self.prepare_ATS(get_t_base)
        elif 'DDM' in self.acquisition_instr():
            self.prepare_DDM()
        else:
            raise ValueError("Invalid acquisition instrument {} in {}".format(
                self.acquisition_instr(), self.__class__.__name__))
        # turn on the AWG and the MWGs
        self.AWG.run()
        self.on()

    def prepare_CBox(self, get_t_base=True):
        """Configure the CBox for integrated-average acquisition; if
        get_t_base, recompute and upload the integer demodulation weights."""
        if get_t_base:
            trace_length = 512
            tbase = np.arange(0, 5*trace_length, 5)*1e-9
            # 7-bit signed integer weights expected by the CBox
            self.cosI = np.floor(
                127.*np.cos(2*np.pi*self.f_RO_mod()*tbase))
            self.sinI = np.floor(
                127.*np.sin(2*np.pi*self.f_RO_mod()*tbase))
            self._acquisition_instr.sig0_integration_weights(self.cosI)
            self._acquisition_instr.sig1_integration_weights(self.sinI)
        # because using integrated avg
        self._acquisition_instr.set('nr_samples', 1)
        self._acquisition_instr.nr_averages(int(self.nr_averages()))

    def prepare_UHFQC(self):
        """Configure the UHFQC result logger for single-point integrated
        averaging and compute the V-conversion scale factor."""
        # Upload the correct integration weights
        if self.single_sideband_demod():
            self._acquisition_instr.prepare_SSB_weight_and_rotation(
                IF=self.f_RO_mod(), weight_function_I=0, weight_function_Q=1)
        else:
            self._acquisition_instr.prepare_DSB_weight_and_rotation(
                IF=self.f_RO_mod(), weight_function_I=0, weight_function_Q=1)
        # this sets the result to integration and rotation outcome
        self._acquisition_instr.quex_rl_source(2)
        # only one sample to average over
        self._acquisition_instr.quex_rl_length(1)
        self._acquisition_instr.quex_rl_avgcnt(
            int(np.log2(self.nr_averages())))
        self._acquisition_instr.quex_wint_length(
            int(self.RO_length()*1.8e9))
        # Configure the result logger to not do any averaging
        # The AWG program uses userregs/0 to define the number of
        # iterations in the loop
        self._acquisition_instr.awgs_0_userregs_0(
            int(self.nr_averages()))
        self._acquisition_instr.awgs_0_userregs_1(0)  # 0 for rl, 1 for iavg
        self._acquisition_instr.acquisition_initialize([0, 1], 'rl')
        self.scale_factor_UHFQC = 1/(1.8e9*self.RO_length() *
                                     int(self.nr_averages()))

    def prepare_ATS(self, get_t_base=True):
        """Configure the ATS digitizer controller; uploads the AWG marker
        sequence first when an AWG is attached."""
        # PEP 8: compare against None with `is not`
        if self.AWG is not None:
            if (self._awg_seq_filename not in self.AWG.setup_filename() or
                    self._awg_seq_parameters_changed) and \
                    self.auto_seq_loading():
                self._awg_seq_filename = \
                    st_seqs.generate_and_upload_marker_sequence(
                        self.RO_length(), self.trigger_separation(),
                        RF_mod=False,
                        acq_marker_channels=self.acq_marker_channels())
                self._awg_seq_parameters_changed = False
        if get_t_base:
            self._acquisition_instr_controller.demodulation_frequency = \
                self.f_RO_mod()
            buffers_per_acquisition = 8
            self._acquisition_instr_controller.update_acquisitionkwargs(
                # mode='NPT',
                samples_per_record=64*1000,  # 4992,
                records_per_buffer=int(
                    self.nr_averages()/buffers_per_acquisition),  # 70 segments
                buffers_per_acquisition=buffers_per_acquisition,
                channel_selection='AB',
                transfer_offset=0,
                external_startcapture='ENABLED',
                enable_record_headers='DISABLED',
                alloc_buffers='DISABLED',
                fifo_only_streaming='DISABLED',
                interleave_samples='DISABLED',
                get_processed_data='DISABLED',
                allocated_buffers=buffers_per_acquisition,
                buffer_timeout=1000)

    def probe(self):
        """Acquire and return one demodulated IQ point (complex).

        Raises:
            ValueError: for an unsupported acquisition instrument.
        """
        if 'CBox' in self.acquisition_instr():
            return self.probe_CBox()
        elif 'UHFQC' in self.acquisition_instr():
            return self.probe_UHFQC()
        elif 'ATS' in self.acquisition_instr():
            return self.probe_ATS()
        elif 'DDM' in self.acquisition_instr():
            return self.probe_DDM()
        else:
            raise ValueError("Invalid acquisition instrument {} in {}".format(
                self.acquisition_instr(), self.__class__.__name__))

    def probe_CBox(self):
        """Run one CBox integration-averaging acquisition, returning the
        scaled complex IQ value (V)."""
        if self.single_sideband_demod():
            demodulation_mode = 'single'
        else:
            demodulation_mode = 'double'
        self._acquisition_instr.acquisition_mode('idle')
        self._acquisition_instr.acquisition_mode('integration averaging')
        self._acquisition_instr.demodulation_mode(demodulation_mode)
        # d = self.CBox.get_integrated_avg_results()
        # quick fix for spec units. Need to properly implement it later
        # after this, output is in V
        scale_factor_dacV = 1.*0.75/128.
        # scale_factor_integration = 1./float(self.f_RO_mod() *
        # self.CBox.nr_samples()*5e-9)
        scale_factor_integration = \
            1 / (64.*self._acquisition_instr.integration_length())
        factor = scale_factor_dacV*scale_factor_integration
        d = np.double(self._acquisition_instr.get_integrated_avg_results()) \
            * np.double(factor)
        # print(np.size(d))
        return d[0][0]+1j*d[1][0]

    def probe_UHFQC(self):
        """Poll one sample from the UHFQC result logger and return the
        scaled complex IQ value; re-prepares first if parameters changed."""
        if self._awg_seq_parameters_changed or \
                self._UHFQC_awg_parameters_changed:
            self.prepare()
        dataset = self._acquisition_instr.acquisition_poll(
            samples=1, acquisition_time=0.001, timeout=10)
        dat = (self.scale_factor_UHFQC*dataset[0][0] +
               self.scale_factor_UHFQC*1j*dataset[1][0])
        return dat

    def probe_ATS(self):
        """Run one ATS acquisition via the controller."""
        dat = self._acquisition_instr_controller.acquisition()
        return dat

    def finish(self):
        """Switch generators off, stop the AWG (if any) and finalize UHFQC
        acquisition."""
        self.off()
        if self.AWG is not None:
            self.AWG.stop()
        if 'UHFQC' in self.acquisition_instr():
            self._acquisition_instr.acquisition_finalize()

    def _set_frequency(self, val):
        """Set the heterodyne frequency: RF at val, LO at val - f_RO_mod."""
        self._frequency = val
        # this is the definition agreed upon in issue 131
        self.RF.frequency(val)
        self.LO.frequency(val-self._f_RO_mod)

    def _get_frequency(self):
        """Return the cached frequency, warning if RF/LO are inconsistent."""
        freq = self.RF.frequency()
        LO_freq = self.LO.frequency()
        if abs(LO_freq - freq + self._f_RO_mod) > self._eps:
            logging.warning('f_RO_mod between RF and LO is not set correctly')
            logging.warning('\tf_RO_mod = {}, LO_freq = {}, RF_freq = {}'
                            .format(self._f_RO_mod, LO_freq, freq))
        if abs(self._frequency - freq) > self._eps:
            logging.warning('Heterodyne frequency does not match RF frequency')
        return self._frequency

    def _set_f_RO_mod(self, val):
        """Store the new IF and retune the LO, keeping RF fixed."""
        self._f_RO_mod = val
        self.LO.frequency(self._frequency - val)

    def _get_f_RO_mod(self):
        """Return the cached IF, warning if RF/LO are inconsistent."""
        freq = self.RF.frequency()
        LO_freq = self.LO.frequency()
        if abs(LO_freq - freq + self._f_RO_mod) > self._eps:
            logging.warning('f_RO_mod between RF and LO is not set correctly')
            logging.warning('\tf_RO_mod = {}, LO_freq = {}, RF_freq = {}'
                            .format(self._f_RO_mod, LO_freq, freq))
        return self._f_RO_mod

    def _set_RF_power(self, val):
        """Set the RF generator output power (dBm) and cache the value."""
        self.RF.power(val)
        self._RF_power = val
        # internally stored to allow setting RF from stored setting

    def _get_RF_power(self):
        """Return the cached RF power."""
        return self._RF_power

    def _set_status(self, val):
        """Turn both generators on ('On') or off (any other value)."""
        if val == 'On':
            self.on()
        else:
            self.off()

    def _get_status(self):
        """Return 'On'/'Off' when the generators agree, otherwise a
        descriptive mixed-state string."""
        if (self.LO.status().startswith('On') and
                self.RF.status().startswith('On')):
            return 'On'
        elif (self.LO.status().startswith('Off') and
                self.RF.status().startswith('Off')):
            return 'Off'
        else:
            return 'LO: {}, RF: {}'.format(self.LO.status(), self.RF.status())

    def on(self):
        """Turn on both generators, waiting 1 s if either was off."""
        if self.LO.status().startswith('Off') or \
                self.RF.status().startswith('Off'):
            wait = True
        else:
            wait = False
        self.LO.on()
        self.RF.on()
        if wait:
            # The R&S MWG-s take some time to stabilize their outputs
            time.sleep(1.0)

    def off(self):
        """Turn off both generators."""
        self.LO.off()
        self.RF.off()

    def _get_acquisition_instr(self):
        """Return the acquisition instrument's name, or None if unset."""
        # Specifying the int_avg det here should allow replacing it with ATS
        # or potential digitizer acquisition easily
        if self._acquisition_instr is None:
            return None
        else:
            return self._acquisition_instr.name

    def _set_acquisition_instr(self, acquisition_instr):
        """Resolve and store the acquisition instrument by name (None
        clears it); flags all sequences for re-upload."""
        # Specifying the int_avg det here should allow replacing it with ATS
        # or potential digitizer acquisition easily
        if acquisition_instr is None:
            self._acquisition_instr = None
        else:
            self._acquisition_instr = self.find_instrument(acquisition_instr)
        self._awg_seq_parameters_changed = True
        self._UHFQC_awg_parameters_changed = True

    def _get_acquisition_instr_controller(self):
        """Return the acquisition controller's name, or None if unset."""
        # PEP 8: `is None`, not `== None`
        if self._acquisition_instr_controller is None:
            return None
        else:
            return self._acquisition_instr_controller.name

    def _set_acquisition_instr_controller(self, acquisition_instr_controller):
        """Resolve and store the acquisition controller by name."""
        # PEP 8: `is None`, not `== None`
        if acquisition_instr_controller is None:
            self._acquisition_instr_controller = None
        else:
            self._acquisition_instr_controller = \
                self.find_instrument(acquisition_instr_controller)
            # module logger instead of a bare print
            logging.info('controller initialized')

    def _set_trigger_separation(self, val):
        """Set the trigger separation; flags AWG re-upload on change."""
        if val != self._trigger_separation:
            self._awg_seq_parameters_changed = True
        self._trigger_separation = val

    def _get_trigger_separation(self):
        """Return the cached trigger separation (s)."""
        return self._trigger_separation

    def _set_RO_length(self, val):
        """Set the readout length; flags AWG re-upload on change, except
        for UHFQC where the sequence is independent of it."""
        if val != self._RO_length and 'UHFQC' not in self.acquisition_instr():
            self._awg_seq_parameters_changed = True
        self._RO_length = val

    def _get_RO_length(self):
        """Return the cached readout length (s)."""
        return self._RO_length

    def _set_acq_marker_channels(self, channels):
        """Set the marker channel string; flags AWG re-upload on change."""
        if channels != self._acq_marker_channels:
            self._awg_seq_parameters_changed = True
        self._acq_marker_channels = channels

    def _get_acq_marker_channels(self):
        """Return the cached marker channel string."""
        return self._acq_marker_channels

    def get_demod_array(self):
        """Return the (cosI, sinI) demodulation weight arrays."""
        return self.cosI, self.sinI

    def demodulate_data(self, dat):
        """
        Returns a complex point in the IQ plane by integrating and demodulating
        the data. Demodulation is done based on the 'f_RO_mod' and
        'single_sideband_demod' parameters of the Homodyne instrument.
        """
        if self._f_RO_mod != 0:
            # self.cosI is based on the time_base and created in prepare_CBox
            # bugfix: read the qcodes parameter; the raw attribute
            # _single_sideband_demod is never assigned in this module, so
            # the old `self._single_sideband_demod is True` raised
            # AttributeError
            if self.single_sideband_demod():
                # this definition for demodulation is consistent with
                # issue #131
                I = np.average(self.cosI * dat[0] + self.sinI * dat[1])
                Q = np.average(-self.sinI * dat[0] + self.cosI * dat[1])
            else:  # Single channel demodulation, defaults to using channel 1
                I = 2*np.average(dat[0]*self.cosI)
                Q = 2*np.average(dat[0]*self.sinI)
        else:
            I = np.average(dat[0])
            Q = np.average(dat[1])
        return I+1.j*Q
class LO_modulated_Heterodyne(HeterodyneInstrument):
"""
Homodyne instrument for pulse modulated LO.
Inherits functionality from the HeterodyneInstrument
AWG is used for modulating signal and triggering the CBox for acquisition
or AWG is used for triggering and UHFQC for modulation and acquisition
"""
shared_kwargs = ['RF', 'LO', 'AWG']
def __init__(self, name, LO, AWG, acquisition_instr='CBox',
single_sideband_demod=False, **kw):
self.common_init(name, LO, AWG, acquisition_instr,
single_sideband_demod, **kw)
self.add_parameter('mod_amp', label='Modulation amplitude',
unit='V', vals=vals.Numbers(0, 1),
set_cmd=self._set_mod_amp,
get_cmd=self._get_mod_amp)
self.add_parameter('acquisition_delay', label='Acquisition delay',
unit='s', vals=vals.Numbers(0, 1e-3),
set_cmd=self._set_acquisition_delay,
get_cmd=self._get_acquisition_delay)
self.add_parameter('I_channel', vals=vals.Strings(),
label='I channel',
set_cmd=self._set_I_channel,
get_cmd=self._get_I_channel)
self.add_parameter('Q_channel', vals=vals.Strings(),
label='Q channel',
set_cmd=self._set_Q_channel,
get_cmd=self._get_Q_channel)
self._f_RO_mod = 10e6
self._frequency = 5e9
self.f_RO_mod(10e6)
self.frequency(5e9)
self._mod_amp = 0
self.mod_amp(.5)
self._acquisition_delay = 0
self.acquisition_delay(200e-9)
self._I_channel = 'ch3'
self._Q_channel = 'ch4'
def prepare_DDM(self):
for i, channel in enumerate([1, 2]):
eval("self._acquisition_instr.ch_pair1_weight{}_wint_intlength({})".format(
channel, self.RO_length*500e6))
self._acquisition_instr.ch_pair1_tvmode_naverages(self.nr_averages())
self._acquisition_instr.ch_pair1_tvmode_nsegments(1)
self.scale_factor = 1/(500e6*self.RO_length)/127
def prepare_CBox(self, get_t_base=True):
"""
uses the AWG to generate the modulating signal and CBox for readout
"""
# only uploads a seq to AWG if something changed
if (self._awg_seq_filename not in self.AWG.setup_filename() or
self._awg_seq_parameters_changed) and self.auto_seq_loading():
self._awg_seq_filename = \
st_seqs.generate_and_upload_marker_sequence(
self.RO_length(), self.trigger_separation(), RF_mod=True,
IF=self.f_RO_mod(), mod_amp=0.5,
acq_marker_channels=self.acq_marker_channels(),
I_channel=self.I_channel(), Q_channel=self.Q_channel())
self.AWG.ch3_amp(self.mod_amp())
self.AWG.ch4_amp(self.mod_amp())
self._awg_seq_parameters_changed = False
self.AWG.ch3_amp(self.mod_amp())
self.AWG.ch4_amp(self.mod_amp())
if get_t_base is True:
trace_length = self.CBox.nr_samples()
tbase = np.arange(0, 5*trace_length, 5)*1e-9
self.cosI = np.cos(2*np.pi*self.f_RO_mod()*tbase)
self.sinI = np.sin(2*np.pi*self.f_RO_mod()*tbase)
self.CBox.nr_samples(1) # because using integrated avg
def probe_CBox(self):
if self._awg_seq_parameters_changed:
self.prepare()
self.CBox.acquisition_mode(0)
self.CBox.acquisition_mode(4)
d = self.CBox.get_integrated_avg_results()
return d[0][0]+1j*d[1][0]
def probe_DDM(self):
# t0 = time.time()
self._acquisition_instr.ch_pair1_tvmode_enable.set(1)
self._acquisition_instr.ch_pair1_run.set(1)
dataI = eval(
"self._acquisition_instr.ch_pair1_weight{}_tvmode_data()".format(1))
dataQ = eval(
"self._acquisition_instr.ch_pair1_weight{}_tvmode_data()".format(2))
dat = (self.scale_factor*dataI+self.scale_factor*1j*dataQ)
# t1 = time.time()
# print("time for DDM polling", t1-t0)
return dat
def _set_frequency(self, val):
self._frequency = val
# this is the definition agreed upon in issue 131
# AWG modulation ensures that signal ends up at RF-frequency
self.LO.frequency(val-self._f_RO_mod)
    def _get_frequency(self):
        """Return the cached readout frequency, warning on LO mismatch."""
        freq = self.LO.frequency() + self._f_RO_mod
        # sanity check: the hardware LO setting should be consistent with
        # the cached value to within self._eps
        if abs(self._frequency - freq) > self._eps:
            logging.warning('Homodyne frequency does not match LO frequency'
                            ' + RO_mod frequency')
        return self._frequency
def _set_f_RO_mod(self, val):
if val != self._f_RO_mod:
if 'CBox' in self.acquisition_instr():
self._awg_seq_parameters_changed = True
elif 'UHFQC' in self.acquisition_instr():
self._UHFQC_awg_parameters_changed = True
self.frequency(self._frequency)
self._f_RO_mod = val
    def _get_f_RO_mod(self):
        """Return the readout modulation frequency."""
        return self._f_RO_mod
def _set_mod_amp(self, val):
if val != self._mod_amp:
if 'UHFQC' in self.acquisition_instr():
self._UHFQC_awg_parameters_changed = True
self._mod_amp = val
    def _get_mod_amp(self):
        """Return the AWG modulation amplitude."""
        return self._mod_amp
def _set_acquisition_delay(self, val):
if 'UHFQC' in self.acquisition_instr():
self._acquisition_instr.awgs_0_userregs_2(int(val*1.8e9/8))
else:
raise NotImplementedError("CBox heterodyne driver does not "
"implement acquisition delay")
self._acquisition_delay = val
    def _get_acquisition_delay(self):
        """Return the cached acquisition delay."""
        return self._acquisition_delay
    def on(self):
        """Turn on the LO; wait 1 s for the output to settle."""
        if self.LO.status().startswith('Off'):
            self.LO.on()
            time.sleep(1.0)
    def off(self):
        """Turn off the LO."""
        self.LO.off()
    def _get_status(self):
        """Return the LO on/off status string."""
        return self.LO.get('status')
def _set_I_channel(self, channel):
if channel != self._I_channel and \
not 'UHFQC' in self.acquisition_instr():
self._awg_seq_parameters_changed = True
self._I_channel = channel
    def _get_I_channel(self):
        """Return the AWG channel used for the I quadrature."""
        return self._I_channel
def _set_Q_channel(self, channel):
if channel != self._Q_channel and \
not 'UHFQC' in self.acquisition_instr():
self._awg_seq_parameters_changed = True
self._Q_channel = channel
    def _get_Q_channel(self):
        """Return the AWG channel used for the Q quadrature."""
        return self._Q_channel
|
#-*- coding: utf-8 -*-
from django.contrib import admin
from django.contrib.admin.options import ModelAdmin
from shop.models.address import Country, Address
#class ClientAdmin(ModelAdmin):
# pass
#admin.site.register(Client, ClientAdmin)
class CountryAdmin(ModelAdmin):
    """Default admin for Country; no customisation needed."""
    pass
class AddressAdmin(ModelAdmin):
    """Admin for Address with list columns and raw-id user pickers."""
    # columns shown on the address changelist page
    list_display = (
        'name', 'address', 'address2', 'zip_code', 'city', 'country',
        'user_shipping', 'user_billing')
    # raw-id widgets avoid rendering a select over every user
    raw_id_fields = ('user_shipping', 'user_billing')
# Register the shop address models with the default admin site.
admin.site.register(Address, AddressAdmin)
admin.site.register(Country, CountryAdmin)
# removed dead code
|
#- coding: utf-8
from bs4 import BeautifulSoup
import cookielib
import re
import urllib
import urllib2
URL_CORREIOS = 'http://www.buscacep.correios.com.br/servicos/dnec/'
class Correios():
    """Scraper for the Correios (Brazilian postal service) CEP lookup site.

    Python 2 implementation built on urllib2/cookielib.
    """
    def __init__(self, proxy=None):
        # install a global opener that keeps session cookies (and routes
        # through an HTTP proxy when one is given)
        cj = cookielib.LWPCookieJar()
        cookie_handler = urllib2.HTTPCookieProcessor(cj)
        if proxy:
            proxy_handler = urllib2.ProxyHandler({'http': proxy})
            opener = urllib2.build_opener(proxy_handler, cookie_handler)
        else:
            opener = urllib2.build_opener(cookie_handler)
        urllib2.install_opener(opener)
    def _url_open(self, url, data=None, headers=None):
        # Open *url* relative to URL_CORREIOS, POSTing *data* when given.
        if headers == None:
            headers = {}
        # the site rejects requests without a browser-like User-agent
        headers['User-agent'] = 'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)'
        encoded_data = urllib.urlencode(data) if data else None
        url = URL_CORREIOS + url
        req = urllib2.Request(url, encoded_data, headers)
        handle = urllib2.urlopen(req)
        return handle
    def _parse_detalhe(self, html):
        # Parse a CEP detail page into a dict of address fields.
        soup = BeautifulSoup(html.decode('ISO-8859-1'), "html.parser")
        value_cells = soup.find_all('td', attrs={'class': 'value'})
        values = [cell.get_text() for cell in value_cells]
        # the third cell holds "city/state"
        localidade, uf = values[2].split('/')
        values_dict = {
            'Logradouro': values[0],
            'Bairro': values[1],
            'Localidade': localidade,
            'UF': uf,
            'CEP': values[3]
        }
        return values_dict
    def _parse_linha_tabela(self, tr):
        # Convert one search-result table row into a dict of address fields.
        values = [cell.get_text() for cell in tr.find_all('td')]
        keys = ['Logradouro', 'Bairro', 'Localidade', 'UF', 'CEP']
        return dict(zip(keys, values))
    def _parse_tabela(self, html):
        # Extract every clickable result row from a search-results page.
        soup = BeautifulSoup(html, "html.parser")
        linhas = soup.find_all('tr', attrs={
            'onclick': re.compile(r"javascript:detalharCep\('\d+','\d+'\);")
        })
        return [self._parse_linha_tabela(linha) for linha in linhas]
    def _parse_faixa(self, html):
        # Parse a CEP-range page: returns (first, last) for a range, a
        # single CEP string, or None when the locality is not registered.
        if u"não está cadastrada" in html.decode('cp1252'):
            return None
        ceps = re.findall('\d{5}-\d{3}', html)
        if len(ceps) == 4 or len(ceps) == 6:  # state (+ state) + city with a CEP range
            return tuple(ceps[-2:])
        elif len(ceps) == 3 or len(ceps) == 5:  # state (+ state) + city with a single CEP
            return ceps[-1]
        else:
            raise ValueError("HTML recebido não é válido")
    def detalhe(self, posicao=0):
        """Return the detail dict for one CEP of the last result list."""
        handle = self._url_open('detalheCEPAction.do', {'Metodo': 'detalhe',
                                                        'TipoCep': 2,
                                                        'Posicao': posicao + 1,
                                                        'CEP': ''})
        html = handle.read()
        return self._parse_detalhe(html)
    def consulta_faixa(self, localidade, uf):
        """Query the site and return the CEP range for a locality."""
        url = 'consultaFaixaCepAction.do'
        data = {
            'UF': uf,
            'Localidade': localidade.encode('cp1252'),
            'cfm': '1',
            'Metodo': 'listaFaixaCEP',
            'TipoConsulta': 'faixaCep',
            'StartRow': '1',
            'EndRow': '10',
        }
        html = self._url_open(url, data).read()
        return self._parse_faixa(html)
    def consulta(self, endereco, primeiro=False,
                 uf=None, localidade=None, tipo=None, numero=None):
        """Query the site and return a list of matching addresses.

        With primeiro=True, return the detail of the first match instead.
        """
        if uf is None:
            # free-text ("relaxation") search
            url = 'consultaEnderecoAction.do'
            data = {
                'relaxation': endereco.encode('ISO-8859-1'),
                'TipoCep': 'ALL',
                'semelhante': 'N',
                'cfm': 1,
                'Metodo': 'listaLogradouro',
                'TipoConsulta': 'relaxation',
                'StartRow': '1',
                'EndRow': '10'
            }
        else:
            # structured street search
            url = 'consultaLogradouroAction.do'
            data = {
                'Logradouro': endereco.encode('ISO-8859-1'),
                'UF': uf,
                'TIPO': tipo,
                'Localidade': localidade.encode('ISO-8859-1'),
                'Numero': numero,
                'cfm': 1,
                'Metodo': 'listaLogradouro',
                'TipoConsulta': 'logradouro',
                'StartRow': '1',
                'EndRow': '10'
            }
        h = self._url_open(url, data)
        html = h.read()
        if primeiro:
            return self.detalhe()
        else:
            return self._parse_tabela(html)
# 2to3 tool changes
#- coding: utf-8
from bs4 import BeautifulSoup
import http.cookiejar
import re
import urllib.request, urllib.parse, urllib.error
import urllib.request, urllib.error, urllib.parse
URL_CORREIOS = 'http://www.buscacep.correios.com.br/servicos/dnec/'
class Correios():
def __init__(self, proxy=None):
cj = http.cookiejar.LWPCookieJar()
cookie_handler = urllib.request.HTTPCookieProcessor(cj)
if proxy:
proxy_handler = urllib.request.ProxyHandler({'http': proxy})
opener = urllib.request.build_opener(proxy_handler, cookie_handler)
else:
opener = urllib.request.build_opener(cookie_handler)
urllib.request.install_opener(opener)
def _url_open(self, url, data=None, headers=None):
if headers == None:
headers = {}
headers['User-agent'] = 'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)'
encoded_data = urllib.parse.urlencode(data) if data else None
url = URL_CORREIOS + url
req = urllib.request.Request(url, encoded_data, headers)
handle = urllib.request.urlopen(req)
return handle
def _parse_detalhe(self, html):
soup = BeautifulSoup(html.decode('ISO-8859-1'), "html.parser")
value_cells = soup.find_all('td', attrs={'class': 'value'})
values = [cell.get_text() for cell in value_cells]
localidade, uf = values[2].split('/')
values_dict = {
'Logradouro': values[0],
'Bairro': values[1],
'Localidade': localidade,
'UF': uf,
'CEP': values[3]
}
return values_dict
def _parse_linha_tabela(self, tr):
values = [cell.get_text() for cell in tr.find_all('td')]
keys = ['Logradouro', 'Bairro', 'Localidade', 'UF', 'CEP']
return dict(list(zip(keys, values)))
def _parse_tabela(self, html):
soup = BeautifulSoup(html, "html.parser")
linhas = soup.find_all('tr', attrs={
'onclick': re.compile(r"javascript:detalharCep\('\d+','\d+'\);")
})
return [self._parse_linha_tabela(linha) for linha in linhas]
def _parse_faixa(self, html):
if "não está cadastrada" in html.decode('cp1252'):
return None
ceps = re.findall('\d{5}-\d{3}', html)
if len(ceps) == 4 or len(ceps) == 6: #uf (+ uf) + cidade com range
return tuple(ceps[-2:])
elif len(ceps) == 3 or len(ceps) == 5: #uf (+ uf) + cidade com cep único
return ceps[-1]
else:
raise ValueError("HTML recebido não é válido")
def detalhe(self, posicao=0):
"""Retorna o detalhe de um CEP da última lista de resultados"""
handle = self._url_open('detalheCEPAction.do', {'Metodo': 'detalhe',
'TipoCep': 2,
'Posicao': posicao + 1,
'CEP': ''})
html = handle.read()
return self._parse_detalhe(html)
def consulta_faixa(self, localidade, uf):
"""Consulta site e retorna faixa para localidade"""
url = 'consultaFaixaCepAction.do'
data = {
'UF': uf,
'Localidade': localidade.encode('cp1252'),
'cfm': '1',
'Metodo': 'listaFaixaCEP',
'TipoConsulta': 'faixaCep',
'StartRow': '1',
'EndRow': '10',
}
html = self._url_open(url, data).read()
return self._parse_faixa(html)
def consulta(self, endereco, primeiro=False,
uf=None, localidade=None, tipo=None, numero=None):
"""Consulta site e retorna lista de resultados"""
if uf is None:
url = 'consultaEnderecoAction.do'
data = {
'relaxation': endereco.encode('ISO-8859-1'),
'TipoCep': 'ALL',
'semelhante': 'N',
'cfm': 1,
'Metodo': 'listaLogradouro',
'TipoConsulta': 'relaxation',
'StartRow': '1',
'EndRow': '10'
}
else:
url = 'consultaLogradouroAction.do'
data = {
'Logradouro': endereco.encode('ISO-8859-1'),
'UF': uf,
'TIPO': tipo,
'Localidade': localidade.encode('ISO-8859-1'),
'Numero': numero,
'cfm': 1,
'Metodo': 'listaLogradouro',
'TipoConsulta': 'logradouro',
'StartRow': '1',
'EndRow': '10'
}
h = self._url_open(url, data)
html = h.read()
if primeiro:
return self.detalhe()
else:
return self._parse_tabela(html)
|
# Copyright 2017 TensorHub, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
import hashlib
import os
import guild.opref
from guild import pip_util
from guild import util
from guild import var
class ResolutionError(Exception):
    """Raised when a resource source cannot be resolved to a local path."""
    pass
class Resolver(object):
    """Interface: resolve a resource source to a local filesystem path."""
    def resolve(self):
        # Subclasses return a local path or raise ResolutionError.
        raise NotImplementedError()
class FileResolver(Resolver):
    """Resolves a source whose URI points at a file on the local disk."""

    def __init__(self, source, working_dir=None):
        self.source = source
        self.working_dir = working_dir or os.getcwd()

    def resolve(self):
        """Return the source file's path; raise ResolutionError if absent."""
        path = os.path.join(self.working_dir, self.source.parsed_uri.path)
        if os.path.exists(path):
            return path
        raise ResolutionError("file '%s' does not exist" % path)
class URLResolver(Resolver):
    """Resolves a source by downloading its URL into the resource cache."""
    def __init__(self, source):
        self.source = source
    def resolve(self):
        """Download the source URL and return the local file path.

        Raises ResolutionError when the downloaded file's sha256 does not
        match the expected digest.
        """
        download_dir = self._source_download_dir()
        util.ensure_dir(download_dir)
        try:
            return pip_util.download_url(
                self.source.uri,
                download_dir,
                self.source.sha256)
        except pip_util.HashMismatch as e:
            raise ResolutionError(
                "bad sha256 for '%s' (expected %s but got %s)"
                % (self.source.uri, e.expected, e.actual))
    def _source_download_dir(self):
        # cache directory keyed on a digest of the parsed URI parts
        key = "\n".join(self.source.parsed_uri).encode("utf-8")
        digest = hashlib.sha224(key).hexdigest()
        return os.path.join(var.cache_dir("resources"), digest)
class OperationOutputResolver(Resolver):
    """Resolves a source referring to the output of another operation."""

    def __init__(self, source, modeldef):
        self.source = source
        self.modeldef = modeldef

    def resolve(self):
        """Return the path of the required output in the latest matching run.

        Raises ResolutionError if the output was not generated.
        """
        opref, path = self._source_opref()
        run = self._latest_op_run(opref)
        source_path = os.path.join(run.path, path)
        if not os.path.exists(source_path):
            raise ResolutionError(
                "required output '%s' was not generated in the latest run (%s)"
                % (path, run.id))
        return source_path

    def _source_opref(self):
        """Parse the source URI into (opref, normalized relative path)."""
        spec = self.source.parsed_uri.path
        try:
            opref, path = guild.opref.OpRef.from_string(spec)
        except guild.opref.OpRefError:
            # typo fix: message previously read "inavlid"
            raise ResolutionError(
                "invalid operation reference '%s'" % spec)
        else:
            if path[:2] != "//":
                raise ResolutionError(
                    "invalid operation source path '%s' "
                    "(paths must start with '//')" % path)
            normalized_path = os.path.join(*path[2:].split("/"))
            if not normalized_path:
                # guard against a bare "//" spec, which would otherwise
                # silently resolve to the run directory itself
                raise ResolutionError(
                    "invalid operation source path '%s' "
                    "(paths may not be empty)" % path)
            return opref, normalized_path

    def _latest_op_run(self, opref):
        """Return the newest completed-or-running run matching opref."""
        resolved_opref = self._fully_resolve_opref(opref)
        completed_op_runs = var.run_filter("all", [
            var.run_filter("any", [
                var.run_filter("attr", "status", "completed"),
                var.run_filter("attr", "status", "running"),
            ]),
            resolved_opref.is_op_run])
        runs = var.runs(sort=["-started"], filter=completed_op_runs)
        if runs:
            return runs[0]
        raise ResolutionError(
            "no suitable run for %s" % self._opref_desc(resolved_opref))

    @staticmethod
    def _opref_desc(opref):
        """Format an opref as 'pkg/model:op' for error messages."""
        pkg = "." if opref.pkg_type == "modelfile" else opref.pkg_name
        return "%s/%s:%s" % (pkg, opref.model_name, opref.op_name)

    def _fully_resolve_opref(self, opref):
        """Fill in any missing opref parts from the model definition."""
        assert opref.op_name, opref
        # Bug fix: parenthesized the conditional expression. Without the
        # parens this parsed as "(opref.pkg_type or 'package') if
        # opref.pkg_name else 'modelfile'", discarding an explicit
        # pkg_type whenever pkg_name was unset.
        pkg_type = (
            opref.pkg_type or
            ("package" if opref.pkg_name else "modelfile"))
        pkg_name = (
            opref.pkg_name or
            os.path.abspath(self.modeldef.modelfile.dir))
        model_name = opref.model_name or self.modeldef.name
        op_name = opref.op_name
        return guild.opref.OpRef(
            pkg_type=pkg_type,
            pkg_name=pkg_name,
            pkg_version=None,
            model_name=model_name,
            op_name=op_name)
# Require path for operation source
# Copyright 2017 TensorHub, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
import hashlib
import os
import guild.opref
from guild import pip_util
from guild import util
from guild import var
class ResolutionError(Exception):
    """Raised when a resource source cannot be resolved to a local path."""
    pass
class Resolver(object):
    """Interface: resolve a resource source to a local filesystem path."""
    def resolve(self):
        # Subclasses return a local path or raise ResolutionError.
        raise NotImplementedError()
class FileResolver(Resolver):
    """Resolves a source whose URI names a file on the local disk."""

    def __init__(self, source, working_dir=None):
        self.source = source
        self.working_dir = working_dir if working_dir else os.getcwd()

    def resolve(self):
        """Return the source file's path; raise ResolutionError if absent."""
        candidate = os.path.join(
            self.working_dir, self.source.parsed_uri.path)
        if not os.path.exists(candidate):
            raise ResolutionError("file '%s' does not exist" % candidate)
        return candidate
class URLResolver(Resolver):
    """Resolves a source by downloading its URL into the resource cache."""

    def __init__(self, source):
        self.source = source

    def resolve(self):
        """Download the source URL and return the local file path.

        Raises ResolutionError on a sha256 mismatch.
        """
        target_dir = self._source_download_dir()
        util.ensure_dir(target_dir)
        try:
            return pip_util.download_url(
                self.source.uri, target_dir, self.source.sha256)
        except pip_util.HashMismatch as err:
            raise ResolutionError(
                "bad sha256 for '%s' (expected %s but got %s)"
                % (self.source.uri, err.expected, err.actual))

    def _source_download_dir(self):
        # cache directory keyed on a digest of the parsed URI parts
        key = "\n".join(self.source.parsed_uri).encode("utf-8")
        return os.path.join(
            var.cache_dir("resources"), hashlib.sha224(key).hexdigest())
class OperationOutputResolver(Resolver):
    """Resolves a source referring to the output of another operation."""

    def __init__(self, source, modeldef):
        self.source = source
        self.modeldef = modeldef

    def resolve(self):
        """Return the path of the required output in the latest matching run.

        Raises ResolutionError if the output was not generated.
        """
        opref, path = self._source_opref()
        run = self._latest_op_run(opref)
        source_path = os.path.join(run.path, path)
        if not os.path.exists(source_path):
            raise ResolutionError(
                "required output '%s' was not generated in the latest run (%s)"
                % (path, run.id))
        return source_path

    def _source_opref(self):
        """Parse the source URI into (opref, normalized relative path)."""
        spec = self.source.parsed_uri.path
        try:
            opref, path = guild.opref.OpRef.from_string(spec)
        except guild.opref.OpRefError:
            # typo fix: message previously read "inavlid"
            raise ResolutionError(
                "invalid operation reference '%s'" % spec)
        else:
            if path[:2] != "//":
                raise ResolutionError(
                    "invalid operation source path '%s' "
                    "(paths must start with '//')" % path)
            normalized_path = os.path.join(*path[2:].split("/"))
            if not normalized_path:
                raise ResolutionError(
                    "invalid operation source path '%s' "
                    "(paths may not be empty)" % path)
            return opref, normalized_path

    def _latest_op_run(self, opref):
        """Return the newest completed-or-running run matching opref."""
        resolved_opref = self._fully_resolve_opref(opref)
        completed_op_runs = var.run_filter("all", [
            var.run_filter("any", [
                var.run_filter("attr", "status", "completed"),
                var.run_filter("attr", "status", "running"),
            ]),
            resolved_opref.is_op_run])
        runs = var.runs(sort=["-started"], filter=completed_op_runs)
        if runs:
            return runs[0]
        raise ResolutionError(
            "no suitable run for %s" % self._opref_desc(resolved_opref))

    @staticmethod
    def _opref_desc(opref):
        """Format an opref as 'pkg/model:op' for error messages."""
        pkg = "." if opref.pkg_type == "modelfile" else opref.pkg_name
        return "%s/%s:%s" % (pkg, opref.model_name, opref.op_name)

    def _fully_resolve_opref(self, opref):
        """Fill in any missing opref parts from the model definition."""
        assert opref.op_name, opref
        # Bug fix: parenthesized the conditional expression. Without the
        # parens this parsed as "(opref.pkg_type or 'package') if
        # opref.pkg_name else 'modelfile'", discarding an explicit
        # pkg_type whenever pkg_name was unset.
        pkg_type = (
            opref.pkg_type or
            ("package" if opref.pkg_name else "modelfile"))
        pkg_name = (
            opref.pkg_name or
            os.path.abspath(self.modeldef.modelfile.dir))
        model_name = opref.model_name or self.modeldef.name
        op_name = opref.op_name
        return guild.opref.OpRef(
            pkg_type=pkg_type,
            pkg_name=pkg_name,
            pkg_version=None,
            model_name=model_name,
            op_name=op_name)
|
__author__ = "Andrew Emerick"
__email__ = "aemerick11@gmail.com"
import yt
yt.funcs.mylog.setLevel(40)
from yt.fields.api import ValidateDataField, ValidateParameter
from yt.units import dimensions
import numpy as np
from collections import Iterable
from galaxy_analysis.static_data import AMU,\
MOLECULAR_WEIGHT
from galaxy_analysis.utilities import convert_abundances
from galaxy_analysis.utilities import utilities
from galaxy_analysis import star_analysis
from galaxy_analysis.misc import dm_halo
from galaxy_analysis.yt_fields import ionization
from onezone import data_tables, radiation
# pygrackle is optional; cooling-related features are only available when
# it imports cleanly.
GRACKLE_IMPORTED = True
try:
    import pygrackle
    from pygrackle.grackle_wrapper import \
        calculate_cooling_time
except Exception:
    # was a bare "except:"; narrowed so SystemExit/KeyboardInterrupt are
    # not swallowed while still tolerating any import-time failure
    GRACKLE_IMPORTED = False
SE_table = data_tables.StellarEvolutionData()
# guards against re-registering the derived fields defined below
FIELDS_DEFINED = False
def _density_function_generator(asym):
    """Register a ('gas', '<ele>_Density') field for each element symbol."""
    symbols = asym if isinstance(asym, Iterable) else [asym]

    def _make_density(symbol):
        def _density(field, data):
            # re-dimensionalize the raw code-unit value, then go to cgs
            raw = data[('enzo', symbol + '_Density')].value
            raw = raw * data.ds.mass_unit / data.ds.length_unit**3
            return raw.convert_to_units('g/cm**3')
        return _density

    for symbol in symbols:
        yt.add_field(('gas', symbol + "_Density"),
                     function=_make_density(symbol), units='g/cm**3')
    return
def _abundance_function_generator(asym):
    """Register ('gas', '<ele>_Abundance') fields (plus alpha if possible)."""
    if not isinstance(asym, Iterable):
        asym = [asym]
    if 'H' not in asym:
        # hydrogen is always needed as the abundance reference
        asym = asym + ['H']

    def _make_abundance(symbol):
        def _abundance(field, data):
            grams = data[('gas', symbol + '_Mass')].convert_to_units('g').value
            return convert_abundances.elemental_abundance(symbol, grams)
        return _abundance

    for symbol in asym:
        yt.add_field(('gas', symbol + '_Abundance'),
                     function=_make_abundance(symbol), units="")

    if ('O' in asym) and ('Mg' in asym) and ('Si' in asym):
        def _alpha_abundance(field, data):
            # mean of the three alpha-element abundances
            total = (data[('gas', 'O_Abundance')]
                     + data[('gas', 'Mg_Abundance')]
                     + data[('gas', 'Si_Abundance')])
            return total / 3.0
        yt.add_field(('gas', 'alpha_Abundance'),
                     function=_alpha_abundance, units="")
    return
def _mass_function_generator(asym):
    """Register ('gas', '<ele>_Mass') fields; return the number registered."""
    if not isinstance(asym, Iterable):
        asym = [asym]

    def _make_mass(symbol):
        def _mass(field, data):
            # density (code units -> cgs) times cell volume gives mass
            dens = data[('enzo', symbol + '_Density')].value
            dens = dens * data.ds.mass_unit / data.ds.length_unit**3
            dens = dens.convert_to_cgs()
            return (dens * data['cell_volume']).convert_to_units('g')
        return _mass

    nfields = 0
    for symbol in asym:
        yt.add_field(('gas', symbol + '_Mass'),
                     function=_make_mass(symbol), units='g')
        nfields += 1

    if ('O' in asym) and ('Mg' in asym) and ('Si' in asym):
        def _alpha_mass(field, data):
            # total (not averaged) mass of the alpha elements
            return (data[('gas', 'O_Mass')] + data[('gas', 'Mg_Mass')]
                    + data[('gas', 'Si_Mass')])
        yt.add_field(('gas', 'alpha_Mass'), function=_alpha_mass, units="g")
        nfields += 1
    return nfields
#
# Construct arbitrary mass fraction derived fields in yt
# using a loop to generate functions
#
def _mass_fraction_function_generator(asym):
    """Register ('gas', '<ele>_Fraction') mass-fraction fields.

    Returns the number of fields counted (see the NOTE on nfields below).
    """
    if not isinstance(asym, Iterable):
        asym = [asym]
    def return_function(a):
        def _mass_fraction(field,data):
            # element density (code units -> cgs) over total gas density
            ele_dens = data[('enzo', a + '_Density')].value
            ele_dens = ele_dens * data.ds.mass_unit / data.ds.length_unit**3
            ele_dens = ele_dens.convert_to_cgs()
            dens = data[('enzo','Density')].convert_to_cgs()
            mass_fraction = ele_dens / dens
            return mass_fraction
        return _mass_fraction
    nfields = 0
    for a in asym:
        yt.add_field(('gas', a + '_Fraction'), function = return_function(a), units="")
        nfields = nfields + 1
    if (('O' in asym) and ('Mg' in asym) and ('Si' in asym)):
        def _alpha_mass_fraction(field, data):
            alpha = data[('gas','O_Fraction')] + data[('gas','Mg_Fraction')] +\
                    data[('gas','Si_Fraction')]
            return alpha / 3.0
        # NOTE(review): the 'alpha_Fraction' registration is commented out
        # but nfields is still incremented below, so the returned count can
        # overstate the registered fields by one -- confirm intent.
#        yt.add_field(('gas','alpha_Fraction'), function = _alpha_mass_fraction, units = "")
        if (('S' in asym) and ('Ca' in asym)):
            def _alpha_5(field, data):
                # 5-element alpha built from the 3-element alpha fraction
                alpha = data[('gas','alpha_Fraction')]*3.0 + data[('gas','S_Fraction')] +\
                        data[('gas','Ca_Fraction')]
                return alpha / 5.0
#            yt.add_field( ('gas','alpha_5_Fraction'), function = _alpha_5, units = "")
        nfields = nfields + 1
    return nfields
#
# Construct arbitrary number density derived fields in yt
# using a loop to generate functions
#
def _number_density_function_generator(asym):
    """Register ('gas', '<ele>_Number_Density') fields for each element.

    Also registers a metal number density (using an assumed mean metal
    molecular weight) and a total-hydrogen number density. Returns the
    number of per-element fields registered.
    """
    if not isinstance(asym, Iterable):
        asym = [asym]

    def return_function(a):
        def _number_density(field, data):
            # element mass density (cgs) divided by the species mass
            ele_dens = data[('enzo', a + '_Density')].value
            ele_dens = ele_dens * data.ds.mass_unit / data.ds.length_unit**3
            ele_dens = ele_dens.convert_to_cgs()
            n = ele_dens / (MOLECULAR_WEIGHT[a] * AMU * yt.units.g)
            return n.convert_to_cgs()
        return _number_density

    nfields = 0
    for a in asym:
        yt.add_field(('gas', a + '_Number_Density'),
                     function=return_function(a), units='cm**(-3)')
        nfields = nfields + 1

    # make a metal number density field - make an assumption about the
    # metal molecular weight
    def _metal_number_density(field, data):
        ele_dens = data[('enzo', 'Metal_Density')].convert_to_units('g/cm**3')
        n = ele_dens / (MOLECULAR_WEIGHT['metal'] * AMU * yt.units.g)
        return n.convert_to_cgs()
    yt.add_field(('gas', 'Metal_Number_Density'),
                 function=_metal_number_density, units='cm**(-3)')

    def _H_total_number_density(field, data):
        n_H = data['H_p0_number_density'] + data['H_p1_number_density']
        try:
            # molecular/ionic H species exist only in runs with H2 chemistry
            n_H += data['H_m1_number_density'] +\
                   0.5 * (data['H2_p0_number_density'] + data['H2_p1_number_density'])
        except Exception:
            # was a bare "except:" adding an array of zeros (a no-op);
            # narrowed so KeyboardInterrupt/SystemExit are not swallowed
            pass
        return n_H.convert_to_units('cm**(-3)')
    yt.add_field(('gas', 'H_total_number_density'),
                 function=_H_total_number_density, units='cm**(-3)')
    return nfields
def _ionization_state_generator(metals):
    """Register number-density fields for every ion of the given metals.

    Returns the number of ion fields registered.
    """
    ions = [ion for ion in ionization.get_ions()
            if ionization.get_elements(ion) in metals]

    def _make_ion_density(ion):
        def _ion_density(field, data):
            element = ionization.get_elements(ion)
            n_ele = data[('gas', element + '_Number_Density')].value
            log_n = np.log10(data['H_total_number_density'].to('cm**(-3)').value)
            log_T = np.log10(data['Temperature'].to('K').value)
            # the ion-fraction table is tabulated in log10
            f_ion = 10.0**(ionization.get_ion_fraction(log_n, log_T, ion))
            return (n_ele * f_ion) * yt.units.cm**(-3)
        return _ion_density

    count = 0
    for ion in ions:
        yt.add_field(('gas', ion + '_Number_Density'),
                     function=_make_ion_density(ion), units='cm**(-3)')
        count += 1
    return count
def _generate_rates(ds):
    """
    Generate reaction rate equations

    Registers ('gas', 'k8_rr'), 'k10_rr', 'k19_rr' and 'k22_rr' volumetric
    reaction-rate fields; the k-labels follow the chemistry-network
    numbering summarized in the comment block at the end of this function.
    """
    kunit = 1.0  # (unused placeholder)
    def k8(T):
        T = T.convert_to_units('K').value
        k8 = 1.35E-9 * (T**(9.8493E-2) + 3.2852E-1 *\
             T**(5.561E-1) + 2.881E-7 * T**2.1826) /\
             (1.0 + 6.191E-3 * T**1.0461 + 8.9712E-11*T**3.0424 +\
              3.2576E-14 * T**3.7741)
        return k8  # now in cgs
    def k10(T):  # have arg T to look same as other functions
        # constant rate coefficient
        k10 = 6.0E-10
        return k10
    def k19(T):
        T = T.convert_to_units('K').value
        k19 = 5.0E-7 * np.sqrt(100.0 / T)
        return k19
    def k22(T):
        T = T.convert_to_units('K').value
        # for GLover 2008 three body rate ONLY
        k22 = 7.7E-31 / T**0.464
        return k22
    def k13(T):
        T = T.convert_to_units('K').value
        k13 = 10.0**(-178.4239 - 68.42243 * np.log10(T)
                     + 43.20243 * np.log10(T)**2
                     - 4.633167 * np.log10(T)**3
                     + 69.70086 * np.log10(1.0 + 40870.38 / T)
                     - (23705.7 / T))
        ###############
        # above is for use with Glover 2008 three body rate
        #
        #    T_eV = (T / yt.physical_constants.k_b).convert_to_units('eV').value
        #    T_lim = 0.3
        #
        #    k13 = np.ones(np.shape(T)) * 1.0E-20
        #
        #    k13[ T > T_lim] = 1.0670825E-10*T_eV**(2.012) /\
        #                      (np.exp(4.463/T_eV) * (1.0 + 0.2472 * T_eV)**3.512)
        return k13
    def k11(T):
        T_eV = (T / yt.physical_constants.k_b).convert_to_units('eV').value
        T_lim = 0.3
        k11 = np.ones(np.shape(T)) * 1.0E-20
        log_T = np.log(T.convert_to_units('K').value)
        # NOTE(review): the mask is built from T_eV but the right-hand
        # side uses the *unmasked* T and log_T arrays -- the assignment
        # will raise a shape mismatch whenever any element is below T_lim,
        # and np.exp(-21237.15/T) divides by a unitful array. Likely the
        # masked forms (T[T_eV > T_lim] etc.) were intended; confirm.
        k11[ T_eV > T_lim] = (np.exp(-21237.15/T) *\
                              (- 3.3232183E-7
                               + 3.3735382E-7 * log_T
                               - 1.4491368E-7 * log_T**2
                               + 3.4172805E-8 * log_T**3
                               - 4.7813720E-9 * log_T**4
                               + 3.9731542E-10 * log_T**5
                               - 1.8171411E-11 * log_T**6
                               + 3.5311932E-13 * log_T**7))
        return k11
    def k12(T):
        T_eV = (T / yt.physical_constants.k_b).convert_to_units('eV').value
        T_lim = 0.3
        k12 = np.ones(np.shape(T)) * 1.0E-20
        # NOTE(review): unlike k11, the mask here compares T (Kelvin, with
        # units) against T_lim = 0.3 -- elsewhere an eV threshold -- and
        # the right-hand side again uses the unmasked T; confirm whether
        # T_eV > T_lim with masked arrays was intended.
        k12[T>T_lim] = 4.4886E-9*T**(0.109127)*np.exp(-101858.0/T)
        return k12
    #    def k29(T)
    #
    #        return k29
    reaction_units = 1.0 / yt.units.cm**3 / yt.units.s
    ru_label = '1/s/cm**3'
    def _k8_reaction_rate(field, data):
        rr = k8(data['Temperature'].convert_to_units('K'))
        # factor 2: matches the "2.0 * (k8 * HM * HI ..." scoef term below
        rr = 2.0 * rr * data[('gas','H_m1_number_density')].convert_to_cgs().value *\
             data[('gas','H_p0_number_density')].convert_to_cgs().value
        return rr * reaction_units
    def _k10_reaction_rate(field,data):
        rr = k10(data['Temperature'].convert_to_units('K'))
        rr = rr * data[('gas','H2_p1_number_density')].convert_to_cgs().value *\
             data[('gas','H_p0_number_density')].convert_to_cgs().value
        return rr * reaction_units
    def _k19_reaction_rate(field,data):
        rr = k19(data['Temperature'].convert_to_units('K'))
        rr = rr * data[('gas','H2_p1_number_density')].convert_to_cgs().value *\
             data[('gas','H_m1_number_density')].convert_to_cgs().value
        return rr * reaction_units
    def _k22_reaction_rate(field, data):
        rr = k22(data['Temperature'].convert_to_units('K'))
        rr = rr * (data[('gas','H_p0_number_density')].convert_to_cgs().value)**3
        return rr * reaction_units
    yt.add_field(('gas','k8_rr'),
                 function = _k8_reaction_rate, units = ru_label)
    yt.add_field(('gas','k10_rr'),
                 function = _k10_reaction_rate, units = ru_label)
    yt.add_field(('gas','k19_rr'),
                 function = _k19_reaction_rate, units = ru_label)
    yt.add_field(('gas','k22_rr'),
                 function = _k22_reaction_rate, units = ru_label)
    # rates
    #   scoef
    #     2.0 * ( k8 * HM * HI          - set with interp
    #             k10 * H2II * HI * 0.5 - set with interp
    #             k19 * H2II * HM * 0.5 - set with interp
    #             k22 * HI * (HI*HI)    - set with interp
    #
    #   acoef
    #     k13*HI + k11*HII + k12*de + k29 + k31shield
    #     idust
    #     + 2 * H2dust * HI * rhoH
    #
    #   H2I = (scoef*dtit + H2I) / (1.0 + acoef*dtit)
    #
    # passes density field
    return
def _particle_abundance_function_generator(asym, ds = None):
    """Register ('io', 'particle_<ele>_abundance') fields for star particles.

    Skips registration when *ds* is given and contains no particles. H and
    He are always appended as abundance references.
    """
    if not (ds is None):
        if not (ds.parameters['NumberOfParticles'] > 0):
            # nothing to register for a particle-free dataset
            return
    if not isinstance(asym, Iterable):
        asym = [asym]
    if not ('H' in asym):
        asym = asym + ['H']
    if not ('He' in asym):
        asym = asym + ['He']
    def return_function(element, fraction_field):
        def _abundance(field, data):
            # element mass = stored mass fraction x birth mass (in grams)
            mass = data[fraction_field].value * (data['birth_mass'].value *yt.units.Msun).convert_to_units('g').value
            abund = convert_abundances.elemental_abundance(element, mass)
            return abund
        return _abundance
    for a in asym:
        fraction_field = ('io','particle_' + a + '_fraction')
        yt.add_field(('io','particle_' + a + '_abundance'),
                     return_function(a, fraction_field), units = "", particle_type = True)
    if (('O' in asym) and ('Mg' in asym) and ('Si' in asym)):
        def _alpha_fraction(field, data):
            alpha = data[('io','particle_O_fraction')] +\
                    data[('io','particle_Mg_fraction')] +\
                    data[('io','particle_Si_fraction')]
            return alpha / 3.0
        def _alpha_abundance(field, data):
            alpha = data[('io','particle_O_abundance')] +\
                    data[('io','particle_Mg_abundance')] +\
                    data[('io','particle_Si_abundance')]
            return alpha / 3.0
        # fraction-field registration intentionally disabled; only the
        # abundance field is exposed
#        yt.add_field(('io','particle_alpha_fraction'), function=_alpha_fraction, units = "", particle_type = True)
        yt.add_field(('io','particle_alpha_abundance'), function=_alpha_abundance, units="", particle_type=True)
        if ('S' in asym) and ('Ca' in asym):
            def _alpha_5_fraction(field,data):
                alpha = (data[('io','particle_alpha_fraction')]*3.0 + data[('io','particle_S_fraction')] +\
                         data[('io','particle_Ca_fraction')]) / 5.0
                return alpha
            def _alpha_5(field,data):
                # NOTE(review): this reads 'particle_alpha_5_abundance' --
                # the very field being defined here -- weighted by 3.0;
                # it looks like 'particle_alpha_abundance' was intended.
                # Confirm before relying on alpha_5 values.
                alpha = data[('io','particle_alpha_5_abundance')]*3.0 + data[('io','particle_S_abundance')]+\
                        data[('io','particle_Ca_abundance')]
                return alpha / 5.0
#            yt.add_field( ('io','particle_alpha_5_fraction'), function = _alpha_5_fraction, units = "", particle_type =True)
            yt.add_field( ('io','particle_alpha_5_abundance'), function = _alpha_5, units = "", particle_type = True)
    return
def _particle_abundance_ratio_function_generator(ratios, ds = None):
    """Register ('io', 'particle_<e1>_over_<e2>') ratio fields for stars.

    Returns the number of pairwise ratio fields registered, or None when
    *ds* is given and contains no particles.
    """
    if not (ds is None):
        if not (ds.parameters['NumberOfParticles'] > 0):
            return
    if not isinstance(ratios, Iterable):
        ratios = [ratios]

    def return_function(ele1, ele2, field1, field2):
        def _abundance_ratio(field, data):
            # per-particle element masses: mass fraction x birth mass (g)
            mass1 = data[field1].value
            mass1 = ((mass1 * data['birth_mass'].value) * yt.units.Msun).convert_to_units('g')
            mass2 = data[field2].value
            mass2 = ((mass2 * data['birth_mass'].value) * yt.units.Msun).convert_to_units('g')
            ratio = convert_abundances.abundance_ratio(
                (ele1, mass1.value), (ele2, mass2.value), 'mass')
            return ratio * yt.units.g / yt.units.g
        return _abundance_ratio

    nfields = 0
    for r in ratios:
        ele1, ele2 = r.rsplit('/')
        field1 = ('io','particle_' + ele1 + '_fraction')
        field2 = ('io','particle_' + ele2 + '_fraction')
        fieldname = 'particle_' + ele1 + '_over_' + ele2
        yt.add_field(('io', fieldname), function = return_function(ele1,ele2,field1,field2),
                     units = "", particle_type = True)
        nfields = nfields + 1

    def _alpha_return_function(base):
        def _alpha_over_x(field,data):
            alpha = data[('io','particle_alpha_abundance')]
            x = data[('io','particle_' + base + '_abundance')]
            return convert_abundances.abundance_ratio(('alpha',alpha),(base,x), 'abundances')
        return _alpha_over_x

    def _alpha_5_return_function(base):
        def _alpha_5_over_x(field,data):
            alpha = data[('io','particle_alpha_5_abundance')]
            x = data[('io','particle_' + base + '_abundance')]
            return convert_abundances.abundance_ratio(('alpha_5',alpha),(base,x), 'abundances')
        return _alpha_5_over_x

    denoms = [x.split('/')[1] for x in ratios]
    denoms = np.unique(denoms)
    # Bug fix: ds defaults to None, but this line previously dereferenced
    # ds.derived_field_list unconditionally (AttributeError when called
    # without a dataset).
    if ds is not None and ('io','particle_alpha_abundance') in ds.derived_field_list:
        for x in denoms:
            yt.add_field(('io','particle_alpha_over_' + x), function = _alpha_return_function(x), units = "", particle_type = True)
            # NOTE(review): alpha_5 ratios are registered even when
            # 'particle_alpha_5_abundance' was never defined (it requires
            # S and Ca) -- confirm this is guarded upstream.
            yt.add_field(('io','particle_alpha_5_over_' + x), function = _alpha_5_return_function(x), units = "", particle_type = True)
    return nfields
#
# Construct arbitrary abundance ratio fields in yt
# using a function generator
#
def _abundance_ratio_function_generator(ratios, H_mode = 'total'):
    """Register ('gas', '<e1>_over_<e2>') abundance-ratio fields.

    ratios : list of 'X/Y' strings. H_mode selects which hydrogen species
    are counted when 'H' appears in a ratio ('total', 'HI' or 'HII').
    Returns the number of pairwise ratio fields registered.
    """
    if not isinstance(ratios, Iterable):
        ratios = [ratios]

    def _H_mass(data, mode):
        # hydrogen density according to the chosen accounting mode
        if mode == 'total':
            mass = data[('enzo','HI_Density')] + data[('enzo','HII_Density')]
            if ('enzo','H2I_Density') in data.ds.field_list:
                mass += data[('enzo','HM_Density')] + data[('enzo','H2I_Density')] +\
                        data[('enzo','H2II_Density')]
        elif mode == 'HI':
            mass = data[('enzo','HI_Density')]
        elif mode == 'HII':
            # bug fix: the field name was misspelled 'HII_Denisty'
            mass = data[('enzo','HII_Density')]
        return mass

    def return_function(ele1, ele2, field1, field2):
        def _abundance_ratio(field, data):
            if ele1 == 'H':
                mass1 = _H_mass(data, H_mode)
            else:
                mass1 = data[field1]
            if ele1 != 'H' and ele1 != 'He':
                # metal fields are in code units; convert to a cell mass
                mass1 = mass1.value * data.ds.mass_unit / data.ds.length_unit**3
                mass1 = (mass1 * data['cell_volume']).convert_to_units('g')
            if ele2 == 'H':
                mass2 = _H_mass(data, H_mode)
            else:
                mass2 = data[field2]
            if ele2 != 'H' and ele2 != 'He':
                mass2 = mass2.value * data.ds.mass_unit / data.ds.length_unit**3
                mass2 = (mass2 * data['cell_volume']).convert_to_units('g')
            ratio = convert_abundances.abundance_ratio(
                (ele1, mass1.value), (ele2, mass2.value), 'mass')
            return ratio * yt.units.g / yt.units.g
        return _abundance_ratio

    nfields = 0
    for r in ratios:
        ele1, ele2 = r.rsplit('/')
        # Bug fix: the numerator test previously read
        # "ele1 != 'H' and ele2 != 'He'", so a ratio like 'Fe/He' looked
        # up its numerator under the wrong ('gas', ...) field.
        if ele1 != 'H' and ele1 != 'He':
            field1 = ('enzo', ele1 + '_Density')
        else:
            field1 = ('gas', ele1 + '_density')
        if ele2 != 'H' and ele2 != 'He':
            field2 = ('enzo', ele2 + '_Density')
        else:
            field2 = ('gas', ele2 + '_density')
        fieldname = ele1 + '_over_' + ele2
        yt.add_field(('gas', fieldname), function = return_function(ele1,ele2,field1,field2),
                     units = "")
        nfields = nfields + 1

    def _return_alpha_over_x(element_name):
        def _alpha_over_x(field, data):
            alpha = data[('gas','alpha_Abundance')]
            x = data[('gas',element_name + '_Abundance')]
            return convert_abundances.abundance_ratio(('alpha',alpha),(element_name,x),'abundances')
        return _alpha_over_x

    denoms = [x.split('/')[1] for x in ratios]
    denoms = np.unique(denoms)
    for x in denoms:
        yt.add_field(('gas','alpha_over_' + x), function = _return_alpha_over_x(x), units = "")
    return nfields
def generate_stellar_model_fields(ds):
    """
    Register stellar-model particle fields.

    Adds ('io','particle_model_<X>') fields for X in luminosity, L_FUV,
    L_LW, Q0, Q1, E0, E1, Teff, and R, computed via
    star_analysis.get_star_property, plus derived fields
    (particle_model_L0/L1, particle_model_L_1_3eV, particle_model_lifetime)
    and a particle_age field. No-op when the dataset has no particles.
    """
    if not (ds.parameters['NumberOfParticles'] > 0):
        return
    #
    # luminosity, L_FUV, L_LW, Q0, Q1, E0, E1
    #
    field_names = ['luminosity','L_FUV','L_LW','Q0','Q1','E0','E1', 'Teff','R']

    # yt unit objects used to tag the raw values from the model tables
    units = {'luminosity' : yt.units.erg/yt.units.s,
             'L_FUV' : yt.units.erg/yt.units.s,
             'L_LW' : yt.units.erg/yt.units.s,
             'Q0' : 1.0 /yt.units.s, 'Q1' : 1.0 / yt.units.s,
             'E0' : yt.units.erg, 'E1' : yt.units.erg, 'lifetime' : yt.units.s,
             'Teff' : yt.units.K, 'R' : yt.units.cm}

    # unit strings handed to yt.add_field for the same fields
    unit_label = {'luminosity': 'erg/s', 'L_FUV' : 'erg/s', 'L_LW' : 'erg/s',
                  'Q0' : '1/s', 'Q1' : '1/s', 'E0' : 'erg', 'E1': 'erg', 'lifetime' : 's', 'Teff' : 'K',
                  'R' : 'cm'}

    overload_type = {} # when generating stars, default is use type from simulation
    for k in units.keys():
        overload_type[k] = None # keep simulation type when making stars for all fields
    overload_type['lifetime'] = 11 # lifetime field will now be the lifetime of the original MS star
                                   # and NOT the lifetime from the simulation --- this is done by
                                   # overloading the particle type to 11, or main sequence. This is
                                   # also more useful, as the field would just be redundant
                                   # info if this isn't done. See star_analysis code.

    def _function_generator(field_name):
        # build a yt particle-field function that looks up 'field_name'
        # in the stellar model tables
        def _function(field, data):
            if np.size(data['particle_mass']) == 1:
                # this is ugly, but a way to bypass yt's validation step
                # because throwing junk values into the routine below will cause problems
                with utilities.nostdout():
                    p = star_analysis.get_star_property(ds, data, property_names = [field_name],
                                                        dummy_call = True, overload_type = None)
            else:
                p = star_analysis.get_star_property(ds, data, property_names = [field_name],
                                                    overload_type = overload_type[field_name] )
            p = p * units[field_name]
            return p
        return _function

    def _model_L0(field, data):
        # HI-ionizing luminosity = photon rate x mean photon energy
        Q0 = data[('io','particle_model_Q0')]
        E0 = data[('io','particle_model_E0')]
        return (E0 * Q0).convert_to_units('erg/s')

    def _model_L1(field, data):
        # HeI-ionizing luminosity = photon rate x mean photon energy
        Q1 = data[('io','particle_model_Q1')]
        E1 = data[('io','particle_model_E1')]
        return (E1 * Q1).convert_to_units('erg/s')

    def _age(field, data):
        # time since the particle was created
        p = data[('io','creation_time')]
        t = data.ds.current_time
        return (t - p).convert_to_units('Myr')

    def _model_L_1_3eV(field, data):
        # blackbody luminosity in the 1-3 eV band from Teff and radius
        Teff = data[('io','particle_model_Teff')].value
        flux = np.array([radiation.BB_flux(1.0, 3.0, T) for T in Teff])
        flux = flux * yt.units.erg / yt.units.s / yt.units.cm**2
        SA = 4.0 * np.pi * data[('io','particle_model_R')]**2
        return flux * SA

    for field in field_names:
        yt.add_field(('io', 'particle_model_' + field),
                     function = _function_generator(field), units=unit_label[field],
                     particle_type = True)

    def _lifetime(field, data):
        # main-sequence lifetime interpolated from the stellar evolution table
        m = data['birth_mass'].value
        z = data['metallicity_fraction'].value
        if np.size(m) == 1: # get around yt's checking
            # NOTE(review): np.shape(m) is a tuple here -- this looks like a
            # throwaway placeholder to satisfy yt's dummy validation pass;
            # confirm intended.
            lt = np.shape(m) * yt.units.Myr
        else:
            lt = np.zeros(np.size(m))
            for i in np.arange(np.size(m)):
                lt[i] = SE_table.interpolate({'mass' : m[i], 'metallicity' : z[i]}, 'lifetime')
            lt = (lt * yt.units.s).convert_to_units('Myr')
        return lt

    yt.add_field(('io','particle_model_lifetime'), function = _lifetime, units = 'Myr',
                 particle_type = True)
    yt.add_field(('io', 'particle_age'), function = _age, units = 'Myr',
                 particle_type = True)
    yt.add_field(('io','particle_model_L0'), function = _model_L0, units = 'erg/s',
                 particle_type = True)
    yt.add_field(('io','particle_model_L1'), function = _model_L1, units = 'erg/s',
                 particle_type = True)
    yt.add_field(('io','particle_model_L_1_3eV'), function = _model_L_1_3eV, units = 'erg/s',
                 particle_type = True)

    return
def _grackle_fields(ds):
    """
    Fields that require use of pygrackle.

    Configures a pygrackle chemistry_data object from the Enzo run
    parameters stored on ds, then registers ('gas','H2_self_shielding_length'),
    ('gas','cooling_time'), and ('gas','neg_cooling_time') dataset fields.
    """
    # mirror the Enzo simulation parameters into grackle
    cdata = pygrackle.chemistry_data()
    cdata.use_grackle = 1
    enzo_to_grackle = { 'MultiSpecies' : 'primordial_chemistry',
                        'MetalCooling' : 'metal_cooling',
                        'self_shielding_method' : 'self_shielding_method',
                        #
                        # The below is broken in pygrackle - not sure of prob
                        # 'H2_self_shielding' : 'H2_self_shielding',
                        'grackle_data_file' : 'grackle_data_file',
                        'DensityUnits' : 'density_units',
                        'LengthUnits' : 'length_units',
                        'TimeUnits' : 'time_units',
                        'ComovingCoordinates': 'comoving_coordinates',
                        'with_radiative_cooling' : 'with_radiative_cooling',
                        'UVbackground' : 'UVbackground'}

    for k in enzo_to_grackle:
        setattr(cdata, enzo_to_grackle[k], ds.parameters[k])

    cdata.a_units = 1.0
    cdata.a_value = 1.0
    cdata.velocity_units = cdata.length_units / cdata.time_units
    #cdata.energy_units = (cdata.length_units / cdata.time_units)**2.0
    cdata.initialize()

    def _H2_self_shielding_length(field, data):
        # use the local cell width as the H2 shielding length
        return data['dx'].convert_to_units('cm')
    ds.add_field(('gas','H2_self_shielding_length'), function = _H2_self_shielding_length, units='cm')

    def _cooling_time(field, data):
        #
        # This checks if yt is doing its fake-data error checking and
        # gives dummy result... a bit of a hack....
        #
        # compute field in grackle grid-by-grid
        field_list = data.ds.derived_field_list + data.ds.field_list
        flat_fields = {}
        fc = pygrackle.FluidContainer(cdata,10) # dummy container for now
        # first pass: verify every field grackle needs is available
        for f1, f2, conv in pygrackle.fluid_container._needed_fields(fc):
            if f2 not in field_list:
                raise pygrackle.fluid_container.FieldNotFound(f2)
            else:
                flat_fields[f2[1]] = np.zeros(np.size( data['Density']))
        # second pass: flatten data into grackle code units
        for f1, f2,conv in pygrackle.fluid_container._needed_fields(fc):
            flat_fields[f2[1]] = ((1.0*data[f2]).value).flatten() / conv
            # NOTE(review): the line below immediately overwrites the values
            # loaded above with zeros, so grackle receives zeroed inputs --
            # looks like leftover debugging code; confirm intended.
            flat_fields[f2[1]] = np.zeros(np.size( data['Density'].flatten()))
        flat_fields['cooling_time'] = np.zeros(np.size(flat_fields[f2[1]]))

        # compute a new FC every 8192 zones
        imin = 0
        imax = 0
        di = 8192
        ncells = np.size(flat_fields['cooling_time'])
        while imax < ncells:
            imin = 1*imax
            imax = np.min( [imax + di, ncells] )
            fc = pygrackle.FluidContainer(cdata, imax - imin)
            for f1, f2, conv in pygrackle.fluid_container._needed_fields(fc):
                fc[f1][:] = flat_fields[f2[1]][imin:imax]
            fc.calculate_cooling_time()
            flat_fields['cooling_time'][imin:imax] = fc['cooling_time']

        # convert from grackle code units back to seconds
        flat_fields['cooling_time'] = flat_fields['cooling_time'] * cdata.time_units
        return flat_fields['cooling_time'].reshape( np.shape(data['Density'].value)) * yt.units.s

    def _neg_cooling_time(field,data):
        # sign-flipped cooling time (heating regions become positive)
        return -1.0 * data['cooling_time']

    ds.add_field(('gas','cooling_time'), function = _cooling_time, units = 's')
    ds.add_field(('gas','neg_cooling_time'), function = _neg_cooling_time, units = 's')

    return
def _additional_helper_fields(fields):
    """
    Register assorted helper fields: total species masses, photoelectric
    heating diagnostics (G_o, G_eff, FUV/LW/ionizing fluxes), dark-matter
    background density and potential, gravitational potential /
    boundedness fields, and a star-formation-criteria mask.

    Parameters
    ----------
    fields : list
        The dataset's on-disk field list; used to detect which
        gravitational potential field (if any) is present.

    Returns
    -------
    int
        Number of potential-related fields registered (0 when no
        potential field exists on disk).
    """
    nfields = 0

    def _H_total_mass(field, data):
        mass = data[('gas','H_p0_mass')] + data[('gas','H_p1_mass')]
        return mass

    def _He_total_mass(field, data):
        mass = data[('gas','He_p0_mass')] + data[('gas','He_p1_mass')] +\
               data[('gas','He_p2_mass')]
        return mass

    def _pe_heating_cgs(field,data):
        # on-disk Pe_heating_rate is in code units; rebuild as erg/s/cm**3
        pe = data[('enzo','Pe_heating_rate')].value
        energy_unit = data.ds.mass_unit * data.ds.velocity_unit**2
        pe = pe * energy_unit / data.ds.length_unit**3 / data.ds.time_unit
        return pe.convert_to_units('erg/s/cm**3')

    def _pe_heating_rate_masked(field, data):
        # PE heating zeroed above the FUV temperature cutoff
        pe = data[('gas','Pe_heating_rate')].convert_to_units('erg/s/cm**3')
        x = 1.0 * pe
        x[data['temperature'] > data.ds.parameters['IndividualStarFUVTemperatureCutoff']] = 0.0
        return x

    def _otlwcgs(field, data):
        # H2 dissociation rate from the optically-thin LW field (1/s);
        # zeros when the field is absent from the run
        if ('enzo','OTLW_kdissH2I') in data.ds.field_list:
            lw = data[('enzo','OTLW_kdissH2I')].value / data.ds.time_unit
        else:
            lw = np.zeros(np.shape(data['Density'])) / data.ds.time_unit
        return lw.convert_to_units('1/s')

    def _G_o(field,data):
        # infer the ISRF normalization G_o by inverting the PE heating
        # model (broken power-law dust-to-gas in log Z, local attenuation)
        pe = data[('gas','Pe_heating_rate')].convert_to_units('erg/s/cm**3').value
        Z = (data['Metal_Density'] / data['Density']).value
        n_H = (data['H_p0_number_density'] + data['H_p1_number_density'] + data['H_m1_number_density'] +\
               0.5*(data['H2_p0_number_density'] + data['H2_p1_number_density'])).convert_to_units('cm**(-3)').value
        logZ = np.log10(Z / 0.014)
        g_to_d = np.zeros(np.shape(logZ))
        g_to_d[logZ <= -0.73] = 0.68 - 3.08*logZ[logZ <= -0.73]
        g_to_d[logZ > -0.73] = 2.21 - 1.00*logZ[logZ > -0.73]
        d_to_g = 1.0 / (10.0**(g_to_d))
        D = d_to_g / 6.616595E-3
        epsilon = 0.01488637246 * (n_H)**(0.235269059)
        atten = np.exp( - 1.33E-21 * D * data['dx'].convert_to_units('cm').value * n_H)
        G_o = pe / (1.3E-24 * n_H * epsilon * D * atten)
        # multiply by rho/rho to inherit a dimensionless yt array
        return G_o * (data['Density'] / data['Density'])

    def _G_eff(field,data):
        # same as _G_o but without the attenuation factor
        pe = data[('gas','Pe_heating_rate')].convert_to_units('erg/s/cm**3').value
        Z = (data['Metal_Density'] / data['Density']).value
        n_H = (data['H_p0_number_density'] + data['H_p1_number_density'] + data['H_m1_number_density'] +\
               0.5*(data['H2_p0_number_density'] + data['H2_p1_number_density'])).convert_to_units('cm**(-3)').value
        logZ = np.log10(Z / 0.014)
        g_to_d = np.zeros(np.shape(logZ))
        g_to_d[ logZ <= -0.73] = 0.68 - 3.08*logZ[logZ <= -0.73]
        g_to_d[ logZ > -0.73] = 2.21 - 1.00*logZ[logZ > -0.73]
        d_to_g = 1.0 / (10.0**(g_to_d))
        D = d_to_g / 6.616595E-3
        epsilon = 0.01488637246 * (n_H)**(0.235269059)
        # atten = np.exp( - 1.33E-21 * D * data['dx'].convert_to_units('cm').value * n_H)
        G_eff = pe / (1.3E-24 * n_H * epsilon * D)
        return G_eff * (data['Density'] / data['Density'])

    def _FUV_flux(field, data):
        # 1.59E-3 converts from MW normalized flux density to flux dens in cgs
        G_o = data[('gas','G_o')] # relative to MW
        G = (G_o.value * 1.59E-3) * yt.units.erg / yt.units.cm**2 /yt.units.s
        return G

    def _LW_flux(field, data):
        # Lyman-Werner flux reconstructed from the H2 dissociation rate
        LW_energy = 12.8 * yt.units.eV
        H2Isigma = 3.71E-18 * yt.units.cm**(2)
        if ('enzo','OTLW_kdissH2I') in data.ds.field_list:
            kdissH2I = (data[('enzo','OTLW_kdissH2I')].value / data.ds.time_unit).convert_to_units('1/s')
        else:
            kdissH2I = (np.zeros(np.shape(data['Density'])) / data.ds.time_unit).to('1/s')
        LW_flux = kdissH2I / H2Isigma * LW_energy
        return LW_flux.convert_to_units('erg/cm**2/s')

    def _Q0_flux(field, data):
        # HI-ionizing flux reconstructed from the photoionization rate
        E_HI = 13.6 * yt.units.eV
        kph = data[('enzo','HI_kph')].convert_to_cgs()
        n = data[('gas','H_p0_number_density')].convert_to_cgs()
        dt = data.ds.parameters['dtPhoton']
        V = data['cell_volume'].convert_to_cgs()
        dx = data['dx'].convert_to_cgs()
        s = 6.34629E-18 * yt.units.cm**(2) # cross section of HI at 13.6 eV
        tau = s * n * dx
        denom = 1.0 - np.exp(-tau)
        Q = kph * n * V / denom # this gives number of photons / s
        flux = Q * E_HI / dx**2
        return flux.convert_to_units('erg/cm**2/s')

    def _Q1_flux(field, data):
        # BUGFIX: first parameter was named 'ds'; yt passes the field
        # object here (it was unused, but misleading)
        E_HeI = 24.6 * yt.units.eV
        kph = data[('enzo','HeI_kph')].convert_to_cgs()
        n = data[('gas','H_p0_number_density')].convert_to_cgs()
        dt = data.ds.parameters['dtPhoton']
        V = data['cell_volume'].convert_to_cgs()
        dx = data['dx'].convert_to_cgs()
        s = 7.4300459E-18 * yt.units.cm**(2) # cross section of HeI at 24.6 eV
        tau = s * n * dx
        denom = 1.0 - np.exp(-tau)
        Q = kph * n * V / denom # this gives number of photons / s
        flux = Q * E_HeI / dx**2
        return flux.convert_to_units('erg/cm**2/s')

    def _metal_total_mass(field, data):
        mass = data['Metal_Density'] * data['cell_volume']
        return mass.convert_to_units('g')

    def _grav_pot(field,data):
        # gas potential, sign-flipped; field name varies by Enzo version
        try:
            x = (data['PotentialField'] * -1.0).convert_to_units('erg/g')
        except Exception:
            x = ( (data['GravPotential'].value * data.ds.velocity_unit**2)
                  * -1.0).convert_to_units('erg/g')
        return x

    def _tot_grav_pot(field,data):
        # gas potential plus the analytic DM background potential
        try:
            x = data['PotentialField']
        except Exception:
            x = data['GravPotential'].value * data.ds.velocity_unit**2
        x = x + data[('index','DM_background_potential')]
        return x.convert_to_units('erg/g')

    def _gas_grav_pot(field,data):
        # gas-only potential (no DM background)
        try:
            x = data['PotentialField']
        except Exception:
            x = data['GravPotential'].value * data.ds.velocity_unit**2
        return x.convert_to_units('erg/g')

    def _pos_tot_grav_pot(field, data):
        return np.abs(data[('gas','total_gravitational_potential')])

    def _potential_energy(field,data):
        x = data[('gas','total_gravitational_potential')] * data['cell_mass']
        return x.convert_to_units('erg')

    def _grav_bound(field, data):
        # 1.0 where TE + KE + PE < 0 (bound), else 0.0
        PE = data[('gas','potential_energy')].convert_to_units('erg')
        TE = ( data[('gas','thermal_energy')] * data['cell_mass'].convert_to_units('g')).convert_to_units('erg')
        KE = ( data[('gas','kinetic_energy')] * data['cell_volume']).convert_to_units('erg')
        result = 1 * ((TE + KE) + PE < 0.0)
        return result*1.0

    def _mag_cyl_r(field,data):
        return np.abs( data[('index','cylindrical_radius')].convert_to_units('cm'))

    def _mag_cyl_z(field,data):
        return np.abs( data[('index','cylindrical_z')].convert_to_units('cm') )

    def _dm_density(field, data):
        # Burkert-profile DM halo density from the run parameters
        r = data[('index','spherical_r')].convert_to_units('cm')
        r_s = (data.ds.parameters['DiskGravityDarkMatterR'] * yt.units.Mpc).convert_to_units('cm')
        rho_o = (data.ds.parameters['DiskGravityDarkMatterDensity'] * yt.units.g / yt.units.cm**3)
        rho = dm_halo.burkert_density(r, r_s, rho_o)
        return rho.convert_to_cgs()

    def _dm_potential(field, data):
        # Burkert-profile DM halo potential from the run parameters
        r = data[('index','spherical_r')].convert_to_units('cm')
        r_s = (data.ds.parameters['DiskGravityDarkMatterR'] * yt.units.Mpc).convert_to_units('cm')
        rho_o = (data.ds.parameters['DiskGravityDarkMatterDensity'] * yt.units.g / yt.units.cm**3)
        pot = dm_halo.burkert_potential(r, r_s, rho_o)
        return pot.convert_to_cgs()

    def _rad_accel(field, data):
        return np.sqrt(data['RadAccel1']**2 + data['RadAccel2']**2 + data['RadAccel3']**2).convert_to_units('cm/s**2')

    def _is_star_forming(field, data):
        # 1 where all SF criteria are met (dense, cold, converging flow,
        # maximally refined), else 0
        n = data[('gas','number_density')].convert_to_units('cm**(-3)')
        T = data['Temperature']
        divv = data[('gas','velocity_divergence')]
        l = data['grid_level']
        answer = 1 * ((n > data.ds.parameters['StarMakerOverDensityThreshold']) *\
                      (T < data.ds.parameters['IndividualStarTemperatureThreshold']) *\
                      (divv < 0) *\
                      (l == data.ds.parameters['MaximumRefinementLevel']))
        return answer

    yt.add_field(('gas','is_star_forming'), function = _is_star_forming,
                 units = "")
    yt.add_field(("gas","a_rad"), function=_rad_accel, units="cm/s**2")
    yt.add_field(('index','DM_background_density'), function = _dm_density, units = 'g/cm**3')
    yt.add_field(('index','DM_background_potential'), function = _dm_potential, units = 'erg/g')
    yt.add_field(('index','magnitude_cylindrical_radius'), function = _mag_cyl_r, units = 'cm')
    yt.add_field(('index','magnitude_cylindrical_z'), function = _mag_cyl_z, units = 'cm')
    yt.add_field(('gas','Pe_heating_rate'), function = _pe_heating_cgs, units = 'erg/s/cm**3')
    yt.add_field(('gas','H_total_mass'), function = _H_total_mass, units ='g')
    yt.add_field(('gas','H_Mass'), function = _H_total_mass, units = 'g') # define as same
    yt.add_field(('gas','He_total_mass'), function = _He_total_mass, units = 'g')
    yt.add_field(('gas','metal_mass'), function = _metal_total_mass, units = 'g')
    yt.add_field(('gas','OTLW_kdissH2I'), function = _otlwcgs, units = '1/s',
                 validators=ValidateDataField(('enzo','OTLW_kdissH2I')))
    yt.add_field(('gas','LW_flux'), function = _LW_flux, units = "erg/s/cm**2",
                 validators=ValidateDataField(('enzo','OTLW_kdissH2I')))
    yt.add_field(('gas','Pe_heating_rate_masked'), function = _pe_heating_rate_masked, units='erg/s/cm**3')
    yt.add_field(('gas','G_o'), function = _G_o, units = "")
    yt.add_field(('gas','G_eff'), function = _G_eff, units = "")
    yt.add_field(('gas','FUV_flux'), function = _FUV_flux, units = "erg/s/cm**2")
    yt.add_field(('gas','Q0_flux'), function = _Q0_flux, units = "erg/s/cm**2")
    yt.add_field(('gas','Q1_flux'), function = _Q1_flux, units = "erg/s/cm**2")

    if ('enzo','PotentialField') in fields or ('enzo', 'GravPotential') in fields:
        yt.add_field(('gas','pos_gravitational_potential'), function=_grav_pot, units = 'erg/g')
        yt.add_field(('gas','gas_gravitational_potential'), function=_gas_grav_pot, units = 'erg/g')
        yt.add_field(('gas','total_gravitational_potential'), function=_tot_grav_pot, units = 'erg/g')
        yt.add_field(('gas','pos_total_gravitational_potential'), function=_pos_tot_grav_pot, units = 'erg/g')
        yt.add_field(('gas','potential_energy'), function=_potential_energy, units = 'erg')
        yt.add_field(('gas','gravitationally_bound'), function=_grav_bound, units = "")
        nfields = 6 # BUGFIX: was hard-coded 5, but six fields are added above

    return nfields
def generate_derived_fields(ds):
    """
    Given a data set (to extract the on-disk field names), generate
    derived fields that will persist for the python session (i.e. not
    tied only to the passed data set).

    Right now, takes in all metal species tracer fields and constructs
    fields for their mass fraction, number density, and all possible
    interesting abundance ratios.

    NOTE: The derived fields will only exist for data sets loaded after
    this function call. If analysis is intended for passed data set,
    it will need to be reloaded for fields to exist.
    """
    # BUGFIX: without this declaration the assignment at the end of this
    # function created a function-local variable and the module-level
    # flag was never updated
    global FIELDS_DEFINED

    fields = ds.field_list

    # lets figure out the metal tracers present
    metals = utilities.species_from_fields(fields)
    ratios = utilities.ratios_list(metals)

    # make new functions to do correct units for species fields
    _density_function_generator(metals + ['Metal'])

    print("tracer species present: ", metals)
    nfields = _mass_function_generator(metals)
    print(nfields, "mass fields defined")
    nfields = _mass_fraction_function_generator(metals)
    print(nfields, "mass fraction fields defined")
    nfields = _number_density_function_generator(metals)
    print(nfields, "number density fields defined")

    if not (ionization._ion_table is None):
        nfields = _ionization_state_generator(metals)
        print(nfields, "ionization state fields defined")

    nfields = _abundance_ratio_function_generator(ratios, H_mode = 'total')
    print(nfields, "abundance ratio fields defined")
    nfields = _abundance_function_generator(metals)

    if ds.parameters['NumberOfParticles'] > 0:
        # guard against runs with no tracer species (metals == [])
        if len(metals) > 0 and\
           ('io','particle_' + metals[0] + '_fraction') in ds.field_list:
            nfields = _particle_abundance_ratio_function_generator(ratios, ds)
            print(nfields, "particle abundance ratio fields defined")
            _particle_abundance_function_generator(metals, ds)
        generate_stellar_model_fields(ds)

    nfields = _additional_helper_fields(fields)
    print(nfields, "additional helper fields defined")

    #generate_grackle_fields(ds)

    FIELDS_DEFINED = True
    return
def load_and_define(name):
    """
    Wrapper around yt to load a data set and define gradient
    fields and particle filters, which must be re-defined for every
    simulation file (unlike the session-persistent fields above).
    """
    ds = yt.load(name)
    generate_gradient_fields(ds)

    accel_units = 'cm/s**2'

    def _make_accel_component(axis):
        # gravitational acceleration component from the gas potential gradient
        def _component(field, data):
            gradient_name = 'gas_gravitational_potential_gradient_' + axis
            return data[('gas', gradient_name)].convert_to_units(accel_units)
        return _component

    def _accel_magnitude(field, data):
        # |a_grav| from the three components registered below
        components = (data[('gas','a_grav_x')]**2 +
                      data[('gas','a_grav_y')]**2 +
                      data[('gas','a_grav_z')]**2)
        return np.sqrt(components)

    def _rad_over_grav(field, data):
        # ratio of radiative to gravitational acceleration; defined as
        # zero wherever a_grav vanishes
        ratio = data[('gas','a_rad')] / data[('gas','a_grav')]
        ratio[data[('gas','a_grav')] == 0.0] = 0.0
        return ratio

    for axis in ('x', 'y', 'z'):
        ds.add_field(('gas', 'a_grav_' + axis), function = _make_accel_component(axis),
                     units = accel_units, sampling_type = 'cell')
    ds.add_field(('gas','a_grav'), function = _accel_magnitude,
                 units = accel_units, sampling_type = 'cell')
    ds.add_field(('gas','a_rad_over_a_grav'), function = _rad_over_grav,
                 units = '', sampling_type = 'cell')

    generate_particle_filters(ds)
    #generate_grackle_fields(ds)

    return ds
def generate_grackle_fields(ds):
    """
    Register pygrackle-backed fields on ds (cooling time, etc.) when
    pygrackle is available and the run used grackle; otherwise do nothing.
    """
    if not GRACKLE_IMPORTED:
        print("Grackle's python wrapper (pygrackle) was not imported successfully")
        # BUGFIX: previously fell through and called _grackle_fields anyway,
        # which raises NameError on the missing pygrackle module
        return

    if ds.parameters['use_grackle']:
        _grackle_fields(ds)

    return
def generate_gradient_fields(ds):
    """
    generate gas self gravity gradient fields and rename them to
    something sensible
    """
    potential_field = ("gas", "gas_gravitational_potential")
    ds.add_gradient_fields(potential_field)
    return
def generate_particle_filters(ds):
    """
    Make filter definitions for the various particle types:
        Main Sequence :
        White Dwarf :
        SNIa remnant :
        SNII remnant :
        AGB phase (likely very few or none since short lived) :

    The decorated function names double as the registered filter names
    used by add_particle_filter below, so they must not be renamed.
    """
    # NOTE: local masks renamed from 'filter' to avoid shadowing the builtin

    @yt.particle_filter(requires=["particle_type"], filtered_type='all')
    def main_sequence_stars(pfilter, data):
        # particle_type 11 == main sequence star
        mask = data[(pfilter.filtered_type, "particle_type")] == 11
        return mask

    @yt.particle_filter(requires=["particle_type"], filtered_type='all')
    def main_sequence_popIII_stars(pfilter, data):
        # particle_type 14 == Pop III main sequence star
        mask = data[(pfilter.filtered_type, "particle_type")] == 14
        return mask

    @yt.particle_filter(requires=["particle_type"], filtered_type='all')
    def remnant_stars(pfilter, data):
        # particle_type 13 == stellar remnant
        mask = data[(pfilter.filtered_type, "particle_type")] == 13
        return mask

    @yt.particle_filter(requires=["particle_type",'birth_mass'], filtered_type='all')
    def low_mass_stars(pfilter, data):
        # main sequence stars born with 2 - 8 solar masses
        mask = data[(pfilter.filtered_type, "particle_type")] == 11
        mask = mask * (data[(pfilter.filtered_type,"birth_mass")] > 2.0) * (data[(pfilter.filtered_type,"birth_mass")] <8.0)
        return mask

    @yt.particle_filter(requires=["particle_type",'birth_mass'], filtered_type='all')
    def low_mass_unresolved_stars(pfilter, data):
        # particle_type 15 == unresolved low mass star
        mask = data[(pfilter.filtered_type, "particle_type")] == 15
        return mask

    ds.add_particle_filter('main_sequence_stars')
    ds.add_particle_filter('remnant_stars')
    ds.add_particle_filter('low_mass_stars')
    ds.add_particle_filter('low_mass_unresolved_stars')
    ds.add_particle_filter('main_sequence_popIII_stars')

    return
source tracer field fractions
__author__ = "Andrew Emerick"
__email__ = "aemerick11@gmail.com"
import yt
yt.funcs.mylog.setLevel(40)
from yt.fields.api import ValidateDataField, ValidateParameter
from yt.units import dimensions
import numpy as np
from collections import Iterable
from galaxy_analysis.static_data import AMU,\
MOLECULAR_WEIGHT
from galaxy_analysis.utilities import convert_abundances
from galaxy_analysis.utilities import utilities
from galaxy_analysis import star_analysis
from galaxy_analysis.misc import dm_halo
from galaxy_analysis.yt_fields import ionization
from onezone import data_tables, radiation
# pygrackle is an optional dependency: record whether it imported so the
# grackle field generators can degrade gracefully.
GRACKLE_IMPORTED = True
try:
    import pygrackle
    from pygrackle.grackle_wrapper import \
        calculate_cooling_time
except Exception: # BUGFIX: was a bare except (swallowed SystemExit /
                  # KeyboardInterrupt); keep best-effort behavior but only
                  # catch ordinary exceptions
    GRACKLE_IMPORTED = False

# stellar evolution interpolation table used by the lifetime fields
SE_table = data_tables.StellarEvolutionData()

# set True by generate_derived_fields once session-wide fields exist
FIELDS_DEFINED = False
def _density_function_generator(asym):
    """
    Register ('gas', '<X>_Density') fields in cgs for each species symbol
    in asym. The on-disk enzo tracer fields carry incorrect units, so the
    density is rebuilt from code units before converting.
    """
    if not isinstance(asym, Iterable):
        asym = [asym]

    def _make_density_field(symbol):
        # closure binding the species symbol for one field
        def _density(field, data):
            raw = data[('enzo', symbol + '_Density')].value
            dens = raw * data.ds.mass_unit / data.ds.length_unit**3
            return dens.convert_to_units('g/cm**3')
        return _density

    for symbol in asym:
        yt.add_field(('gas', symbol + "_Density"),
                     function = _make_density_field(symbol),
                     units = 'g/cm**3')
    return
def _abundance_function_generator(asym):
    """
    Register ('gas', '<X>_Abundance') fields computed from the
    corresponding '<X>_Mass' fields. Hydrogen is always included as a
    reference element, and an averaged 'alpha_Abundance' is added when
    O, Mg, and Si are all present.
    """
    if not isinstance(asym, Iterable):
        asym = [asym]

    def _make_abundance_field(element):
        def _abundance(field, data):
            grams = data[('gas', element + '_Mass')].convert_to_units('g').value
            return convert_abundances.elemental_abundance(element, grams)
        return _abundance

    # always need hydrogen as the reference element
    if not ('H' in asym):
        asym = asym + ['H']

    for element in asym:
        yt.add_field(('gas', element + '_Abundance'),
                     function = _make_abundance_field(element), units = "")

    if ('O' in asym) and ('Mg' in asym) and ('Si' in asym):
        def _alpha_abundance(field, data):
            # simple average of the three tracked alpha elements
            total = (data[('gas','O_Abundance')] +
                     data[('gas','Mg_Abundance')] +
                     data[('gas','Si_Abundance')])
            return total / 3.0
        yt.add_field(('gas','alpha_Abundance'), function=_alpha_abundance, units="")

    return
def _mass_function_generator(asym):
    """
    Register ('gas', '<X>_Mass') cell-mass fields for each species symbol
    in asym (density rebuilt from code units, times cell volume), plus an
    'alpha_Mass' field when O, Mg, and Si are all tracked.

    Returns the number of fields registered.
    """
    if not isinstance(asym, Iterable):
        asym = [asym]

    def _make_mass_field(symbol):
        def _mass(field, data):
            # on-disk density has wrong units; rebuild from code units
            raw = data[('enzo', symbol + '_Density')].value
            dens = raw * data.ds.mass_unit / data.ds.length_unit**3
            dens = dens.convert_to_cgs()
            return (dens * data['cell_volume']).convert_to_units('g')
        return _mass

    count = 0
    for symbol in asym:
        yt.add_field(('gas', symbol + '_Mass'),
                     function = _make_mass_field(symbol), units='g')
        count += 1

    if ('O' in asym) and ('Mg' in asym) and ('Si' in asym):
        def _alpha_mass(field, data):
            # total mass in the three tracked alpha elements
            return (data[('gas','O_Mass')] +
                    data[('gas','Mg_Mass')] +
                    data[('gas','Si_Mass')])
        yt.add_field(('gas','alpha_Mass'), function = _alpha_mass, units = "g") # mass of alpha elements
        count += 1

    return count
#
# Construct arbitrary mass fraction derived fields in yt
# using a loop to generate functions
#
def _mass_fraction_function_generator(ds, asym):
    """
    Register ('gas', '<X>_Fraction') mass fraction fields for each species
    symbol in asym, plus source-tracer metal fractions (AGB / SNII / SNIa /
    PopIII components) when the corresponding run-time parameters are
    enabled on ds.

    NOTE(review): generate_derived_fields() in this file calls this
    function with a single argument (the species list only), which does
    not match this two-argument signature -- confirm which version of the
    call/definition pair is current.

    Returns the number of per-species fraction fields counted.
    """
    if not isinstance(asym, Iterable):
        asym = [asym]

    def return_function(a):
        def _mass_fraction(field,data):
            # species density has wrong units on disk; rebuild from code
            # units before dividing by the total gas density
            ele_dens = data[('enzo', a + '_Density')].value
            ele_dens = ele_dens * data.ds.mass_unit / data.ds.length_unit**3
            ele_dens = ele_dens.convert_to_cgs()
            dens = data[('enzo','Density')].convert_to_cgs()
            mass_fraction = ele_dens / dens
            return mass_fraction
        return _mass_fraction

    nfields = 0
    for a in asym:
        yt.add_field(('gas', a + '_Fraction'), function = return_function(a), units="")
        nfields = nfields + 1

    if (('O' in asym) and ('Mg' in asym) and ('Si' in asym)):
        def _alpha_mass_fraction(field, data):
            alpha = data[('gas','O_Fraction')] + data[('gas','Mg_Fraction')] +\
                    data[('gas','Si_Fraction')]
            return alpha / 3.0
        # yt.add_field(('gas','alpha_Fraction'), function = _alpha_mass_fraction, units = "")

        if (('S' in asym) and ('Ca' in asym)):
            def _alpha_5(field, data):
                # 5-element alpha average built on top of alpha_Fraction
                alpha = data[('gas','alpha_Fraction')]*3.0 + data[('gas','S_Fraction')] +\
                        data[('gas','Ca_Fraction')]
                return alpha / 5.0
            # yt.add_field( ('gas','alpha_5_Fraction'), function = _alpha_5, units = "")

        # NOTE(review): this counts the alpha field even though its
        # add_field call above is commented out -- confirm intended.
        nfields = nfields + 1

    if 'IndividualStarTrackAGBMetalDensity' in ds.parameters:
        # source-tracer metal fractions, only for runs that tracked them
        if ds.parameters['IndividualStarTrackAGBMetalDensity']:
            def _AGB_mass_fraction(field,data):
                ele_dens = data[('enzo', 'AGB_Metal_Density')].value
                ele_dens = ele_dens * data.ds.mass_unit / data.ds.length_unit**3
                ele_dens = ele_dens.convert_to_cgs()
                dens = data[('enzo','Density')].convert_to_cgs()
                mass_fraction = ele_dens / dens
                return mass_fraction
            yt.add_field(('gas', 'AGB_Mass_Fraction'), function = _AGB_mass_fraction, units="")

        if ds.parameters['IndividualStarTrackSNMetalDensity']:
            def _SNII_mass_fraction(field,data):
                ele_dens = data[('enzo', 'SNII_Metal_Density')].value
                ele_dens = ele_dens * data.ds.mass_unit / data.ds.length_unit**3
                ele_dens = ele_dens.convert_to_cgs()
                dens = data[('enzo','Density')].convert_to_cgs()
                mass_fraction = ele_dens / dens
                return mass_fraction
            yt.add_field(('gas', 'SNII_Mass_Fraction'), function = _SNII_mass_fraction, units="")

            def _SNIa_mass_fraction(field,data):
                ele_dens = data[('enzo', 'SNIa_Metal_Density')].value
                ele_dens = ele_dens * data.ds.mass_unit / data.ds.length_unit**3
                ele_dens = ele_dens.convert_to_cgs()
                dens = data[('enzo','Density')].convert_to_cgs()
                mass_fraction = ele_dens / dens
                return mass_fraction
            yt.add_field(('gas', 'SNIa_Mass_Fraction'), function = _SNIa_mass_fraction, units="")

        if ds.parameters['IndividualStarPopIIIFormation']:
            def _PopIII_mass_fraction(field,data):
                ele_dens = data[('enzo', 'PopIII_Metal_Density')].value
                ele_dens = ele_dens * data.ds.mass_unit / data.ds.length_unit**3
                ele_dens = ele_dens.convert_to_cgs()
                dens = data[('enzo','Density')].convert_to_cgs()
                mass_fraction = ele_dens / dens
                return mass_fraction
            yt.add_field(('gas', 'PopIII_Mass_Fraction'), function = _PopIII_mass_fraction, units="")

    return nfields
#
# Construct arbitrary number density derived fields in yt
# using a loop to generate functions
#
def _number_density_function_generator(asym):
    """
    Register ('gas', '<X>_Number_Density') fields for each species symbol
    in asym, a 'Metal_Number_Density' field (using an assumed mean metal
    molecular weight), and an 'H_total_number_density' field summing all
    hydrogen-bearing species present.

    Returns the number of per-species fields registered.
    """
    if not isinstance(asym, Iterable):
        asym = [asym]

    def return_function(a):
        def _number_density(field,data):
            # rebuild density from code units, then divide by particle mass
            ele_dens = data[('enzo', a + '_Density')].value
            ele_dens = ele_dens * data.ds.mass_unit / data.ds.length_unit**3
            ele_dens = ele_dens.convert_to_cgs()
            n = ele_dens / (MOLECULAR_WEIGHT[a] * AMU * yt.units.g)
            return n.convert_to_cgs()
        return _number_density

    nfields = 0
    for a in asym:
        yt.add_field(('gas', a + '_Number_Density'),
                     function = return_function(a), units='cm**(-3)')
        nfields = nfields + 1

    # make a metal number density field - make an assumption about the metal molecular weight
    def _metal_number_density(field,data):
        ele_dens = data[('enzo','Metal_Density')].convert_to_units('g/cm**3')
        n = ele_dens / (MOLECULAR_WEIGHT['metal'] * AMU * yt.units.g)
        return n.convert_to_cgs()
    yt.add_field(('gas', 'Metal_Number_Density'),
                 function = _metal_number_density, units = 'cm**(-3)')

    def _H_total_number_density(field,data):
        # HI + HII, plus molecular/negative-ion species when the run
        # tracked them (their fields are absent otherwise)
        n_H = data['H_p0_number_density'] + data['H_p1_number_density']
        try:
            n_H += data['H_m1_number_density'] +\
                   0.5 * (data['H2_p0_number_density'] + data['H2_p1_number_density'])
        except Exception:
            # BUGFIX: was a bare except that then added a unitless zero
            # array to a YTArray (itself an error); the molecular species
            # simply are not present, so there is nothing to add
            pass
        return n_H.convert_to_units('cm**(-3)')
    yt.add_field(('gas','H_total_number_density'),
                 function = _H_total_number_density, units = 'cm**(-3)')

    return nfields
def _ionization_state_generator(metals):
    """
    Register ('gas', '<ion>_Number_Density') fields for every ion in the
    ionization table whose parent element is among the tracked metals.
    Ion fractions are interpolated in log10(n_H) - log10(T) space.

    Returns the number of fields registered.
    """
    candidate_ions = ionization.get_ions()
    selected_ions = [ion for ion in candidate_ions
                     if ionization.get_elements(ion) in metals]

    def _make_ion_density(ion):
        def _ion_density(field, data):
            element = ionization.get_elements(ion)
            n_element = data[('gas', element + '_Number_Density')].value
            log_n = np.log10(data['H_total_number_density'].to('cm**(-3)').value)
            log_T = np.log10(data['Temperature'].to('K').value)
            ion_fraction = 10.0**(ionization.get_ion_fraction(log_n, log_T, ion))
            return (n_element * ion_fraction) * yt.units.cm**(-3)
        return _ion_density

    count = 0
    for ion in selected_ions:
        yt.add_field(('gas', ion + '_Number_Density'),
                     function = _make_ion_density(ion), units='cm**(-3)')
        count += 1

    return count
def _generate_rates(ds):
    """
    Generate reaction rate equations.

    Registers volumetric H2 formation-channel reaction rate fields
    ('gas','k8_rr'), ('gas','k10_rr'), ('gas','k19_rr'), ('gas','k22_rr');
    the rate coefficient fits follow the Enzo chemistry solver / Glover
    (2008) forms. k11/k12/k13 are defined but currently unused.
    """
    kunit = 1.0 #

    def k8(T):
        # H- + H -> H2 + e (associative detachment) rate coefficient
        T = T.convert_to_units('K').value
        k8 = 1.35E-9 * (T**(9.8493E-2) + 3.2852E-1 *\
             T**(5.561E-1) + 2.881E-7 * T**2.1826) /\
             (1.0 + 6.191E-3 * T**1.0461 + 8.9712E-11*T**3.0424 +\
              3.2576E-14 * T**3.7741)
        return k8 # now in cgs

    def k10(T): # have arg T to look same as other functions
        # H2+ + H -> H2 + H+ (charge exchange), temperature-independent
        k10 = 6.0E-10
        return k10

    def k19(T):
        # H2+ + H- -> H2 + H
        T = T.convert_to_units('K').value
        k19 = 5.0E-7 * np.sqrt(100.0 / T)
        return k19

    def k22(T):
        T = T.convert_to_units('K').value
        # for GLover 2008 three body rate ONLY
        k22 = 7.7E-31 / T**0.464
        return k22

    def k13(T):
        # H2 + H collisional dissociation
        T = T.convert_to_units('K').value
        k13 = 10.0**(-178.4239 - 68.42243 * np.log10(T)
                     + 43.20243 * np.log10(T)**2
                     - 4.633167 * np.log10(T)**3
                     + 69.70086 * np.log10(1.0 + 40870.38 / T)
                     - (23705.7 / T))
        ###############
        # above is for use with Glover 2008 three body rate
        #
        # T_eV = (T / yt.physical_constants.k_b).convert_to_units('eV').value
        # T_lim = 0.3
        #
        # k13 = np.ones(np.shape(T)) * 1.0E-20
        #
        # k13[ T > T_lim] = 1.0670825E-10*T_eV**(2.012) /\
        #                   (np.exp(4.463/T_eV) * (1.0 + 0.2472 * T_eV)**3.512)
        return k13

    def k11(T):
        # H2 + H+ dissociation channel
        # NOTE(review): this function looks broken as written (currently
        # unused): 'T' below still carries yt units inside np.exp, and a
        # full-size RHS array is assigned into a masked LHS subset.
        T_eV = (T / yt.physical_constants.k_b).convert_to_units('eV').value
        T_lim = 0.3
        k11 = np.ones(np.shape(T)) * 1.0E-20
        log_T = np.log(T.convert_to_units('K').value)
        k11[ T_eV > T_lim] = (np.exp(-21237.15/T) *\
            (- 3.3232183E-7
             + 3.3735382E-7 * log_T
             - 1.4491368E-7 * log_T**2
             + 3.4172805E-8 * log_T**3
             - 4.7813720E-9 * log_T**4
             + 3.9731542E-10 * log_T**5
             - 1.8171411E-11 * log_T**6
             + 3.5311932E-13 * log_T**7))
        return k11

    def k12(T):
        # H2 + e collisional dissociation
        # NOTE(review): same issues as k11 -- 'T' carries units inside the
        # fit expression and the mask assignment shapes do not match.
        T_eV = (T / yt.physical_constants.k_b).convert_to_units('eV').value
        T_lim = 0.3
        k12 = np.ones(np.shape(T)) * 1.0E-20
        k12[T>T_lim] = 4.4886E-9*T**(0.109127)*np.exp(-101858.0/T)
        return k12

    # def k29(T)
    #
    #     return k29

    # all reaction rate fields are number densities per unit time
    reaction_units = 1.0 / yt.units.cm**3 / yt.units.s
    ru_label = '1/s/cm**3'

    def _k8_reaction_rate(field, data):
        # 2 * k8 * n(H-) * n(HI)
        rr = k8(data['Temperature'].convert_to_units('K'))
        rr = 2.0 * rr * data[('gas','H_m1_number_density')].convert_to_cgs().value *\
             data[('gas','H_p0_number_density')].convert_to_cgs().value
        return rr * reaction_units

    def _k10_reaction_rate(field,data):
        # k10 * n(H2+) * n(HI)
        rr = k10(data['Temperature'].convert_to_units('K'))
        rr = rr * data[('gas','H2_p1_number_density')].convert_to_cgs().value *\
             data[('gas','H_p0_number_density')].convert_to_cgs().value
        return rr * reaction_units

    def _k19_reaction_rate(field,data):
        # k19 * n(H2+) * n(H-)
        rr = k19(data['Temperature'].convert_to_units('K'))
        rr = rr * data[('gas','H2_p1_number_density')].convert_to_cgs().value *\
             data[('gas','H_m1_number_density')].convert_to_cgs().value
        return rr * reaction_units

    def _k22_reaction_rate(field, data):
        # k22 * n(HI)**3 (three-body formation)
        rr = k22(data['Temperature'].convert_to_units('K'))
        rr = rr * (data[('gas','H_p0_number_density')].convert_to_cgs().value)**3
        return rr * reaction_units

    yt.add_field(('gas','k8_rr'),
                 function = _k8_reaction_rate, units = ru_label)
    yt.add_field(('gas','k10_rr'),
                 function = _k10_reaction_rate, units = ru_label)
    yt.add_field(('gas','k19_rr'),
                 function = _k19_reaction_rate, units = ru_label)
    yt.add_field(('gas','k22_rr'),
                 function = _k22_reaction_rate, units = ru_label)

    # rates
    # scoef
    # 2.0 * ( k8 * HM * HI - set with interp
    # k10 * H2II * HI * 0.5 - set with interp
    # k19 * H2II * HM * 0.5 - set with interp
    # k22 * HI * (HI*HI) - set with interp
    #
    # acoef
    # k13*HI + k11*HII + k12*de + k29 + k31shield
    # idust
    # + 2 * H2dust * HI * rhoH
    #
    # H2I = (scoef*dtit + H2I) / (1.0 + acoef*dtit)
    #
    # passes density field
    return
def _particle_abundance_function_generator(asym, ds = None):
    """
    Register per-particle elemental abundance fields
    ('io', 'particle_<ele>_abundance') from the on-disk mass-fraction
    fields. 'H' and 'He' are always appended to the element list.
    When O, Mg and Si are all present, an averaged 'alpha' abundance field
    is also defined (extended to a 5-element average when S and Ca exist).

    Parameters
    ----------
    asym : str or iterable of str
        Element symbols to process.
    ds : yt dataset, optional
        When given, the function is a no-op for particle-free outputs.
    """
    if not (ds is None):
        if not (ds.parameters['NumberOfParticles'] > 0):
            # no particles on disk; nothing to register
            return
    if not isinstance(asym, Iterable):
        asym = [asym]
    # H and He are required for abundance normalization downstream
    if not ('H' in asym):
        asym = asym + ['H']
    if not ('He' in asym):
        asym = asym + ['He']

    def return_function(element, fraction_field):
        # closure factory: binds element / field name at definition time
        def _abundance(field, data):
            # elemental mass in grams = mass fraction x birth mass
            mass = data[fraction_field].value * (data['birth_mass'].value *yt.units.Msun).convert_to_units('g').value
            abund = convert_abundances.elemental_abundance(element, mass)
            return abund
        return _abundance

    for a in asym:
        fraction_field = ('io','particle_' + a + '_fraction')
        yt.add_field(('io','particle_' + a + '_abundance'),
                     return_function(a, fraction_field), units = "", particle_type = True)

    if (('O' in asym) and ('Mg' in asym) and ('Si' in asym)):
        def _alpha_fraction(field, data):
            alpha = data[('io','particle_O_fraction')] +\
                    data[('io','particle_Mg_fraction')] +\
                    data[('io','particle_Si_fraction')]
            return alpha / 3.0

        def _alpha_abundance(field, data):
            alpha = data[('io','particle_O_abundance')] +\
                    data[('io','particle_Mg_abundance')] +\
                    data[('io','particle_Si_abundance')]
            return alpha / 3.0
        # yt.add_field(('io','particle_alpha_fraction'), function=_alpha_fraction, units = "", particle_type = True)
        yt.add_field(('io','particle_alpha_abundance'), function=_alpha_abundance, units="", particle_type=True)

        if ('S' in asym) and ('Ca' in asym):
            def _alpha_5_fraction(field,data):
                # NOTE(review): depends on ('io','particle_alpha_fraction'),
                # whose registration is commented out above; harmless only
                # while the add_field below stays commented out too.
                alpha = (data[('io','particle_alpha_fraction')]*3.0 + data[('io','particle_S_fraction')] +\
                         data[('io','particle_Ca_fraction')]) / 5.0
                return alpha

            def _alpha_5(field,data):
                alpha = data[('io','particle_alpha_abundance')]*3.0 + data[('io','particle_S_abundance')]+\
                        data[('io','particle_Ca_abundance')]
                return alpha / 5.0
            # yt.add_field( ('io','particle_alpha_5_fraction'), function = _alpha_5_fraction, units = "", particle_type =True)
            yt.add_field( ('io','particle_alpha_5_abundance'), function = _alpha_5, units = "", particle_type = True)
    return
def _particle_abundance_ratio_function_generator(ratios, ds = None):
    """
    Register particle abundance-ratio fields ('io', 'particle_<X>_over_<Y>')
    for each requested 'X/Y' ratio, plus alpha/alpha_5 ratios for each
    distinct denominator when the alpha abundance field exists.

    Parameters
    ----------
    ratios : str or iterable of str
        Ratios of the form 'X/Y' (element symbols).
    ds : yt dataset, optional
        When given, used to skip particle-free outputs and to check for
        the derived alpha-abundance field.

    Returns
    -------
    int
        Number of plain (non-alpha) ratio fields defined.
    """
    if ds is not None:
        # nothing to do for particle-free outputs
        if not (ds.parameters['NumberOfParticles'] > 0):
            return
    if not isinstance(ratios, Iterable):
        ratios = [ratios]

    def return_function(ele1, ele2, field1, field2):
        # closure factory: binds the element/field names at definition time
        def _abundance_ratio(field, data):
            # per-particle elemental masses in grams (fraction x birth mass)
            mass1 = data[field1].value
            mass1 = ((mass1 * data['birth_mass'].value) * yt.units.Msun).convert_to_units('g')
            mass2 = data[field2].value
            mass2 = ((mass2 * data['birth_mass'].value) * yt.units.Msun).convert_to_units('g')
            ratio = convert_abundances.abundance_ratio( (ele1, mass1.value), (ele2, mass2.value), 'mass')
            return ratio * yt.units.g / yt.units.g
        return _abundance_ratio

    nfields = 0
    for r in ratios:
        ele1, ele2 = r.rsplit('/')
        field1 = ('io','particle_' + ele1 + '_fraction')
        field2 = ('io','particle_' + ele2 + '_fraction')
        fieldname = 'particle_' + ele1 + '_over_' + ele2
        yt.add_field(('io', fieldname), function = return_function(ele1,ele2,field1,field2),
                     units = "", particle_type = True)
        nfields = nfields + 1

    def _alpha_return_function(base):
        def _alpha_over_x(field,data):
            alpha = data[('io','particle_alpha_abundance')]
            x = data[('io','particle_' + base + '_abundance')]
            return convert_abundances.abundance_ratio(('alpha',alpha),(base,x), 'abundances')
        return _alpha_over_x

    def _alpha_5_return_function(base):
        def _alpha_5_over_x(field,data):
            alpha = data[('io','particle_alpha_5_abundance')]
            x = data[('io','particle_' + base + '_abundance')]
            return convert_abundances.abundance_ratio(('alpha_5',alpha),(base,x), 'abundances')
        return _alpha_5_over_x

    denoms = [x.split('/')[1] for x in ratios]
    denoms = np.unique(denoms)
    # BUGFIX: previously dereferenced ds.derived_field_list even when ds is
    # None (the default), raising AttributeError; guard on ds first.
    if (ds is not None) and (('io','particle_alpha_abundance') in ds.derived_field_list):
        for x in denoms:
            yt.add_field(('io','particle_alpha_over_' + x), function = _alpha_return_function(x), units = "", particle_type = True)
            yt.add_field(('io','particle_alpha_5_over_' + x), function = _alpha_5_return_function(x), units = "", particle_type = True)
    return nfields
#
# Construct arbitrary abundance ratio fields in yt
# using a function generator
#
def _abundance_ratio_function_generator(ratios, H_mode = 'total'):
    """
    Register gas-phase abundance-ratio fields ('gas', '<X>_over_<Y>') for
    each requested 'X/Y' ratio, plus ('gas', 'alpha_over_<Y>') for each
    distinct denominator.

    Parameters
    ----------
    ratios : str or iterable of str
        Ratios as 'X/Y' element-symbol strings.
    H_mode : str
        Hydrogen accounting: 'total' (HI+HII, plus the molecular species
        when present), 'HI', or 'HII'.

    Returns
    -------
    int
        Number of '<X>_over_<Y>' fields defined (alpha fields excluded).

    Raises
    ------
    ValueError
        If H_mode is not one of 'total', 'HI', 'HII' (when a ratio
        involving H is evaluated).
    """
    if not isinstance(ratios, Iterable):
        ratios = [ratios]

    def _H_mass(data, mode):
        # hydrogen density under the chosen accounting mode
        if mode == 'total':
            mass = data[('enzo','HI_Density')] + data[('enzo','HII_Density')]
            if ('enzo','H2I_Density') in data.ds.field_list:
                mass += data[('enzo','HM_Density')] + data[('enzo','H2I_Density')] +\
                        data[('enzo','H2II_Density')]
        elif mode == 'HI':
            mass = data[('enzo','HI_Density')]
        elif mode == 'HII':
            # BUGFIX: field name was misspelled 'HII_Denisty'
            mass = data[('enzo','HII_Density')]
        else:
            # previously an unknown mode surfaced as UnboundLocalError
            raise ValueError("H_mode must be 'total', 'HI', or 'HII', got %s" % mode)
        return mass

    def return_function(ele1, ele2, field1, field2):
        def _abundance_ratio(field, data):
            if ele1 == 'H':
                mass1 = _H_mass(data, H_mode)
            else:
                mass1 = data[field1]
            if ele1 != 'H' and ele1 != 'He':
                # reinterpret the raw tracer values in code density units
                mass1 = mass1.value * data.ds.mass_unit / data.ds.length_unit**3
            mass1 = (mass1 * data['cell_volume']).convert_to_units('g')
            if ele2 == 'H':
                mass2 = _H_mass(data, H_mode)
            else:
                mass2 = data[field2]
            if ele2 != 'H' and ele2 != 'He':
                mass2 = mass2.value * data.ds.mass_unit / data.ds.length_unit**3
            mass2 = (mass2 * data['cell_volume']).convert_to_units('g')
            ratio = convert_abundances.abundance_ratio( (ele1, mass1.value), (ele2, mass2.value), 'mass')
            return ratio * yt.units.g / yt.units.g
        return _abundance_ratio

    nfields = 0
    for r in ratios:
        ele1, ele2 = r.rsplit('/')
        # BUGFIX: field1's selection previously tested `ele2 != 'He'` by
        # mistake (compare the parallel ele2 branch below), sending He
        # numerators to a nonexistent ('enzo','He_Density') field.
        if ele1 != 'H' and ele1 != 'He':
            field1 = ('enzo', ele1 + '_Density')
        else:
            field1 = ('gas', ele1 + '_density')
        if ele2 != 'H' and ele2 != 'He':
            field2 = ('enzo', ele2 + '_Density')
        else:
            field2 = ('gas', ele2 + '_density')
        fieldname = ele1 + '_over_' + ele2
        yt.add_field(('gas', fieldname), function = return_function(ele1,ele2,field1,field2),
                     units = "")
        nfields = nfields + 1

    def _return_alpha_over_x(element_name):
        def _alpha_over_x(field, data):
            alpha = data[('gas','alpha_Abundance')]
            x = data[('gas',element_name + '_Abundance')]
            return convert_abundances.abundance_ratio(('alpha',alpha),(element_name,x),'abundances')
        return _alpha_over_x

    denoms = [x.split('/')[1] for x in ratios]
    denoms = np.unique(denoms)
    for x in denoms:
        yt.add_field(('gas','alpha_over_' + x), function = _return_alpha_over_x(x), units = "")
    return nfields
def generate_stellar_model_fields(ds):
    """
    Register per-particle stellar-model fields ('io','particle_model_<X>')
    looked up via star_analysis, plus derived luminosities, a model
    lifetime, and particle age. No-op if the dataset has no particles.
    """
    if not (ds.parameters['NumberOfParticles'] > 0):
        return
    #
    # luminosity, L_FUV, L_LW, Q0, Q1, E0, E1
    #
    field_names = ['luminosity','L_FUV','L_LW','Q0','Q1','E0','E1', 'Teff','R']
    units = {'luminosity' : yt.units.erg/yt.units.s,
             'L_FUV' : yt.units.erg/yt.units.s,
             'L_LW' : yt.units.erg/yt.units.s,
             'Q0' : 1.0 /yt.units.s, 'Q1' : 1.0 / yt.units.s,
             'E0' : yt.units.erg, 'E1' : yt.units.erg, 'lifetime' : yt.units.s,
             'Teff' : yt.units.K, 'R' : yt.units.cm}
    unit_label = {'luminosity': 'erg/s', 'L_FUV' : 'erg/s', 'L_LW' : 'erg/s',
                  'Q0' : '1/s', 'Q1' : '1/s', 'E0' : 'erg', 'E1': 'erg', 'lifetime' : 's', 'Teff' : 'K',
                  'R' : 'cm'}
    overload_type = {}  # when generating stars, default is use type from simulation
    for k in units.keys():
        overload_type[k] = None  # keep simulation type when making stars for all fields
    overload_type['lifetime'] = 11  # lifetime field will now be the lifetime of the original MS star
    # and NOT the lifetime from the simulation --- this is done by
    # overloading the particle type to 11, or main sequence. This is
    # also more useful, as the field would just be redundant
    # info if this isn't done. See star_analysis code.

    def _function_generator(field_name):
        # builds the yt field function for one model property
        def _function(field, data):
            if np.size(data['particle_mass']) == 1:
                # this is ugly, but a way to bypass yt's validation step
                # because throwing junk values into the routine below will cause problems
                with utilities.nostdout():
                    p = star_analysis.get_star_property(ds, data, property_names = [field_name],
                                                        dummy_call = True, overload_type = None)
            else:
                p = star_analysis.get_star_property(ds, data, property_names = [field_name],
                                                    overload_type = overload_type[field_name] )
            p = p * units[field_name]
            return p
        return _function

    def _model_L0(field, data):
        # photon rate Q0 (1/s) times mean photon energy E0 (erg) -> erg/s
        Q0 = data[('io','particle_model_Q0')]
        E0 = data[('io','particle_model_E0')]
        return (E0 * Q0).convert_to_units('erg/s')

    def _model_L1(field, data):
        Q1 = data[('io','particle_model_Q1')]
        E1 = data[('io','particle_model_E1')]
        return (E1 * Q1).convert_to_units('erg/s')

    def _age(field, data):
        # time since the particle's creation
        p = data[('io','creation_time')]
        t = data.ds.current_time
        return (t - p).convert_to_units('Myr')

    def _model_L_1_3eV(field, data):
        # blackbody flux in the 1-3 eV band times the stellar surface area
        Teff = data[('io','particle_model_Teff')].value
        flux = np.array([radiation.BB_flux(1.0, 3.0, T) for T in Teff])
        flux = flux * yt.units.erg / yt.units.s / yt.units.cm**2
        SA = 4.0 * np.pi * data[('io','particle_model_R')]**2
        return flux * SA

    for field in field_names:
        yt.add_field(('io', 'particle_model_' + field),
                     function = _function_generator(field), units=unit_label[field],
                     particle_type = True)

    def _lifetime(field, data):
        m = data['birth_mass'].value
        z = data['metallicity_fraction'].value
        if np.size(m) == 1:  # get around yt's checking
            # NOTE(review): np.shape(m) is a tuple; multiplying it by Myr
            # looks wrong — np.zeros(np.shape(m)) * yt.units.Myr was
            # probably intended. Confirm before touching.
            lt = np.shape(m) * yt.units.Myr
        else:
            lt = np.zeros(np.size(m))
            for i in np.arange(np.size(m)):
                # per-particle table lookup of the main-sequence lifetime
                lt[i] = SE_table.interpolate({'mass' : m[i], 'metallicity' : z[i]}, 'lifetime')
            lt = (lt * yt.units.s).convert_to_units('Myr')
        return lt

    yt.add_field(('io','particle_model_lifetime'), function = _lifetime, units = 'Myr',
                 particle_type = True)
    yt.add_field(('io', 'particle_age'), function = _age, units = 'Myr',
                 particle_type = True)
    yt.add_field(('io','particle_model_L0'), function = _model_L0, units = 'erg/s',
                 particle_type = True)
    yt.add_field(('io','particle_model_L1'), function = _model_L1, units = 'erg/s',
                 particle_type = True)
    yt.add_field(('io','particle_model_L_1_3eV'), function = _model_L_1_3eV, units = 'erg/s',
                 particle_type = True)
    return
def _grackle_fields(ds):
    """
    Register fields that require pygrackle: the self-shielding length and
    the (signed/negated) cooling time, computed by configuring a grackle
    chemistry_data object from the dataset's Enzo parameters.
    """
    cdata = pygrackle.chemistry_data()
    cdata.use_grackle = 1
    # map of Enzo parameter names -> grackle attribute names
    enzo_to_grackle = { 'MultiSpecies' : 'primordial_chemistry',
                        'MetalCooling' : 'metal_cooling',
                        'self_shielding_method' : 'self_shielding_method',
                        #
                        # The below is broken in pygrackle - not sure of prob
                        # 'H2_self_shielding' : 'H2_self_shielding',
                        'grackle_data_file' : 'grackle_data_file',
                        'DensityUnits' : 'density_units',
                        'LengthUnits' : 'length_units',
                        'TimeUnits' : 'time_units',
                        'ComovingCoordinates': 'comoving_coordinates',
                        'with_radiative_cooling' : 'with_radiative_cooling',
                        'UVbackground' : 'UVbackground'}
    for k in enzo_to_grackle:
        setattr(cdata, enzo_to_grackle[k], ds.parameters[k])
    cdata.a_units = 1.0
    cdata.a_value = 1.0
    cdata.velocity_units = cdata.length_units / cdata.time_units
    #cdata.energy_units = (cdata.length_units / cdata.time_units)**2.0
    cdata.initialize()

    def _H2_self_shielding_length(field, data):
        # use the local cell width as the shielding length
        return data['dx'].convert_to_units('cm')
    ds.add_field(('gas','H2_self_shielding_length'), function = _H2_self_shielding_length, units='cm')

    def _cooling_time(field, data):
        # Flatten the fields grackle needs, then feed them to
        # FluidContainers in fixed-size chunks and collect the result.
        field_list = data.ds.derived_field_list + data.ds.field_list
        flat_fields = {}
        fc = pygrackle.FluidContainer(cdata,10)  # dummy container for now
        for f1, f2, conv in pygrackle.fluid_container._needed_fields(fc):
            if f2 not in field_list:
                raise pygrackle.fluid_container.FieldNotFound(f2)
            else:
                flat_fields[f2[1]] = np.zeros(np.size( data['Density']))
        for f1, f2, conv in pygrackle.fluid_container._needed_fields(fc):
            flat_fields[f2[1]] = ((1.0*data[f2]).value).flatten() / conv
        # BUGFIX: a leftover line here re-zeroed flat_fields[f2[1]]
        # immediately after filling it with data, wiping grackle's input.
        flat_fields['cooling_time'] = np.zeros(np.size(flat_fields[f2[1]]))
        # compute a new FC every 8192 zones
        imin = 0
        imax = 0
        di = 8192
        ncells = np.size(flat_fields['cooling_time'])
        while imax < ncells:
            imin = 1*imax
            imax = np.min( [imax + di, ncells] )
            fc = pygrackle.FluidContainer(cdata, imax - imin)
            for f1, f2, conv in pygrackle.fluid_container._needed_fields(fc):
                fc[f1][:] = flat_fields[f2[1]][imin:imax]
            fc.calculate_cooling_time()
            flat_fields['cooling_time'][imin:imax] = fc['cooling_time']
        # grackle returns code time units; convert to seconds
        flat_fields['cooling_time'] = flat_fields['cooling_time'] * cdata.time_units
        return flat_fields['cooling_time'].reshape( np.shape(data['Density'].value)) * yt.units.s

    def _neg_cooling_time(field,data):
        return -1.0 * data['cooling_time']

    ds.add_field(('gas','cooling_time'), function = _cooling_time, units = 's')
    ds.add_field(('gas','neg_cooling_time'), function = _neg_cooling_time, units = 's')
    return
def _additional_helper_fields(fields):
    """
    Register miscellaneous helper fields: species mass totals, photoelectric
    heating diagnostics, radiation fluxes, gravitational potential / binding
    fields, the analytic DM background profile, and a star-formation flag.

    Parameters
    ----------
    fields : list
        The dataset's on-disk field list; used only to decide whether the
        gravitational potential fields can be defined.

    Returns
    -------
    int
        5 when the potential-based fields were added, else 0.
        NOTE(review): six potential fields are added but nfields is set to
        5, and none of the unconditionally added fields are counted.
    """
    nfields = 0

    def _H_total_mass(field, data):
        mass = data[('gas','H_p0_mass')] + data[('gas','H_p1_mass')]
        return mass

    def _He_total_mass(field, data):
        mass = data[('gas','He_p0_mass')] + data[('gas','He_p1_mass')] +\
               data[('gas','He_p2_mass')]
        return mass

    def _pe_heating_cgs(field,data):
        # rescale the code-unit photoelectric heating rate to erg/s/cm**3
        pe = data[('enzo','Pe_heating_rate')].value
        energy_unit = data.ds.mass_unit * data.ds.velocity_unit**2
        pe = pe * energy_unit / data.ds.length_unit**3 / data.ds.time_unit
        return pe.convert_to_units('erg/s/cm**3')

    def _pe_heating_rate_masked(field, data):
        # PE heating zeroed where T exceeds the FUV temperature cutoff
        pe = data[('gas','Pe_heating_rate')].convert_to_units('erg/s/cm**3')
        x = 1.0 * pe
        x[data['temperature'] > data.ds.parameters['IndividualStarFUVTemperatureCutoff']] = 0.0
        return x

    def _otlwcgs(field, data):
        # optically-thin LW H2 dissociation rate; zeros when field is absent
        if ('enzo','OTLW_kdissH2I') in data.ds.field_list:
            lw = data[('enzo','OTLW_kdissH2I')].value / data.ds.time_unit
        else:
            lw = np.zeros(np.shape(data['Density'])) / data.ds.time_unit
        return lw.convert_to_units('1/s')

    def _G_o(field,data):
        # invert the PE heating rate for the normalized FUV field strength,
        # with a metallicity-dependent dust-to-gas scaling and attenuation
        pe = data[('gas','Pe_heating_rate')].convert_to_units('erg/s/cm**3').value
        Z = (data['Metal_Density'] / data['Density']).value
        n_H = (data['H_p0_number_density'] + data['H_p1_number_density'] + data['H_m1_number_density'] +\
               0.5*(data['H2_p0_number_density'] + data['H2_p1_number_density'])).convert_to_units('cm**(-3)').value
        logZ = np.log10(Z / 0.014)
        # broken power-law gas-to-dust vs. metallicity fit
        g_to_d = np.zeros(np.shape(logZ))
        g_to_d[logZ <= -0.73] = 0.68 - 3.08*logZ[logZ <= -0.73]
        g_to_d[logZ > -0.73] = 2.21 - 1.00*logZ[logZ > -0.73]
        d_to_g = 1.0 / (10.0**(g_to_d))
        D = d_to_g / 6.616595E-3
        epsilon = 0.01488637246 * (n_H)**(0.235269059)
        atten = np.exp( - 1.33E-21 * D * data['dx'].convert_to_units('cm').value * n_H)
        G_o = pe / (1.3E-24 * n_H * epsilon * D * atten)
        # rho/rho re-attaches a dimensionless yt array wrapper
        return G_o * (data['Density'] / data['Density'])

    def _G_eff(field,data):
        # same as _G_o but without the attenuation correction
        pe = data[('gas','Pe_heating_rate')].convert_to_units('erg/s/cm**3').value
        Z = (data['Metal_Density'] / data['Density']).value
        n_H = (data['H_p0_number_density'] + data['H_p1_number_density'] + data['H_m1_number_density'] +\
               0.5*(data['H2_p0_number_density'] + data['H2_p1_number_density'])).convert_to_units('cm**(-3)').value
        logZ = np.log10(Z / 0.014)
        g_to_d = np.zeros(np.shape(logZ))
        g_to_d[ logZ <= -0.73] = 0.68 - 3.08*logZ[logZ <= -0.73]
        g_to_d[ logZ > -0.73] = 2.21 - 1.00*logZ[logZ > -0.73]
        d_to_g = 1.0 / (10.0**(g_to_d))
        D = d_to_g / 6.616595E-3
        epsilon = 0.01488637246 * (n_H)**(0.235269059)
        # atten = np.exp( - 1.33E-21 * D * data['dx'].convert_to_units('cm').value * n_H)
        G_eff = pe / (1.3E-24 * n_H * epsilon * D)
        return G_eff * (data['Density'] / data['Density'])

    def _FUV_flux(field, data):
        # 1.59E-3 converts from MW normalized flux density to flux dens in cgs
        G_o = data[('gas','G_o')]  # relative to MW
        G = (G_o.value * 1.59E-3) * yt.units.erg / yt.units.cm**2 /yt.units.s
        return G

    def _LW_flux(field, data):
        # Lyman-Werner band flux from the dissociation rate / cross section
        LW_energy = 12.8 * yt.units.eV
        H2Isigma = 3.71E-18 * yt.units.cm**(2)
        if ('enzo','OTLW_kdissH2I') in data.ds.field_list:
            kdissH2I = (data[('enzo','OTLW_kdissH2I')].value / data.ds.time_unit).convert_to_units('1/s')
        else:
            kdissH2I = (np.zeros(np.shape(data['Density'])) / data.ds.time_unit).to('1/s')
        LW_flux = kdissH2I / H2Isigma * LW_energy
        return LW_flux.convert_to_units('erg/cm**2/s')

    def _Q0_flux(field, data):
        # ionizing flux reconstructed from the HI photoionization rate
        E_HI = 13.6 * yt.units.eV
        kph = data[('enzo','HI_kph')].convert_to_cgs()
        n = data[('gas','H_p0_number_density')].convert_to_cgs()
        dt = data.ds.parameters['dtPhoton']
        V = data['cell_volume'].convert_to_cgs()
        dx = data['dx'].convert_to_cgs()
        s = 6.34629E-18 * yt.units.cm**(2)  # cross section of HI at 13.6 eV
        tau = s * n * dx
        denom = 1.0 - np.exp(-tau)
        Q = kph * n * V / denom  # this gives number of photons / s
        flux = Q * E_HI / dx**2
        return flux.convert_to_units('erg/cm**2/s')

    def _Q1_flux(ds,data):
        # NOTE(review): first parameter is actually yt's field object; the
        # name 'ds' is misleading but harmless since it is unused.
        E_HeI = 24.6 * yt.units.eV
        kph = data[('enzo','HeI_kph')].convert_to_cgs()
        n = data[('gas','H_p0_number_density')].convert_to_cgs()
        dt = data.ds.parameters['dtPhoton']
        V = data['cell_volume'].convert_to_cgs()
        dx = data['dx'].convert_to_cgs()
        s = 7.4300459E-18 * yt.units.cm**(2)  # cross section of HeI at 24.6 eV
        tau = s * n * dx
        denom = 1.0 - np.exp(-tau)
        Q = kph * n * V / denom  # this gives number of photons / s
        flux = Q * E_HeI / dx**2
        return flux.convert_to_units('erg/cm**2/s')

    def _metal_total_mass(field, data):
        mass = data['Metal_Density'] * data['cell_volume']
        return mass.convert_to_units('g')

    def _grav_pot(field,data):
        # NOTE(review): bare except — any failure reading 'PotentialField'
        # silently falls back to 'GravPotential'.
        try:
            x = (data['PotentialField'] * -1.0).convert_to_units('erg/g')
        except:
            x = ( (data['GravPotential'].value * data.ds.velocity_unit**2)
                 * -1.0).convert_to_units('erg/g')
        return x

    def _tot_grav_pot(field,data):
        # gas potential plus the analytic DM background potential
        try:
            x = data['PotentialField']
        except:
            x = data['GravPotential'].value * data.ds.velocity_unit**2
        x = x + data[('index','DM_background_potential')]
        return x.convert_to_units('erg/g')

    def _gas_grav_pot(field,data):
        try:
            x = data['PotentialField']
        except:
            x = data['GravPotential'].value * data.ds.velocity_unit**2
        return x.convert_to_units('erg/g')

    def _pos_tot_grav_pot(field, data):
        return np.abs(data[('gas','total_gravitational_potential')])

    def _potential_energy(field,data):
        x = data[('gas','total_gravitational_potential')] * data['cell_mass']
        return x.convert_to_units('erg')

    def _grav_bound(field, data):
        # 1.0 where thermal + kinetic + potential energy < 0, else 0.0
        PE = data[('gas','potential_energy')].convert_to_units('erg')
        TE = ( data[('gas','thermal_energy')] * data['cell_mass'].convert_to_units('g')).convert_to_units('erg')
        KE = ( data[('gas','kinetic_energy')] * data['cell_volume']).convert_to_units('erg')
        result = 1 * ((TE + KE) + PE < 0.0)
        return result*1.0

    def _mag_cyl_r(field,data):
        return np.abs( data[('index','cylindrical_radius')].convert_to_units('cm'))

    def _mag_cyl_z(field,data):
        return np.abs( data[('index','cylindrical_z')].convert_to_units('cm') )

    def _dm_density(field, data):
        # analytic Burkert-profile dark matter background density
        r = data[('index','spherical_r')].convert_to_units('cm')
        r_s = (data.ds.parameters['DiskGravityDarkMatterR'] * yt.units.Mpc).convert_to_units('cm')
        rho_o = (data.ds.parameters['DiskGravityDarkMatterDensity'] * yt.units.g / yt.units.cm**3)
        rho = dm_halo.burkert_density(r, r_s, rho_o)
        return rho.convert_to_cgs()

    def _dm_potential(field, data):
        r = data[('index','spherical_r')].convert_to_units('cm')
        r_s = (data.ds.parameters['DiskGravityDarkMatterR'] * yt.units.Mpc).convert_to_units('cm')
        rho_o = (data.ds.parameters['DiskGravityDarkMatterDensity'] * yt.units.g / yt.units.cm**3)
        pot = dm_halo.burkert_potential(r, r_s, rho_o)
        return pot.convert_to_cgs()

    def _rad_accel(field, data):
        # magnitude of the radiation-pressure acceleration vector
        return np.sqrt(data['RadAccel1']**2 + data['RadAccel2']**2 + data['RadAccel3']**2).convert_to_units('cm/s**2')

    def _is_star_forming(field, data):
        # 1 where every SF criterion holds: dense, cold, converging flow,
        # and on the maximum refinement level
        n = data[('gas','number_density')].convert_to_units('cm**(-3)')
        T = data['Temperature']
        divv = data[('gas','velocity_divergence')]
        l = data['grid_level']
        answer = 1 * ((n > data.ds.parameters['StarMakerOverDensityThreshold']) *\
                      (T < data.ds.parameters['IndividualStarTemperatureThreshold']) *\
                      (divv < 0) *\
                      (l == data.ds.parameters['MaximumRefinementLevel']))
        return answer

    yt.add_field(('gas','is_star_forming'), function = _is_star_forming,
                 units = "")
    yt.add_field(("gas","a_rad"), function=_rad_accel, units="cm/s**2")
    yt.add_field(('index','DM_background_density'), function = _dm_density, units = 'g/cm**3')
    yt.add_field(('index','DM_background_potential'), function = _dm_potential, units = 'erg/g')
    yt.add_field(('index','magnitude_cylindrical_radius'), function = _mag_cyl_r, units = 'cm')
    yt.add_field(('index','magnitude_cylindrical_z'), function = _mag_cyl_z, units = 'cm')
    # def _H2_total_mass(field, data):
    #     mass = data[('gas',
    yt.add_field(('gas','Pe_heating_rate'), function = _pe_heating_cgs, units = 'erg/s/cm**3')
    yt.add_field(('gas','H_total_mass'), function = _H_total_mass, units ='g')
    yt.add_field(('gas','H_Mass'), function = _H_total_mass, units = 'g')  # define as same
    yt.add_field(('gas','He_total_mass'), function = _He_total_mass, units = 'g')
    yt.add_field(('gas','metal_mass'), function = _metal_total_mass, units = 'g')
    yt.add_field(('gas','OTLW_kdissH2I'), function = _otlwcgs, units = '1/s',
                 validators=ValidateDataField(('enzo','OTLW_kdissH2I')))
    yt.add_field(('gas','LW_flux'), function = _LW_flux, units = "erg/s/cm**2",
                 validators=ValidateDataField(('enzo','OTLW_kdissH2I')))
    yt.add_field(('gas','Pe_heating_rate_masked'), function = _pe_heating_rate_masked, units='erg/s/cm**3')
    yt.add_field(('gas','G_o'), function = _G_o, units = "")
    yt.add_field(('gas','G_eff'), function = _G_eff, units = "")
    yt.add_field(('gas','FUV_flux'), function = _FUV_flux, units = "erg/s/cm**2")
    yt.add_field(('gas','Q0_flux'), function = _Q0_flux, units = "erg/s/cm**2")
    yt.add_field(('gas','Q1_flux'), function = _Q1_flux, units = "erg/s/cm**2")
    # yt.add_field(('gas','H2_total_mass'), function = _H2_total_mass, units = 'g')
    # yt.add_field(('gas','All_H_total_mass'), function = _all_H_total_mass, units = 'g')
    if ('enzo','PotentialField') in fields or ('enzo', 'GravPotential') in fields:
        yt.add_field(('gas','pos_gravitational_potential'), function=_grav_pot, units = 'erg/g')
        yt.add_field(('gas','gas_gravitational_potential'), function=_gas_grav_pot, units = 'erg/g')
        yt.add_field(('gas','total_gravitational_potential'), function=_tot_grav_pot, units = 'erg/g')
        yt.add_field(('gas','pos_total_gravitational_potential'), function=_pos_tot_grav_pot, units = 'erg/g')
        yt.add_field(('gas','potential_energy'), function=_potential_energy, units = 'erg')
        yt.add_field(('gas','gravitationally_bound'), function=_grav_bound, units = "")
        nfields = 5
    return nfields
def generate_derived_fields(ds):
    """
    Given a data set (to extract the on-disk field names), generate
    derived fields that will persist for the python session (i.e. not
    tied only to the passed data set).

    Right now, takes in all metal species tracer fields and constructs
    fields for their mass fraction, number density, and all possible
    interesting abundance ratios.

    NOTE: The derived fields will only exist for data sets loaded after
    this function call. If analysis is intended for passed data set,
    it will need to be reloaded for fields to exist.
    """
    # BUGFIX: FIELDS_DEFINED was previously assigned as a local and had no
    # effect; declare it global so the module-level flag is actually set.
    global FIELDS_DEFINED

    fields = ds.field_list
    # lets figure out the metal tracers present
    metals = utilities.species_from_fields(fields)
    ratios = utilities.ratios_list(metals)
    # make new functions to do correct units for species fields
    _density_function_generator(metals + ['Metal'])
    print("tracer species present: ", metals)
    nfields = _mass_function_generator(metals)
    print(nfields, "mass fields defined")
    nfields = _mass_fraction_function_generator(ds, metals)
    print(nfields, "mass fraction fields defined")
    nfields = _number_density_function_generator(metals)
    print(nfields, "number density fields defined")
    if not (ionization._ion_table is None):
        nfields = _ionization_state_generator(metals)
        print(nfields, "ionization state fields defined")
    nfields = _abundance_ratio_function_generator(ratios, H_mode = 'total')
    print(nfields, "abundance ratio fields defined")
    nfields = _abundance_function_generator(metals)
    if ds.parameters['NumberOfParticles'] > 0:
        # BUGFIX: guard against an empty metals list before indexing [0]
        if len(metals) > 0 and ('io','particle_' + metals[0] + '_fraction') in ds.field_list:
            nfields = _particle_abundance_ratio_function_generator(ratios, ds)
            print(nfields, "particle abundance ratio fields defined")
            _particle_abundance_function_generator(metals, ds)
        generate_stellar_model_fields(ds)
    nfields = _additional_helper_fields(fields)
    print(nfields, "additional helper fields defined")
    #generate_grackle_fields(ds)
    FIELDS_DEFINED = True
    return
def load_and_define(name):
    """
    Load a dataset with yt and attach everything that must be re-registered
    per simulation output (unlike the session-wide fields defined elsewhere
    in this module): gradient fields, gravitational-acceleration fields,
    and particle filters.

    Returns the loaded dataset.
    """
    ds = yt.load(name)
    generate_gradient_fields(ds)

    def _accel_component(axis):
        # closure: one component of the gas potential gradient, in cm/s**2
        grad_field = ('gas', 'gas_gravitational_potential_gradient_' + axis)
        def _component(field, data):
            return data[grad_field].convert_to_units('cm/s**2')
        return _component

    def _accel_magnitude(field, data):
        return np.sqrt(data[('gas', 'a_grav_x')]**2 +
                       data[('gas', 'a_grav_y')]**2 +
                       data[('gas', 'a_grav_z')]**2)

    def _rad_to_grav(field, data):
        # ratio of radiative to gravitational acceleration; defined as
        # zero wherever the gravitational acceleration vanishes
        ratio = data[('gas', 'a_rad')] / data[('gas', 'a_grav')]
        ratio[data[('gas', 'a_grav')] == 0.0] = 0.0
        return ratio

    for axis in ('x', 'y', 'z'):
        ds.add_field(('gas', 'a_grav_' + axis), function = _accel_component(axis),
                     units = 'cm/s**2', sampling_type = 'cell')
    ds.add_field(('gas', 'a_grav'), function = _accel_magnitude,
                 units = 'cm/s**2', sampling_type = 'cell')
    ds.add_field(('gas', 'a_rad_over_a_grav'), function = _rad_to_grav,
                 units = '', sampling_type = 'cell')

    generate_particle_filters(ds)
    #generate_grackle_fields(ds)
    return ds
def generate_grackle_fields(ds):
    """
    Attach pygrackle-backed fields (cooling time, shielding length) to ds.

    No-op when pygrackle failed to import or the simulation was run
    without grackle.
    """
    if not GRACKLE_IMPORTED:
        print("Grackle's python wrapper (pygrackle) was not imported successfully")
        # BUGFIX: previously fell through and called _grackle_fields anyway,
        # which would crash on the missing pygrackle module.
        return
    if ds.parameters['use_grackle']:
        _grackle_fields(ds)
    return
def generate_gradient_fields(ds):
    """
    Add the gas self-gravity potential gradient fields to ds.

    yt names the results
    ('gas', 'gas_gravitational_potential_gradient_<x|y|z>').
    """
    potential_field = ("gas", "gas_gravitational_potential")
    ds.add_gradient_fields(potential_field)
    return
def generate_particle_filters(ds):
    """
    Define and attach particle filters for the star particle types:

        11 -> main sequence
        13 -> remnant (SN / WD / AGB endpoints)
        14 -> Pop III main sequence
        15 -> unresolved low-mass star

    'low_mass_stars' additionally selects main-sequence particles with
    birth mass in (2.0, 8.0).
    """
    @yt.particle_filter(requires=["particle_type"], filtered_type='all')
    def main_sequence_stars(pfilter, data):
        return data[(pfilter.filtered_type, "particle_type")] == 11

    @yt.particle_filter(requires=["particle_type"], filtered_type='all')
    def main_sequence_popIII_stars(pfilter, data):
        return data[(pfilter.filtered_type, "particle_type")] == 14

    @yt.particle_filter(requires=["particle_type"], filtered_type='all')
    def remnant_stars(pfilter, data):
        return data[(pfilter.filtered_type, "particle_type")] == 13

    @yt.particle_filter(requires=["particle_type", 'birth_mass'], filtered_type='all')
    def low_mass_stars(pfilter, data):
        is_main_sequence = data[(pfilter.filtered_type, "particle_type")] == 11
        birth_mass = data[(pfilter.filtered_type, "birth_mass")]
        return is_main_sequence * (birth_mass > 2.0) * (birth_mass < 8.0)

    @yt.particle_filter(requires=["particle_type", 'birth_mass'], filtered_type='all')
    def low_mass_unresolved_stars(pfilter, data):
        return data[(pfilter.filtered_type, "particle_type")] == 15

    for filter_name in ('main_sequence_stars',
                        'remnant_stars',
                        'low_mass_stars',
                        'low_mass_unresolved_stars',
                        'main_sequence_popIII_stars'):
        ds.add_particle_filter(filter_name)
    return
|
from resources import loading
import ast
import configparser
import os
import sys
import time
from PIL import Image, ImageFilter
from tqdm import tqdm
from resources import customlogger as log
from resources import namenum_converter as conv
from resources.get_counters import get_counter # naming is hard
def format_counter_list(counter_list):
    """
    Render (name, count) pairs as a single comma-separated string.

    Each pair becomes "<FancyName>: <count>" (names prettified with
    conv.fancify); pairs are joined with ", ". An empty list yields "".
    """
    # join() replaces the old manual concatenation + trailing-", " trim,
    # and avoids quadratic string building for long lists.
    return ', '.join(conv.fancify(name) + ': ' + str(count)
                     for name, count in counter_list)
def log_any_uncaught_exception(type_, value, traceback):
log.critical("Uncaught exception: {} {} {}".format(type_, value, traceback))
raise SystemError
sys.excepthook = log_any_uncaught_exception
log.info("START")
# defaults
refresh_delay = 0.5
process_allies = True
max_logs = 10
dev = False
try:
config = configparser.ConfigParser() # load some settings
with open('inah-settings.ini', 'r') as configfile:
config.read('inah-settings.ini')
refresh_delay = float(config['MAIN']['refresh_delay'])
process_allies = ast.literal_eval(config['MAIN']['process_allies'])
max_logs = float(config['MAIN']['max_logs'])
dev = ast.literal_eval(config['MAIN']['dev'])
settings_raw = configfile.readlines()
settings_raw = settings_raw[0:13]
log.info("Settings: " + str(settings_raw))
except:
settings_error = "Couldn't load settings " + str(sys.exc_info())
print(settings_error + ", reverting to default settings")
log.error(settings_error)
log.cleanup(max_logs)
heroes = ['ana', 'bastion', 'dva', 'genji', 'hanzo',
'junkrat', 'lucio', 'mccree', 'mei', 'mercy',
'pharah', 'reaper', 'reinhardt', 'roadhog', 'soldier',
'sombra', 'symmetra', 'torbjorn', 'tracer', 'widowmaker',
'winston', 'zarya', 'zenyatta', 'unknown', 'loading',
'anadead', 'bastiondead', 'dvadead', 'genjidead', 'junkratdead',
'luciodead', 'mccreedead', 'meidead', 'pharahdead', 'reaperdead',
'roadhogdead', 'soldierdead', 'sombradead', 'torbjorndead', 'tracerdead',
'zaryadead', 'zenyattadead', 'hanzodead', 'mercydead', 'orisadead',
'reinhardtdead', 'symmetradead', 'widowmakerdead', 'winstondead', 'orisa',
'doomfist', 'doomfistdead']
heroes_dps = ['bastion', 'genji', 'hanzo', 'junkrat', 'mccree',
'mei', 'pharah', 'reaper', 'soldier', 'sombra',
'symmetra', 'torbjorn', 'tracer', 'widowmaker', 'doomfist']
heroes_tank = ['dva', 'reinhardt', 'roadhog', 'winston', 'zarya', 'orisa']
heroes_heal = ['ana', 'lucio', 'mercy', 'zenyatta']
heroes_normal = [] # a list of heroes, not fancy, without unknown, loading, or dead
for i in heroes:
hero = conv.strip_dead(i)
if ('unknown' not in hero) and ('loading' not in hero):
heroes_normal.append(hero)
if process_allies:
filenames = ['ally1', 'ally2', 'ally3', 'ally4', 'ally5', 'ally6',
'enemy1', 'enemy2', 'enemy3', 'enemy4', 'enemy5', 'enemy6']
else:
filenames = ['enemy1', 'enemy2', 'enemy3', 'enemy4', 'enemy5', 'enemy6']
if dev:
print('FYI, developer mode is on.')
dev_file = 'testing/bettercrop.jpg'
log.debug("Developer mode is on, dev_file is " + dev_file)
screenshots_path = os.path.expanduser('~\Documents\Overwatch\ScreenShots\Overwatch')
log.info("screenshots_path is " + screenshots_path)
try:
inputs_before = os.listdir(screenshots_path) # a list of every file in the screenshots folder
except FileNotFoundError:
print("Couldn't find the screenshots folder (should be at {})".format(screenshots_path))
log.critical("Couldn't find screenshots_path")
raise SystemExit
log.info('The screenshots folder has ' + str(len(inputs_before)) + " images")
# builds a cache of learned images
learned_images = {}
for learned_path in os.listdir('learned'):
if 'png' in learned_path:
learned = Image.open('learned/' + learned_path).load()
learned_images[learned_path[:-4]] = learned
log.info("The learned folder has " + str(len(learned_images)) + " images")
mask = Image.open('resources/mask.png').convert('RGBA') # used to ignore metal winged BS
log.info("Mask opened: " + str(mask))
loading_time = loading.done()
log.info("Loaded in " + str(loading_time) + " seconds")
loops_done = 0
while True:
if not dev:
time.sleep(refresh_delay) # to stop high cpu usage while waiting
continue_ = False
inputs_after = os.listdir(screenshots_path)
if len(inputs_after) > len(inputs_before): # if a file is added
continue_ = True
if len(inputs_after) < len(inputs_before): # if a file is removed
continue_ = False
inputs_before = os.listdir(screenshots_path)
if continue_ or dev:
# starting analysis
log.info("START LOOP")
log.info("Loop number: " + str(loops_done))
loops_done += 1
process_time_start = time.perf_counter()
# defaults
delete_thresehold = 80
process_threshold = 70
refresh_delay = 0.5
low_precision = False
process_allies = True
include_allies_in_counters = True
highlight_yourself = True
show_processing_text = False
old_counter_list = False
dev = False
preview = False
try:
config = configparser.ConfigParser() # load all settings
with open('inah-settings.ini', 'r') as configfile:
config.read('inah-settings.ini')
delete_thresehold = int(config['MAIN']['delete_thresehold'])
process_threshold = int(config['MAIN']['process_threshold'])
refresh_delay = float(config['MAIN']['refresh_delay'])
low_precision = ast.literal_eval(config['MAIN']['low_precision'])
process_allies = ast.literal_eval(config['MAIN']['process_allies'])
include_allies_in_counters = ast.literal_eval(config['MAIN']['include_allies_in_counters'])
highlight_yourself = ast.literal_eval(config['MAIN']['highlight_yourself'])
show_processing_text = ast.literal_eval(config['MAIN']['show_processing_text'])
old_counter_list = ast.literal_eval(config['MAIN']['old_counter_list'])
dev = ast.literal_eval(config['MAIN']['dev'])
preview = ast.literal_eval(config['MAIN']['preview'])
settings_raw = configfile.readlines()
settings_raw = settings_raw[0:13]
log.info("Settings: " + str(settings_raw))
except:
settings_error = "Couldn't load settings " + str(sys.exc_info())
print(settings_error + ", reverting to default settings")
log.error(settings_error)
inputs_diff = list(set(os.listdir(screenshots_path)) - set(inputs_before))
log.info("inputs_diff is " + str(inputs_diff))
current_filename = str(inputs_diff)[2:-2] # removes brackets and quotes
if dev:
current_filename = dev_file
print("\nProcessing " + current_filename + " at " + str(time.strftime('%I:%M:%S %p', time.localtime())))
log.info("Processing " + current_filename)
if not dev:
try:
time.sleep(0.1) # bug "fix"
screenshot = Image.open(screenshots_path + '/' + inputs_diff[0])
log.info("Screenshot opened successfully: " + str(screenshot))
except OSError as error:
print("This doesn't seem to be an image file.")
inputs_before = os.listdir(screenshots_path) # resets screenshot folder list
log.error("Couldn't open screenshot file: " + str(error))
continue
else:
screenshot = Image.open(dev_file)
log.debug("Dev screenshot opened successfully: " + str(screenshot))
if preview:
screenshot.save('preview.png')
log.info("Saved preview")
else:
try:
os.remove("preview.png")
log.info("Deleted preview")
except FileNotFoundError:
log.info("No preview to delete")
pass
width, height = screenshot.size
aspect_ratio = width / height
log.info("Aspect ratio is {} ({} / {})".format(aspect_ratio, width, height))
if aspect_ratio > 2: # the aspect ratio the user is running at is 21:9
log.info("Formatted aspect ratio is closest to 21:9, processing accordingly")
if not (width == 2579 and height == 1080):
screenshot = screenshot.resize((2579, 1080), resample=Image.BICUBIC)
screenshot = screenshot.crop((329, 0, 2249, 1080))
elif aspect_ratio < 1.7: # aspect ratio is 16:10
log.info("Formatted aspect ratio is closest to 16:10, processing accordingly")
if not (width == 1920 and height == 1200):
screenshot = screenshot.resize((1920, 1200), resample=Image.BICUBIC)
screenshot = screenshot.crop((0, 60, 1920, 1140))
else: # aspect ratio is 16:9
log.info("Formatted aspect ratio is closest to 16:9, processing accordingly")
if not (width == 1920 and height == 1080):
screenshot = screenshot.resize((1920, 1080), resample=Image.BICUBIC)
if low_precision:
step = 2 # skips every other pixel
divisor = 64000 # scary magic math
else:
step = 1
divisor = 256000
ally1 = screenshot.crop((443, 584, 519, 660))
ally2 = screenshot.crop((634, 584, 710, 660))
ally3 = screenshot.crop((826, 584, 902, 660))
ally4 = screenshot.crop((1019, 584, 1095, 660))
ally5 = screenshot.crop((1210, 584, 1286, 660))
ally6 = screenshot.crop((1402, 584, 1478, 660))
enemy1 = screenshot.crop((443, 279, 519, 355))
enemy2 = screenshot.crop((634, 279, 710, 355))
enemy3 = screenshot.crop((826, 279, 902, 355))
enemy4 = screenshot.crop((1019, 279, 1095, 355))
enemy5 = screenshot.crop((1210, 279, 1286, 355))
enemy6 = screenshot.crop((1402, 279, 1478, 355))
filenames_opened = []
if process_allies:
filenames_opened.append(ally1)
filenames_opened.append(ally2)
filenames_opened.append(ally3)
filenames_opened.append(ally4)
filenames_opened.append(ally5)
filenames_opened.append(ally6)
filenames_opened.append(enemy1)
filenames_opened.append(enemy2)
filenames_opened.append(enemy3)
filenames_opened.append(enemy4)
filenames_opened.append(enemy5)
filenames_opened.append(enemy6)
allied_team = []
enemy_team = []
total_confidence = []
team_confidences = []
log.info("Starting image recognition")
for h in tqdm(range(0, len(filenames)), file=sys.stdout, ncols=40, bar_format='{l_bar}{bar}|'):
# loads an portrait to process
unknown_unloaded = filenames_opened[h]
unknown_unloaded = unknown_unloaded.filter(ImageFilter.GaussianBlur(radius=1))
unknown_unloaded.paste(mask, (0, 0), mask) # ...until I put on the mask
unknown = unknown_unloaded.load()
confidences = []
for i in heroes:
confidences.append(0) # makes a hero-long list of zeroes
for j in range(0, len(heroes)): # the image recognition magic
learned_image = learned_images[heroes[j]]
for x in range(0, 75, step):
for y in range(0, 75, step):
input_color = unknown[x, y]
learned_color = learned_image[x, y]
confidences[j] += abs(input_color[0] - learned_color[0])
confidences[j] = 1 - (confidences[j] / divisor)
if show_processing_text:
print("For " + filenames[h] + ":")
likely_name = '' # find the most likely hero
likely_num = -1
for i in range(0, len(confidences)):
if confidences[i] > likely_num:
likely_num = confidences[i]
likely_name = heroes[i]
print_conf = int(likely_num * 100)
if print_conf < 0:
print_conf = 0
if show_processing_text:
print("Most likely is " + likely_name
+ ", with a confidence of " + str(print_conf) + "%")
total_confidence.append(print_conf)
if 'ally' in filenames[h]:
allied_team.append(likely_name) # builds the team lists
elif 'enemy' in filenames[h]:
enemy_team.append(likely_name)
print('\n')
process_time_elapsed = time.perf_counter() - process_time_start
print("Processing finished in " + str(process_time_elapsed)[0:3] + " seconds")
log.info("Image recognition finished in " + str(process_time_elapsed) + " seconds")
log.info("Enemy team is " + str(enemy_team))
if process_allies:
log.info("Allied team is " + str(allied_team))
log.info("Confidences (allied first): " + str(total_confidence))
enemy_team_fancy = ''
for i in enemy_team:
hero = conv.fancify(i)
enemy_team_fancy += (hero + ', ')
if process_allies:
allied_team_fancy = ''
for i in allied_team:
hero = conv.fancify(i)
allied_team_fancy += (hero + ', ')
total_conf_average = int(sum(total_confidence) / float(len(total_confidence)))
log.info("Image recognition had a confidence of " + str(total_conf_average))
if total_conf_average > process_threshold:
print("Enemy team: " + enemy_team_fancy[:-2])
print("Allied team: " + allied_team_fancy[:-2])
print("Confidence: " + str(total_conf_average) + '%')
else:
print("This screenshot doesn't seem to be of the tab menu " +
"(needs " + str(process_threshold) + "% confidence, got " + str(total_conf_average) + "%)")
enemy_is_heroes = True
j = 0
for i in enemy_team:
if (i == 'loading') or (i == 'unknown'):
j += 1
if j == 6:
enemy_is_heroes = False # if everyone on the enemy team is loading or unknown
log.info("The enemy team IS loading or unknown")
else:
log.info("The enemy team is NOT loading or unknown")
if total_conf_average > process_threshold and process_allies:
allied_team_alive = []
for possibly_dead_hero in allied_team:
allied_team_alive.append(conv.strip_dead(possibly_dead_hero))
if not any(x in allied_team_alive for x in heroes_dps):
print("Your team doesn't have any DPS heroes!")
if not any(x in allied_team_alive for x in heroes_tank):
print("Your team doesn't have any tank heroes!")
if not any(x in allied_team_alive for x in heroes_heal):
print("Your team doesn't have any healers!")
if total_conf_average > process_threshold and process_allies and enemy_is_heroes:
# get overall team counter advantage
allied_team_counter = 0
for i in enemy_team:
for j in allied_team:
cross_team_counter = get_counter(i, j)
allied_team_counter += cross_team_counter
print_team_counter = round(allied_team_counter)
log.info("Overall team counter is {}".format(allied_team_counter))
if allied_team_counter > 1:
print("Your team has an counter advantage of {}".format(print_team_counter))
elif allied_team_counter < -1:
print("The enemy team has an counter advantage of {}".format(-print_team_counter))
else:
print("Neither team has a counter advantage")
if enemy_is_heroes and (total_conf_average > process_threshold): # is this valid to get counters from
# begin getting counters
log.info("Getting counters")
all_counters = {}
for any_hero in heroes_normal: # actually gets counters
all_counters[any_hero] = 0
for enemy_hero in enemy_team:
enemy_hero = conv.strip_dead(enemy_hero)
if ('unknown' not in any_hero) and ('loading' not in any_hero):
countered = get_counter(any_hero, enemy_hero)
all_counters[any_hero] -= countered
sorted_counters = sorted(all_counters.items(), reverse=True, key=lambda z: z[1]) # wtf
log.info("Got " + str(len(sorted_counters)) + " counters")
if not old_counter_list:
dps_counters = []
tank_counters = []
heal_counters = []
for pair in sorted_counters:
just_name = pair[0]
just_num = round(pair[1])
if just_name not in allied_team or include_allies_in_counters:
if just_name in heroes_dps:
dps_counters.append(tuple((just_name, just_num)))
if just_name in heroes_tank:
tank_counters.append(tuple((just_name, just_num)))
if just_name in heroes_heal:
heal_counters.append(tuple((just_name, just_num)))
if just_name == conv.strip_dead(allied_team[0]):
yourself = 'You (' + conv.fancify(just_name) + '): ' + str(just_num)
# no need to sort these, sorted_counters was already sorted (duh)
final_counters_dps = format_counter_list(dps_counters)
final_counters_tank = format_counter_list(tank_counters)
final_counters_heal = format_counter_list(heal_counters)
print('\n')
print("Counters (higher is better)")
print("DPS - " + final_counters_dps)
print("Tanks - " + final_counters_tank)
print("Healers - " + final_counters_heal)
else:
final_counters = format_counter_list(sorted_counters)
print('\n')
print("Counters (higher is better)")
print(final_counters)
if highlight_yourself:
print(yourself)
log.info("Yourself: '" + yourself + "'")
# end getting counters
elif not enemy_is_heroes:
print("\nThe enemy team appears to be all loading or unknown, which counters can't be calculated from.")
print('\n') # managing these is hard
if total_conf_average > delete_thresehold and not dev: # deletes screenshot once done
os.remove(screenshots_path + '/' + inputs_diff[0]) # doesn't recycle, fyi
print("Deleted " + current_filename + ' (needed ' + str(delete_thresehold)
+ '% confidence, got ' + str(total_conf_average) + '%)')
log.info("Deleted screenshot")
else:
print("Didn't delete " + current_filename + ' (needs ' + str(delete_thresehold)
+ '% confidence, got ' + str(total_conf_average) + '%)')
log.info("Didn't delete screenshot")
if delete_thresehold >= 100:
print("The delete threshold is currently 100%, which means that even tab menu screenshots aren't"
" deleted. Be sure to clean the screenshots folder out manually every now and then.")
inputs_before = os.listdir(screenshots_path) # resets screenshot folder list
log.info("END LOOP")
if dev:
log.info("Dev mode is on and a full loop has been completed, exiting")
raise SystemExit
print('\n')
print('Analysis complete. Hold tab and press the "print screen" button to get a new set of counters.')
Changed a log level
from resources import loading
import ast
import configparser
import os
import sys
import time
from PIL import Image, ImageFilter
from tqdm import tqdm
from resources import customlogger as log
from resources import namenum_converter as conv
from resources.get_counters import get_counter # naming is hard
def format_counter_list(counter_list):
formatted_counter = ''
for pair_ in counter_list:
just_name_ = pair_[0]
just_num_ = pair_[1]
full_counter = conv.fancify(just_name_) + ': ' + str(just_num_)
formatted_counter += (full_counter + ', ')
return formatted_counter[:-2] # removes extra comma and space
def log_any_uncaught_exception(type_, value, traceback):
log.critical("Uncaught exception: {} {} {}".format(type_, value, traceback))
raise SystemError
sys.excepthook = log_any_uncaught_exception
log.info("START")
# defaults
refresh_delay = 0.5
process_allies = True
max_logs = 10
dev = False
try:
config = configparser.ConfigParser() # load some settings
with open('inah-settings.ini', 'r') as configfile:
config.read('inah-settings.ini')
refresh_delay = float(config['MAIN']['refresh_delay'])
process_allies = ast.literal_eval(config['MAIN']['process_allies'])
max_logs = float(config['MAIN']['max_logs'])
dev = ast.literal_eval(config['MAIN']['dev'])
settings_raw = configfile.readlines()
settings_raw = settings_raw[0:13]
log.info("Settings: " + str(settings_raw))
except:
settings_error = "Couldn't load settings " + str(sys.exc_info())
print(settings_error + ", reverting to default settings")
log.error(settings_error)
log.cleanup(max_logs)
heroes = ['ana', 'bastion', 'dva', 'genji', 'hanzo',
'junkrat', 'lucio', 'mccree', 'mei', 'mercy',
'pharah', 'reaper', 'reinhardt', 'roadhog', 'soldier',
'sombra', 'symmetra', 'torbjorn', 'tracer', 'widowmaker',
'winston', 'zarya', 'zenyatta', 'unknown', 'loading',
'anadead', 'bastiondead', 'dvadead', 'genjidead', 'junkratdead',
'luciodead', 'mccreedead', 'meidead', 'pharahdead', 'reaperdead',
'roadhogdead', 'soldierdead', 'sombradead', 'torbjorndead', 'tracerdead',
'zaryadead', 'zenyattadead', 'hanzodead', 'mercydead', 'orisadead',
'reinhardtdead', 'symmetradead', 'widowmakerdead', 'winstondead', 'orisa',
'doomfist', 'doomfistdead']
heroes_dps = ['bastion', 'genji', 'hanzo', 'junkrat', 'mccree',
'mei', 'pharah', 'reaper', 'soldier', 'sombra',
'symmetra', 'torbjorn', 'tracer', 'widowmaker', 'doomfist']
heroes_tank = ['dva', 'reinhardt', 'roadhog', 'winston', 'zarya', 'orisa']
heroes_heal = ['ana', 'lucio', 'mercy', 'zenyatta']
heroes_normal = [] # a list of heroes, not fancy, without unknown, loading, or dead
for i in heroes:
hero = conv.strip_dead(i)
if ('unknown' not in hero) and ('loading' not in hero):
heroes_normal.append(hero)
if process_allies:
filenames = ['ally1', 'ally2', 'ally3', 'ally4', 'ally5', 'ally6',
'enemy1', 'enemy2', 'enemy3', 'enemy4', 'enemy5', 'enemy6']
else:
filenames = ['enemy1', 'enemy2', 'enemy3', 'enemy4', 'enemy5', 'enemy6']
if dev:
print('FYI, developer mode is on.')
dev_file = 'testing/bettercrop.jpg'
log.debug("Developer mode is on, dev_file is " + dev_file)
screenshots_path = os.path.expanduser('~\Documents\Overwatch\ScreenShots\Overwatch')
log.info("screenshots_path is " + screenshots_path)
try:
inputs_before = os.listdir(screenshots_path) # a list of every file in the screenshots folder
except FileNotFoundError:
print("Couldn't find the screenshots folder (should be at {})".format(screenshots_path))
log.critical("Couldn't find screenshots_path")
raise SystemExit
log.info('The screenshots folder has ' + str(len(inputs_before)) + " images")
# builds a cache of learned images
learned_images = {}
for learned_path in os.listdir('learned'):
if 'png' in learned_path:
learned = Image.open('learned/' + learned_path).load()
learned_images[learned_path[:-4]] = learned
log.info("The learned folder has " + str(len(learned_images)) + " images")
mask = Image.open('resources/mask.png').convert('RGBA') # used to ignore metal winged BS
log.info("Mask opened: " + str(mask))
loading_time = loading.done()
log.info("Loaded in " + str(loading_time) + " seconds")
loops_done = 0
while True:
if not dev:
time.sleep(refresh_delay) # to stop high cpu usage while waiting
continue_ = False
inputs_after = os.listdir(screenshots_path)
if len(inputs_after) > len(inputs_before): # if a file is added
continue_ = True
if len(inputs_after) < len(inputs_before): # if a file is removed
continue_ = False
inputs_before = os.listdir(screenshots_path)
if continue_ or dev:
# starting analysis
log.info("START LOOP")
log.info("Loop number: " + str(loops_done))
loops_done += 1
process_time_start = time.perf_counter()
# defaults
delete_thresehold = 80
process_threshold = 70
refresh_delay = 0.5
low_precision = False
process_allies = True
include_allies_in_counters = True
highlight_yourself = True
show_processing_text = False
old_counter_list = False
dev = False
preview = False
try:
config = configparser.ConfigParser() # load all settings
with open('inah-settings.ini', 'r') as configfile:
config.read('inah-settings.ini')
delete_thresehold = int(config['MAIN']['delete_thresehold'])
process_threshold = int(config['MAIN']['process_threshold'])
refresh_delay = float(config['MAIN']['refresh_delay'])
low_precision = ast.literal_eval(config['MAIN']['low_precision'])
process_allies = ast.literal_eval(config['MAIN']['process_allies'])
include_allies_in_counters = ast.literal_eval(config['MAIN']['include_allies_in_counters'])
highlight_yourself = ast.literal_eval(config['MAIN']['highlight_yourself'])
show_processing_text = ast.literal_eval(config['MAIN']['show_processing_text'])
old_counter_list = ast.literal_eval(config['MAIN']['old_counter_list'])
dev = ast.literal_eval(config['MAIN']['dev'])
preview = ast.literal_eval(config['MAIN']['preview'])
settings_raw = configfile.readlines()
settings_raw = settings_raw[0:13]
log.info("Settings: " + str(settings_raw))
except:
settings_error = "Couldn't load settings " + str(sys.exc_info())
print(settings_error + ", reverting to default settings")
log.error(settings_error)
inputs_diff = list(set(os.listdir(screenshots_path)) - set(inputs_before))
log.info("inputs_diff is " + str(inputs_diff))
current_filename = str(inputs_diff)[2:-2] # removes brackets and quotes
if dev:
current_filename = dev_file
print("\nProcessing " + current_filename + " at " + str(time.strftime('%I:%M:%S %p', time.localtime())))
log.info("Processing " + current_filename)
if not dev:
try:
time.sleep(0.1) # bug "fix"
screenshot = Image.open(screenshots_path + '/' + inputs_diff[0])
log.info("Screenshot opened successfully: " + str(screenshot))
except OSError as error:
print("This doesn't seem to be an image file.")
inputs_before = os.listdir(screenshots_path) # resets screenshot folder list
log.error("Couldn't open screenshot file: " + str(error))
continue
else:
screenshot = Image.open(dev_file)
log.debug("Dev screenshot opened successfully: " + str(screenshot))
if preview:
screenshot.save('preview.png')
log.info("Saved preview")
else:
try:
os.remove("preview.png")
log.info("Deleted preview")
except FileNotFoundError:
log.info("No preview to delete")
pass
width, height = screenshot.size
aspect_ratio = width / height
log.info("Aspect ratio is {} ({} / {})".format(aspect_ratio, width, height))
if aspect_ratio > 2: # the aspect ratio the user is running at is 21:9
log.info("Formatted aspect ratio is closest to 21:9, processing accordingly")
if not (width == 2579 and height == 1080):
screenshot = screenshot.resize((2579, 1080), resample=Image.BICUBIC)
screenshot = screenshot.crop((329, 0, 2249, 1080))
elif aspect_ratio < 1.7: # aspect ratio is 16:10
log.info("Formatted aspect ratio is closest to 16:10, processing accordingly")
if not (width == 1920 and height == 1200):
screenshot = screenshot.resize((1920, 1200), resample=Image.BICUBIC)
screenshot = screenshot.crop((0, 60, 1920, 1140))
else: # aspect ratio is 16:9
log.info("Formatted aspect ratio is closest to 16:9, processing accordingly")
if not (width == 1920 and height == 1080):
screenshot = screenshot.resize((1920, 1080), resample=Image.BICUBIC)
if low_precision:
step = 2 # skips every other pixel
divisor = 64000 # scary magic math
else:
step = 1
divisor = 256000
ally1 = screenshot.crop((443, 584, 519, 660))
ally2 = screenshot.crop((634, 584, 710, 660))
ally3 = screenshot.crop((826, 584, 902, 660))
ally4 = screenshot.crop((1019, 584, 1095, 660))
ally5 = screenshot.crop((1210, 584, 1286, 660))
ally6 = screenshot.crop((1402, 584, 1478, 660))
enemy1 = screenshot.crop((443, 279, 519, 355))
enemy2 = screenshot.crop((634, 279, 710, 355))
enemy3 = screenshot.crop((826, 279, 902, 355))
enemy4 = screenshot.crop((1019, 279, 1095, 355))
enemy5 = screenshot.crop((1210, 279, 1286, 355))
enemy6 = screenshot.crop((1402, 279, 1478, 355))
filenames_opened = []
if process_allies:
filenames_opened.append(ally1)
filenames_opened.append(ally2)
filenames_opened.append(ally3)
filenames_opened.append(ally4)
filenames_opened.append(ally5)
filenames_opened.append(ally6)
filenames_opened.append(enemy1)
filenames_opened.append(enemy2)
filenames_opened.append(enemy3)
filenames_opened.append(enemy4)
filenames_opened.append(enemy5)
filenames_opened.append(enemy6)
allied_team = []
enemy_team = []
total_confidence = []
team_confidences = []
log.info("Starting image recognition")
for h in tqdm(range(0, len(filenames)), file=sys.stdout, ncols=40, bar_format='{l_bar}{bar}|'):
# loads an portrait to process
unknown_unloaded = filenames_opened[h]
unknown_unloaded = unknown_unloaded.filter(ImageFilter.GaussianBlur(radius=1))
unknown_unloaded.paste(mask, (0, 0), mask) # ...until I put on the mask
unknown = unknown_unloaded.load()
confidences = []
for i in heroes:
confidences.append(0) # makes a hero-long list of zeroes
for j in range(0, len(heroes)): # the image recognition magic
learned_image = learned_images[heroes[j]]
for x in range(0, 75, step):
for y in range(0, 75, step):
input_color = unknown[x, y]
learned_color = learned_image[x, y]
confidences[j] += abs(input_color[0] - learned_color[0])
confidences[j] = 1 - (confidences[j] / divisor)
if show_processing_text:
print("For " + filenames[h] + ":")
likely_name = '' # find the most likely hero
likely_num = -1
for i in range(0, len(confidences)):
if confidences[i] > likely_num:
likely_num = confidences[i]
likely_name = heroes[i]
print_conf = int(likely_num * 100)
if print_conf < 0:
print_conf = 0
if show_processing_text:
print("Most likely is " + likely_name
+ ", with a confidence of " + str(print_conf) + "%")
total_confidence.append(print_conf)
if 'ally' in filenames[h]:
allied_team.append(likely_name) # builds the team lists
elif 'enemy' in filenames[h]:
enemy_team.append(likely_name)
print('\n')
process_time_elapsed = time.perf_counter() - process_time_start
print("Processing finished in " + str(process_time_elapsed)[0:3] + " seconds")
log.info("Image recognition finished in " + str(process_time_elapsed) + " seconds")
log.info("Enemy team is " + str(enemy_team))
if process_allies:
log.info("Allied team is " + str(allied_team))
log.info("Confidences (allied first): " + str(total_confidence))
enemy_team_fancy = ''
for i in enemy_team:
hero = conv.fancify(i)
enemy_team_fancy += (hero + ', ')
if process_allies:
allied_team_fancy = ''
for i in allied_team:
hero = conv.fancify(i)
allied_team_fancy += (hero + ', ')
total_conf_average = int(sum(total_confidence) / float(len(total_confidence)))
log.info("Image recognition had a confidence of " + str(total_conf_average))
if total_conf_average > process_threshold:
print("Enemy team: " + enemy_team_fancy[:-2])
print("Allied team: " + allied_team_fancy[:-2])
print("Confidence: " + str(total_conf_average) + '%')
else:
print("This screenshot doesn't seem to be of the tab menu " +
"(needs " + str(process_threshold) + "% confidence, got " + str(total_conf_average) + "%)")
enemy_is_heroes = True
j = 0
for i in enemy_team:
if (i == 'loading') or (i == 'unknown'):
j += 1
if j == 6:
enemy_is_heroes = False # if everyone on the enemy team is loading or unknown
log.info("The enemy team IS loading or unknown")
else:
log.info("The enemy team is NOT loading or unknown")
if total_conf_average > process_threshold and process_allies:
allied_team_alive = []
for possibly_dead_hero in allied_team:
allied_team_alive.append(conv.strip_dead(possibly_dead_hero))
if not any(x in allied_team_alive for x in heroes_dps):
print("Your team doesn't have any DPS heroes!")
if not any(x in allied_team_alive for x in heroes_tank):
print("Your team doesn't have any tank heroes!")
if not any(x in allied_team_alive for x in heroes_heal):
print("Your team doesn't have any healers!")
if total_conf_average > process_threshold and process_allies and enemy_is_heroes:
# get overall team counter advantage
allied_team_counter = 0
for i in enemy_team:
for j in allied_team:
cross_team_counter = get_counter(i, j)
allied_team_counter += cross_team_counter
print_team_counter = round(allied_team_counter)
log.info("Overall team counter is {}".format(allied_team_counter))
if allied_team_counter > 1:
print("Your team has an counter advantage of {}".format(print_team_counter))
elif allied_team_counter < -1:
print("The enemy team has an counter advantage of {}".format(-print_team_counter))
else:
print("Neither team has a counter advantage")
if enemy_is_heroes and (total_conf_average > process_threshold): # is this valid to get counters from
# begin getting counters
log.info("Getting counters")
all_counters = {}
for any_hero in heroes_normal: # actually gets counters
all_counters[any_hero] = 0
for enemy_hero in enemy_team:
enemy_hero = conv.strip_dead(enemy_hero)
if ('unknown' not in any_hero) and ('loading' not in any_hero):
countered = get_counter(any_hero, enemy_hero)
all_counters[any_hero] -= countered
sorted_counters = sorted(all_counters.items(), reverse=True, key=lambda z: z[1]) # wtf
log.info("Got " + str(len(sorted_counters)) + " counters")
if not old_counter_list:
dps_counters = []
tank_counters = []
heal_counters = []
for pair in sorted_counters:
just_name = pair[0]
just_num = round(pair[1])
if just_name not in allied_team or include_allies_in_counters:
if just_name in heroes_dps:
dps_counters.append(tuple((just_name, just_num)))
if just_name in heroes_tank:
tank_counters.append(tuple((just_name, just_num)))
if just_name in heroes_heal:
heal_counters.append(tuple((just_name, just_num)))
if just_name == conv.strip_dead(allied_team[0]):
yourself = 'You (' + conv.fancify(just_name) + '): ' + str(just_num)
# no need to sort these, sorted_counters was already sorted (duh)
final_counters_dps = format_counter_list(dps_counters)
final_counters_tank = format_counter_list(tank_counters)
final_counters_heal = format_counter_list(heal_counters)
print('\n')
print("Counters (higher is better)")
print("DPS - " + final_counters_dps)
print("Tanks - " + final_counters_tank)
print("Healers - " + final_counters_heal)
else:
final_counters = format_counter_list(sorted_counters)
print('\n')
print("Counters (higher is better)")
print(final_counters)
if highlight_yourself:
print(yourself)
log.info("Yourself: '" + yourself + "'")
# end getting counters
elif not enemy_is_heroes:
print("\nThe enemy team appears to be all loading or unknown, which counters can't be calculated from.")
print('\n') # managing these is hard
if total_conf_average > delete_thresehold and not dev: # deletes screenshot once done
os.remove(screenshots_path + '/' + inputs_diff[0]) # doesn't recycle, fyi
print("Deleted " + current_filename + ' (needed ' + str(delete_thresehold)
+ '% confidence, got ' + str(total_conf_average) + '%)')
log.info("Deleted screenshot")
else:
print("Didn't delete " + current_filename + ' (needs ' + str(delete_thresehold)
+ '% confidence, got ' + str(total_conf_average) + '%)')
log.info("Didn't delete screenshot")
if delete_thresehold >= 100:
print("The delete threshold is currently 100%, which means that even tab menu screenshots aren't"
" deleted. Be sure to clean the screenshots folder out manually every now and then.")
inputs_before = os.listdir(screenshots_path) # resets screenshot folder list
log.info("END LOOP")
if dev:
log.debug("Dev mode is on and a full loop has been completed, exiting")
raise SystemExit
print('\n')
print('Analysis complete. Hold tab and press the "print screen" button to get a new set of counters.')
|
import sys
import nltk
import codecs
from pyspark import SparkContext
from kilogram.dataset.dbpedia import NgramEntityResolver
from kilogram.lang.tokenize import default_tokenize_func
from kilogram.dataset.edit_histories.wikipedia import line_filter
# --- job setup -------------------------------------------------------------
# argv[1] = input corpus path, argv[2] = output path (used at the bottom),
# argv[4] = maximum n-gram order N.
# NOTE(review): argv[3] is never read in this script -- confirm the CLI.
N = int(sys.argv[4])
sc = SparkContext(appName="SparkGenerateTypedNgrams")
lines = sc.textFile(sys.argv[1])
# Surface label -> DBpedia URI, for labels with a single unambiguous referent.
unambiguous_labels = {}
ner = NgramEntityResolver("dbpedia_data.txt", "dbpedia_2015-04.owl")
for line in codecs.open("unambiguous_labels.txt", 'r', 'utf-8'):
    # NOTE(review): `uri` keeps the trailing newline from the file --
    # verify ner.get_type() tolerates that, or strip it upstream.
    label, uri = line.split('\t')
    unambiguous_labels[label] = uri
# Split each line into words
def generate_ngrams(line):
    """Turn one input line into counted typed n-grams.

    Tokenizes the line, replaces unambiguous entity mentions with their
    DBpedia type (greedy longest match over a window of up to 20 tokens),
    then emits [(ngram_string, 1), ...] for every n-gram of order 1..N.
    Runs on Spark workers; reads module globals N, ner, unambiguous_labels.
    """
    result = []
    line = line.strip()
    for sentence in line_filter(' '.join(default_tokenize_func(line))):
        tokens_plain = []
        sentence = sentence.split()
        i = 0
        while i < len(sentence):
            # Try the longest span first so multi-word labels win over
            # their single-word prefixes.
            for j in range(min(len(sentence), i+20), i, -1):
                token = ' '.join(sentence[i:j])
                if i+1 == j and i == 0:
                    # if first word in sentence -> do not attempt to link, could be wrong (Apple)
                    tokens_plain.append(token)
                elif token in unambiguous_labels:
                    uri = unambiguous_labels[token]
                    # Emit the entity's type instead of its surface form.
                    tokens_plain.append(ner.get_type(uri, -1))
                    # Skip past the matched span (i += 1 below lands on j).
                    i = j-1
                    break
            # NOTE(review): a single non-label word at i > 0 falls through
            # without being appended, so plain words mid-sentence are
            # dropped from tokens_plain -- confirm this is intended.
            i += 1
        for n in range(1, N+1):
            for ngram in nltk.ngrams(tokens_plain, n):
                result.append((' '.join(ngram), 1))
    return result
# Count every typed n-gram corpus-wide; keep only those seen more than once.
ngrams = lines.flatMap(generate_ngrams).reduceByKey(lambda n1, n2: n1 + n2).filter(lambda x: x[1] > 1)
def printer(value):
    """Render one (ngram, count) pair as a tab-separated output line."""
    ngram, count = value
    return ngram + '\t' + str(count)
# Write "ngram<TAB>count" lines to the output path given as argv[2].
ngrams.map(printer).saveAsTextFile(sys.argv[2])
minors
import sys
import nltk
import codecs
from pyspark import SparkContext
from kilogram.dataset.dbpedia import NgramEntityResolver
from kilogram.lang.tokenize import default_tokenize_func
from kilogram.dataset.edit_histories.wikipedia import line_filter
# --- job setup -------------------------------------------------------------
# argv[1] = input corpus path, argv[2] = output path (used at the bottom),
# argv[3] = maximum n-gram order N.
N = int(sys.argv[3])
sc = SparkContext(appName="SparkGenerateTypedNgrams")
lines = sc.textFile(sys.argv[1])
# Surface label -> DBpedia URI, for labels with a single unambiguous referent.
unambiguous_labels = {}
ner = NgramEntityResolver("dbpedia_data.txt", "dbpedia_2015-04.owl")
for line in codecs.open("unambiguous_labels.txt", 'r', 'utf-8'):
    # NOTE(review): `uri` keeps the trailing newline from the file --
    # verify ner.get_type() tolerates that, or strip it upstream.
    label, uri = line.split('\t')
    unambiguous_labels[label] = uri
# Drop the redirects table before the resolver is serialized to workers --
# presumably to shrink the shipped closure; confirm nothing reads it later.
ner.redirects_file = None
# Split each line into words
def generate_ngrams(line):
    """Turn one input line into counted typed n-grams.

    Tokenizes the line, replaces unambiguous entity mentions with their
    DBpedia type (greedy longest match over a window of up to 20 tokens),
    then emits [(ngram_string, 1), ...] for every n-gram of order 1..N.
    Runs on Spark workers; reads module globals N, ner, unambiguous_labels.
    """
    result = []
    line = line.strip()
    for sentence in line_filter(' '.join(default_tokenize_func(line))):
        tokens_plain = []
        sentence = sentence.split()
        i = 0
        while i < len(sentence):
            # Try the longest span first so multi-word labels win over
            # their single-word prefixes.
            for j in range(min(len(sentence), i+20), i, -1):
                token = ' '.join(sentence[i:j])
                if i+1 == j and i == 0:
                    # if first word in sentence -> do not attempt to link, could be wrong (Apple)
                    tokens_plain.append(token)
                elif token in unambiguous_labels:
                    uri = unambiguous_labels[token]
                    # Emit the entity's type instead of its surface form.
                    tokens_plain.append(ner.get_type(uri, -1))
                    # Skip past the matched span (i += 1 below lands on j).
                    i = j-1
                    break
            # NOTE(review): a single non-label word at i > 0 falls through
            # without being appended, so plain words mid-sentence are
            # dropped from tokens_plain -- confirm this is intended.
            i += 1
        for n in range(1, N+1):
            for ngram in nltk.ngrams(tokens_plain, n):
                result.append((' '.join(ngram), 1))
    return result
# Count every typed n-gram corpus-wide; keep only those seen more than once.
ngrams = lines.flatMap(generate_ngrams).reduceByKey(lambda n1, n2: n1 + n2).filter(lambda x: x[1] > 1)
def printer(value):
    """Format an (ngram, count) tuple for text output: 'ngram<TAB>count'."""
    return '\t'.join((value[0], str(value[1])))
# Write "ngram<TAB>count" lines to the output path given as argv[2].
ngrams.map(printer).saveAsTextFile(sys.argv[2])
|
from djangorestframework import mixins
from djangorestframework import parsers
from djangorestframework import resources
from djangorestframework import views
from . import models
##
# The definition of a rating resource, and its corresponding views.
#
class RatingResource (resources.ModelResource):
    """REST resource describing a single Rating model instance."""
    model = models.Rating
    # Server-managed timestamps stay internal.
    exclude = ['created_datetime', 'updated_datetime']
    # NOTE(review): 'question' has no accessor on this class -- presumably
    # resolved on the model or a subclass (see BlockRatingResource); verify.
    include = ['question']
    def criterion(self, rating):
        # Related criterion serialized as its primary key only.
        return rating.criterion.id
    def segment(self, rating):
        # Related segment serialized as its primary key only.
        return rating.segment.id
class RatingJSONParser (parsers.JSONParser):
    """JSON parser that strips read-only attributes sent up by Backbone.js.

    Backbone sends every model attribute on sync, including server-managed
    ones such as `id` and `url`; dropping them here keeps them from being
    written back.  (Ideally the client would clean its payload before
    syncing instead of us overriding the parser -- see original note.)
    """
    def parse(self, stream):
        data, files = super(RatingJSONParser, self).parse(stream)
        # Attributes the client may send but that must never be persisted.
        for key in (u'id', u'url', u'question', u'point'):
            data.pop(key, None)
        return data, files
class RatingInstanceView (views.InstanceModelView):
    # Use every default parser except the stock JSON one, then put our
    # Backbone-aware JSON parser first so it wins for application/json.
    parsers = [parser for parser in parsers.DEFAULT_PARSERS
               if parser is not parsers.JSONParser]
    parsers.insert(0, RatingJSONParser)
    resource = RatingResource
class RatingListView (mixins.PaginatorMixin, views.ListOrCreateModelView):
    """Paginated rating list; also accepts POSTs creating new ratings."""
    resource = RatingResource
    @property
    def queryset(self):
        # Stable ordering for pagination; select_related avoids per-row
        # queries when the resource dereferences criterion/segment.
        return models.Rating.objects.order_by('segment', 'block_index').select_related()
class BlockRatingResource (RatingResource):
    """Aggregated (per block, per criterion) view over Rating rows.

    Rows come from a .values(...).annotate(...) queryset, so each
    `rating` here is a dict, not a model instance.
    """
    model = models.Rating
    exclude = ['created_datetime', 'updated_datetime', 'score', 'segment__id']
    include = ['segment', 'question', 'point', 'score__avg']
    def segment(self, rating):
        # The values() row carries the foreign key under 'segment__id'.
        return rating['segment__id']
    def point(self, rating):
        # Representative coordinate of the rated block.
        seg = models.Segment.objects.get(id=rating['segment__id'])
        pt = models.Block(seg, rating['block_index']).characteristic_point
        return {'lat': pt.y, 'lon': pt.x}
    def question(self, rating):
        return rating['criterion__prompt']
class BlockRatingListView (mixins.PaginatorMixin, views.ListModelView):
    """Read-only, paginated average score per (segment, block, criterion)."""
    resource = BlockRatingResource
    @property
    def queryset(self):
        from django.db.models import Avg
        # One row per (segment, block, criterion) with the mean score.
        return models.Rating.objects.values('segment__id', 'block_index', 'criterion__prompt').annotate(Avg('score')).select_related()
##
# The definition of a survey session resource, and its view.
#
class SurveySessionResource (resources.Resource):
    """Serializes a SurveySession as its questions plus block geometry."""
    model = models.SurveySession  # Can I get away with this?
    fields = (
        'questions',
        'blocks'
    )
    def questions(self, session):
        return session.questions
    def blocks(self, session):
        serialized = []
        for blk in session.blocks:
            pt = blk.characteristic_point
            serialized.append({
                'segment_id': blk.segment.id,
                'block_index': blk.index,
                'point': {'lat': pt.y, 'lon': pt.x}
            })
        return serialized
class SurveySessionView (views.View):
    """Return a single freshly generated survey session."""
    def get(self, request):
        # Earlier revisions accepted ?segment=...&block_index=... to pin
        # the session to a specific block; that path is currently disabled,
        # so the session is always built without preselected blocks.
        session = models.SurveySession(blocks=None)
        return SurveySessionResource().serialize_model(session)
class SurveySessionListView (views.View):
    """Return `count` generated survey sessions (default 5)."""
    def get(self, request):
        how_many = int(request.GET.get('count', 5))
        return [SurveySessionResource().serialize_model(session)
                for session in models.SurveySession.make_surveys(how_many)]
hack: force django views to only do json rendering because ie is evil
from djangorestframework import mixins
from djangorestframework import parsers
from djangorestframework import renderers
from djangorestframework import resources
from djangorestframework import views
from . import models
##
# The definition of a rating resource, and its corresponding views.
#
class RatingResource (resources.ModelResource):
    """REST resource describing a single Rating model instance."""
    model = models.Rating
    # Server-managed timestamps stay internal.
    exclude = ['created_datetime', 'updated_datetime']
    # NOTE(review): 'question' has no accessor on this class -- presumably
    # resolved on the model or a subclass (see BlockRatingResource); verify.
    include = ['question']
    def criterion(self, rating):
        # Related criterion serialized as its primary key only.
        return rating.criterion.id
    def segment(self, rating):
        # Related segment serialized as its primary key only.
        return rating.segment.id
class RatingJSONParser (parsers.JSONParser):
    """JSON parser that strips read-only attributes sent up by Backbone.js.

    Backbone sends every model attribute on sync, including server-managed
    ones such as `id` and `url`; dropping them here keeps them from being
    written back.  (Ideally the client would clean its payload before
    syncing instead of us overriding the parser -- see original note.)
    """
    def parse(self, stream):
        data, files = super(RatingJSONParser, self).parse(stream)
        # Attributes the client may send but that must never be persisted.
        for key in (u'id', u'url', u'question', u'point'):
            data.pop(key, None)
        return data, files
class RatingInstanceView (views.InstanceModelView):
    # Use every default parser except the stock JSON one, then put our
    # Backbone-aware JSON parser first so it wins for application/json.
    parsers = [parser for parser in parsers.DEFAULT_PARSERS
               if parser is not parsers.JSONParser]
    parsers.insert(0, RatingJSONParser)
    # JSON only: IE mishandles content negotiation, so never fall back to
    # any other renderer.
    renderers = [renderers.JSONRenderer]
    resource = RatingResource
class RatingListView (mixins.PaginatorMixin, views.ListOrCreateModelView):
    """Paginated rating list; also accepts POSTs creating new ratings."""
    # JSON only: IE mishandles content negotiation.
    renderers = [renderers.JSONRenderer]
    resource = RatingResource
    @property
    def queryset(self):
        # Stable ordering for pagination; select_related avoids per-row
        # queries when the resource dereferences criterion/segment.
        return models.Rating.objects.order_by('segment', 'block_index').select_related()
class BlockRatingResource (RatingResource):
    """Aggregated (per block, per criterion) view over Rating rows.

    Rows come from a .values(...).annotate(...) queryset, so each
    `rating` here is a dict, not a model instance.
    """
    model = models.Rating
    exclude = ['created_datetime', 'updated_datetime', 'score', 'segment__id']
    include = ['segment', 'question', 'point', 'score__avg']
    def segment(self, rating):
        # The values() row carries the foreign key under 'segment__id'.
        return rating['segment__id']
    def point(self, rating):
        # Representative coordinate of the rated block.
        seg = models.Segment.objects.get(id=rating['segment__id'])
        pt = models.Block(seg, rating['block_index']).characteristic_point
        return {'lat': pt.y, 'lon': pt.x}
    def question(self, rating):
        return rating['criterion__prompt']
class BlockRatingListView (mixins.PaginatorMixin, views.ListModelView):
    """Read-only, paginated average score per (segment, block, criterion)."""
    # JSON only: IE mishandles content negotiation.
    renderers = [renderers.JSONRenderer]
    resource = BlockRatingResource
    @property
    def queryset(self):
        from django.db.models import Avg
        # One row per (segment, block, criterion) with the mean score.
        return models.Rating.objects.values('segment__id', 'block_index', 'criterion__prompt').annotate(Avg('score')).select_related()
##
# The definition of a survey session resource, and its view.
#
class SurveySessionResource (resources.Resource):
    """Serializes a SurveySession as its questions plus block geometry."""
    model = models.SurveySession  # Can I get away with this?
    fields = (
        'questions',
        'blocks'
    )
    def questions(self, session):
        return session.questions
    def blocks(self, session):
        serialized = []
        for blk in session.blocks:
            pt = blk.characteristic_point
            serialized.append({
                'segment_id': blk.segment.id,
                'block_index': blk.index,
                'point': {'lat': pt.y, 'lon': pt.x}
            })
        return serialized
class SurveySessionView (views.View):
    """Return a single freshly generated survey session (JSON only)."""
    # JSON only: IE mishandles content negotiation.
    renderers = [renderers.JSONRenderer]
    def get(self, request):
        # Earlier revisions accepted ?segment=...&block_index=... to pin
        # the session to a specific block; that path is currently disabled,
        # so the session is always built without preselected blocks.
        session = models.SurveySession(blocks=None)
        return SurveySessionResource().serialize_model(session)
class SurveySessionListView (views.View):
    """Return `count` generated survey sessions (default 5, JSON only)."""
    # JSON only: IE mishandles content negotiation.
    renderers = [renderers.JSONRenderer]
    def get(self, request):
        how_many = int(request.GET.get('count', 5))
        return [SurveySessionResource().serialize_model(session)
                for session in models.SurveySession.make_surveys(how_many)]
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2005-2009 Edgewall Software
# Copyright (C) 2005 Jonas Borgström <jonas@edgewall.com>
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.com/license.html.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/.
#
# Author: Jonas Borgström <jonas@edgewall.com>
from functools import partial
import os
import pkg_resources
import re
import shutil
try:
from babel.core import Locale
except ImportError:
Locale = None
from genshi import HTML
from genshi.builder import tag
from trac.admin.api import IAdminPanelProvider
from trac.core import *
from trac.loader import get_plugin_info, get_plugins_dir
from trac.perm import PermissionSystem, IPermissionRequestor
from trac.util.datefmt import all_timezones, pytz
from trac.util.text import exception_to_unicode, \
unicode_to_base64, unicode_from_base64
from trac.util.translation import _, get_available_locales, ngettext
from trac.web import HTTPNotFound, IRequestHandler
from trac.web.chrome import add_notice, add_stylesheet, \
add_warning, Chrome, INavigationContributor, \
ITemplateProvider
from trac.web.api import is_valid_default_handler
from trac.wiki.formatter import format_to_html
try:
from webadmin import IAdminPageProvider
except ImportError:
IAdminPageProvider = None
class AdminModule(Component):
    """Web administration interface provider and panel manager."""
    implements(INavigationContributor, IRequestHandler, ITemplateProvider)
    panel_providers = ExtensionPoint(IAdminPanelProvider)
    # Legacy WebAdmin plugin support is optional; `old_providers` is None
    # when the old `webadmin` package is not importable.
    if IAdminPageProvider:
        old_providers = ExtensionPoint(IAdminPageProvider)
    else:
        old_providers = None
    # INavigationContributor methods
    def get_active_navigation_item(self, req):
        return 'admin'
    def get_navigation_items(self, req):
        # The 'Admin' navigation item is only visible if at least one
        # admin panel is available
        panels, providers = self._get_panels(req)
        if panels:
            yield 'mainnav', 'admin', tag.a(_("Admin"), href=req.href.admin(),
                                            title=_("Administration"))
    # IRequestHandler methods
    def match_request(self, req):
        # Accepts /admin, /admin/<cat>, /admin/<cat>/<panel> and
        # /admin/<cat>/<panel>/<path_info>; missing groups become None.
        match = re.match('/admin(?:/([^/]+)(?:/([^/]+)(?:/(.+))?)?)?$',
                         req.path_info)
        if match:
            req.args['cat_id'] = match.group(1)
            req.args['panel_id'] = match.group(2)
            req.args['path_info'] = match.group(3)
            return True
    def process_request(self, req):
        """Dispatch to the selected panel's provider and render it."""
        panels, providers = self._get_panels(req)
        if not panels:
            raise HTTPNotFound(_("No administration panels available"))
        # Py2 cmp-style comparator: pin ('general', ..., 'basics') first,
        # then the rest of the 'general' category, then everything else.
        def _panel_order(p1, p2):
            if p1[::2] == ('general', 'basics'):
                return -1
            elif p2[::2] == ('general', 'basics'):
                return 1
            elif p1[0] == 'general':
                if p2[0] == 'general':
                    return cmp(p1[1:], p2[1:])
                return -1
            elif p2[0] == 'general':
                # NOTE(review): this inner test can never be true -- the
                # p1[0] == 'general' case was already handled above.
                if p1[0] == 'general':
                    return cmp(p1[1:], p2[1:])
                return 1
            return cmp(p1, p2)
        panels.sort(_panel_order)
        # Default to the first category and its first panel when the URL
        # does not name one explicitly.
        cat_id = req.args.get('cat_id') or panels[0][0]
        panel_id = req.args.get('panel_id')
        path_info = req.args.get('path_info')
        if not panel_id:
            try:
                panel_id = \
                    filter(lambda panel: panel[0] == cat_id, panels)[0][2]
            except IndexError:
                raise HTTPNotFound(_("Unknown administration panel"))
        provider = providers.get((cat_id, panel_id), None)
        if not provider:
            raise HTTPNotFound(_("Unknown administration panel"))
        if hasattr(provider, 'render_admin_panel'):
            template, data = provider.render_admin_panel(req, cat_id, panel_id,
                                                         path_info)
        else: # support for legacy WebAdmin panels
            data = {}
            cstmpl, ct = provider.process_admin_request(req, cat_id, panel_id,
                                                        path_info)
            output = cstmpl.render()
            title = _("Untitled")
            for panel in panels:
                if (panel[0], panel[2]) == (cat_id, panel_id):
                    title = panel[3]
            # Wrap the legacy ClearSilver output in the new chrome.
            data.update({'page_title': title, 'page_body': HTML(output)})
            template = 'admin_legacy.html'
        data.update({
            'active_cat': cat_id, 'active_panel': panel_id,
            'panel_href': partial(req.href, 'admin', cat_id, panel_id),
            'panels': [{
                'category': {'id': panel[0], 'label': panel[1]},
                'panel': {'id': panel[2], 'label': panel[3]}
            } for panel in panels]
        })
        add_stylesheet(req, 'common/css/admin.css')
        return template, data, None
    # ITemplateProvider methods
    def get_htdocs_dirs(self):
        return []
    def get_templates_dirs(self):
        return [pkg_resources.resource_filename('trac.admin', 'templates')]
    # Internal methods
    def _get_panels(self, req):
        """Return a list of available admin panels."""
        panels = []
        # (category_id, panel_id) -> provider, used for request routing.
        providers = {}
        for provider in self.panel_providers:
            p = list(provider.get_admin_panels(req) or [])
            for panel in p:
                providers[(panel[0], panel[2])] = provider
            panels += p
        # Add panels contributed by legacy WebAdmin plugins
        if IAdminPageProvider:
            for provider in self.old_providers:
                p = list(provider.get_admin_pages(req))
                for page in p:
                    providers[(page[0], page[2])] = provider
                panels += p
        return panels, providers
def _save_config(config, req, log, notices=None):
"""Try to save the config, and display either a success notice or a
failure warning.
"""
try:
config.save()
if notices is None:
notices = [_("Your changes have been saved.")]
for notice in notices:
add_notice(req, notice)
except Exception, e:
log.error("Error writing to trac.ini: %s", exception_to_unicode(e))
add_warning(req, _("Error writing to trac.ini, make sure it is "
"writable by the web server. Your changes have "
"not been saved."))
class BasicsAdminPanel(Component):
    """Admin panel for the basic project settings (name, URL, defaults)."""
    implements(IAdminPanelProvider)
    request_handlers = ExtensionPoint(IRequestHandler)
    # IAdminPanelProvider methods
    def get_admin_panels(self, req):
        if 'TRAC_ADMIN' in req.perm('admin', 'general/basics'):
            yield ('general', _("General"), 'basics', _("Basic Settings"))
    def render_admin_panel(self, req, cat, page, path_info):
        """Render the form on GET; validate and save settings on POST."""
        # Only handlers usable as a default handler may be offered.
        valid_handlers = [hdlr.__class__.__name__
                          for hdlr in self.request_handlers
                          if is_valid_default_handler(hdlr)]
        if Locale:
            locale_ids = get_available_locales()
            locales = [Locale.parse(locale) for locale in locale_ids]
            # don't use str(locale) to prevent storing expanded locale
            # identifier, see #11258
            languages = sorted((id, locale.display_name)
                               for id, locale in zip(locale_ids, locales))
        else:
            locale_ids, locales, languages = [], [], []
        if req.method == 'POST':
            # Each submitted value is checked against the known set and
            # silently reset to '' (inherit the default) when invalid.
            for option in ('name', 'url', 'descr'):
                self.config.set('project', option, req.args.get(option))
            default_handler = req.args.get('default_handler')
            if default_handler not in valid_handlers:
                default_handler = ''
            self.config.set('trac', 'default_handler', default_handler)
            default_timezone = req.args.get('default_timezone')
            if default_timezone not in all_timezones:
                default_timezone = ''
            self.config.set('trac', 'default_timezone', default_timezone)
            default_language = req.args.get('default_language')
            if default_language not in locale_ids:
                default_language = ''
            self.config.set('trac', 'default_language', default_language)
            default_date_format = req.args.get('default_date_format')
            if default_date_format != 'iso8601':
                default_date_format = ''
            self.config.set('trac', 'default_date_format', default_date_format)
            _save_config(self.config, req, self.log)
            req.redirect(req.href.admin(cat, page))
        default_handler = self.config.get('trac', 'default_handler')
        default_timezone = self.config.get('trac', 'default_timezone')
        default_language = self.config.get('trac', 'default_language')
        default_date_format = self.config.get('trac', 'default_date_format')
        data = {
            'default_handler': default_handler,
            'handlers': sorted(valid_handlers),
            'default_timezone': default_timezone,
            'timezones': all_timezones,
            'has_pytz': pytz is not None,
            # Stored values may use '-', locale identifiers use '_'.
            'default_language': default_language.replace('-', '_'),
            'languages': languages,
            'default_date_format': default_date_format,
            'has_babel': Locale is not None,
        }
        Chrome(self.env).add_textarea_grips(req)
        return 'admin_basics.html', data
class LoggingAdminPanel(Component):
    """Admin panel for configuring the environment's logging.

    Lets an administrator pick the log destination (none / console / file /
    syslog / Windows event log), the log level, and the log file name.
    """
    implements(IAdminPanelProvider)
    # IAdminPanelProvider methods
    def get_admin_panels(self, req):
        if 'TRAC_ADMIN' in req.perm('admin', 'general/logging'):
            yield ('general', _("General"), 'logging', _("Logging"))
    def render_admin_panel(self, req, cat, page, path_info):
        """Render the logging form on GET; validate and save on POST."""
        log_type = self.env.log_type
        log_level = self.env.log_level
        log_file = self.env.log_file
        log_dir = os.path.join(self.env.path, 'log')
        # Selectable destinations; platform-specific ones are listed but
        # disabled on the wrong OS.
        log_types = [
            dict(name='none', label=_("None"),
                 selected=log_type == 'none', disabled=False),
            dict(name='stderr', label=_("Console"),
                 selected=log_type == 'stderr', disabled=False),
            dict(name='file', label=_("File"),
                 selected=log_type == 'file', disabled=False),
            dict(name='syslog', label=_("Syslog"),
                 selected=log_type in ('unix', 'syslog'),
                 disabled=os.name != 'posix'),
            dict(name='eventlog', label=_("Windows event log"),
                 selected=log_type in ('winlog', 'eventlog', 'nteventlog'),
                 disabled=os.name != 'nt'),
        ]
        log_levels = ['CRITICAL', 'ERROR', 'WARNING', 'INFO', 'DEBUG']
        if req.method == 'POST':
            changed = False
            new_type = req.args.get('log_type')
            if new_type not in [t['name'] for t in log_types]:
                raise TracError(
                    _("Unknown log type %(type)s", type=new_type),
                    _("Invalid log type")
                )
            if new_type != log_type:
                self.config.set('logging', 'log_type', new_type)
                changed = True
                log_type = new_type
            if log_type == 'none':
                # Logging disabled: the level option is meaningless now.
                self.config.remove('logging', 'log_level')
                changed = True
            else:
                new_level = req.args.get('log_level')
                if new_level not in log_levels:
                    raise TracError(
                        _("Unknown log level %(level)s", level=new_level),
                        _("Invalid log level"))
                if new_level != log_level:
                    self.config.set('logging', 'log_level', new_level)
                    changed = True
                    log_level = new_level
            if log_type == 'file':
                new_file = req.args.get('log_file', 'trac.log')
                if new_file != log_file:
                    self.config.set('logging', 'log_file', new_file)
                    changed = True
                    log_file = new_file
                if not log_file:
                    raise TracError(_("You must specify a log file"),
                                    _("Missing field"))
            else:
                self.config.remove('logging', 'log_file')
                changed = True
            if changed:
                # Fixed: a stray trailing comma here previously made this
                # statement a one-element tuple expression (harmless at
                # runtime, but a typo flagged by any linter).
                _save_config(self.config, req, self.log)
            req.redirect(req.href.admin(cat, page))
        data = {
            'type': log_type, 'types': log_types,
            'level': log_level, 'levels': log_levels,
            'file': log_file, 'dir': log_dir
        }
        return 'admin_logging.html', {'log': data}
class PermissionAdminPanel(Component):
    """Admin panel for granting/revoking permissions and group membership."""
    implements(IAdminPanelProvider, IPermissionRequestor)
    # IPermissionRequestor methods
    def get_permission_actions(self):
        # PERMISSION_ADMIN is a meta-action implying grant and revoke.
        actions = ['PERMISSION_GRANT', 'PERMISSION_REVOKE']
        return actions + [('PERMISSION_ADMIN', actions)]
    # IAdminPanelProvider methods
    def get_admin_panels(self, req):
        perm = req.perm('admin', 'general/perm')
        if 'PERMISSION_GRANT' in perm or 'PERMISSION_REVOKE' in perm:
            yield ('general', _("General"), 'perm', _("Permissions"))
    def render_admin_panel(self, req, cat, page, path_info):
        """Handle the permissions panel.

        POST supports four actions: grant a permission to a subject, add a
        subject to a group, copy one subject's permissions to another, and
        revoke selected permissions.  GET lists permissions and groups.
        """
        perm = PermissionSystem(self.env)
        all_permissions = perm.get_all_permissions()
        all_actions = perm.get_actions()
        if req.method == 'POST':
            subject = req.args.get('subject', '').strip()
            target = req.args.get('target', '').strip()
            action = req.args.get('action')
            group = req.args.get('group', '').strip()
            # Upper-case tokens denote permission actions, never subjects.
            if subject and subject.isupper() or \
                    group and group.isupper() or \
                    target and target.isupper():
                raise TracError(_("All upper-cased tokens are reserved for "
                                  "permission names."))
            # Grant permission to subject
            if req.args.get('add') and subject and action:
                req.perm('admin', 'general/perm').require('PERMISSION_GRANT')
                if action not in all_actions:
                    raise TracError(_("Unknown action"))
                # One may only grant permissions one possesses oneself.
                req.perm.require(action)
                if (subject, action) not in all_permissions:
                    perm.grant_permission(subject, action)
                    add_notice(req, _("The subject %(subject)s has been "
                                      "granted the permission %(action)s.",
                                      subject=subject, action=action))
                    req.redirect(req.href.admin(cat, page))
                else:
                    add_warning(req, _("The permission %(action)s was already "
                                       "granted to %(subject)s.",
                                       action=action, subject=subject))
            # Add subject to group
            elif req.args.get('add') and subject and group:
                req.perm('admin', 'general/perm').require('PERMISSION_GRANT')
                # The actor must hold every permission the group conveys.
                for action in perm.get_user_permissions(group):
                    if not action in all_actions: # plugin disabled?
                        self.env.log.warn("Adding %s to group %s: "
                            "Permission %s unavailable, skipping perm check.",
                            subject, group, action)
                    else:
                        req.perm.require(action,
                            message=_("The subject %(subject)s was not added "
                                      "to the group %(group)s because the "
                                      "group has %(perm)s permission and "
                                      "users cannot grant permissions they "
                                      "don't possess.", subject=subject,
                                      group=group, perm=action))
                if (subject, group) not in all_permissions:
                    perm.grant_permission(subject, group)
                    add_notice(req, _("The subject %(subject)s has been added "
                                      "to the group %(group)s.",
                                      subject=subject, group=group))
                    req.redirect(req.href.admin(cat, page))
                else:
                    add_warning(req, _("The subject %(subject)s was already "
                                       "added to the group %(group)s.",
                                       subject=subject, group=group))
            # Copy permissions to subject
            elif req.args.get('copy') and subject and target:
                req.perm.require('PERMISSION_GRANT')
                subject_permissions = [i[1] for i in all_permissions
                                       if i[0] == subject and
                                          i[1].isupper()]
                if not subject_permissions:
                    add_warning(req,_("The subject %(subject)s does not "
                                      "have any permissions.",
                                      subject=subject))
                for action in subject_permissions:
                    if (target, action) in all_permissions:
                        continue
                    if not action in all_actions: # plugin disabled?
                        self.env.log.warn("Skipped granting %s to %s: "
                                          "permission unavailable.",
                                          action, target)
                    else:
                        if action not in req.perm:
                            add_warning(req,
                                        _("The permission %(action)s was "
                                          "not granted to %(subject)s "
                                          "because users cannot grant "
                                          "permissions they don't possess.",
                                          action=action, subject=subject))
                            continue
                        perm.grant_permission(target, action)
                        add_notice(req, _("The subject %(subject)s has "
                                          "been granted the permission "
                                          "%(action)s.",
                                          subject=target, action=action))
                req.redirect(req.href.admin(cat, page))
            # Remove permissions action
            elif req.args.get('remove') and req.args.get('sel'):
                req.perm('admin', 'general/perm').require('PERMISSION_REVOKE')
                sel = req.args.get('sel')
                sel = sel if isinstance(sel, list) else [sel]
                for key in sel:
                    # Selection keys are "<b64 subject>:<b64 action>".
                    subject, action = key.split(':', 1)
                    subject = unicode_from_base64(subject)
                    action = unicode_from_base64(action)
                    if (subject, action) in perm.get_all_permissions():
                        perm.revoke_permission(subject, action)
                add_notice(req, _("The selected permissions have been "
                                  "revoked."))
                req.redirect(req.href.admin(cat, page))
        # Upper-case second element = real permission; otherwise a group.
        perms = [perm for perm in all_permissions if perm[1].isupper()]
        groups = [perm for perm in all_permissions if not perm[1].isupper()]
        return 'admin_perms.html', {
            'actions': all_actions, 'perms': perms, 'groups': groups,
            'unicode_to_base64': unicode_to_base64
        }
class PluginAdminPanel(Component):
implements(IAdminPanelProvider)
# IAdminPanelProvider methods
def get_admin_panels(self, req):
if 'TRAC_ADMIN' in req.perm('admin', 'general/plugin'):
yield ('general', _("General"), 'plugin', _("Plugins"))
def render_admin_panel(self, req, cat, page, path_info):
if req.method == 'POST':
if 'install' in req.args:
self._do_install(req)
elif 'uninstall' in req.args:
self._do_uninstall(req)
else:
self._do_update(req)
anchor = ''
if 'plugin' in req.args:
anchor = '#no%d' % (int(req.args.get('plugin')) + 1)
req.redirect(req.href.admin(cat, page) + anchor)
return self._render_view(req)
# Internal methods
def _do_install(self, req):
"""Install a plugin."""
if 'plugin_file' not in req.args:
raise TracError(_("No file uploaded"))
upload = req.args['plugin_file']
if isinstance(upload, unicode) or not upload.filename:
raise TracError(_("No file uploaded"))
plugin_filename = upload.filename.replace('\\', '/').replace(':', '/')
plugin_filename = os.path.basename(plugin_filename)
if not plugin_filename:
raise TracError(_("No file uploaded"))
if not plugin_filename.endswith('.egg') and \
not plugin_filename.endswith('.py'):
raise TracError(_("Uploaded file is not a Python source file or "
"egg"))
target_path = os.path.join(self.env.path, 'plugins', plugin_filename)
if os.path.isfile(target_path):
raise TracError(_("Plugin %(name)s already installed",
name=plugin_filename))
self.log.info("Installing plugin %s", plugin_filename)
flags = os.O_CREAT + os.O_WRONLY + os.O_EXCL
try:
flags += os.O_BINARY
except AttributeError:
# OS_BINARY not available on every platform
pass
with os.fdopen(os.open(target_path, flags, 0666), 'w') as target_file:
shutil.copyfileobj(upload.file, target_file)
self.log.info("Plugin %s installed to %s", plugin_filename,
target_path)
# TODO: Validate that the uploaded file is actually a valid Trac plugin
# Make the environment reset itself on the next request
self.env.config.touch()
def _do_uninstall(self, req):
"""Uninstall a plugin."""
plugin_filename = req.args.get('plugin_filename')
if not plugin_filename:
return
plugin_path = os.path.join(self.env.path, 'plugins', plugin_filename)
if not os.path.isfile(plugin_path):
return
self.log.info("Uninstalling plugin %s", plugin_filename)
os.remove(plugin_path)
# Make the environment reset itself on the next request
self.env.config.touch()
def _do_update(self, req):
"""Update component enablement."""
components = req.args.getlist('component')
enabled = req.args.getlist('enable')
added, removed = [], []
# FIXME: this needs to be more intelligent and minimize multiple
# component names to prefix rules
for component in components:
is_enabled = bool(self.env.is_component_enabled(component))
must_enable = component in enabled
if is_enabled != must_enable:
self.config.set('components', component,
'disabled' if is_enabled else 'enabled')
self.log.info("%sabling component %s",
"Dis" if is_enabled else "En", component)
if must_enable:
added.append(component)
else:
removed.append(component)
if added or removed:
def make_list(items):
parts = [item.rsplit('.', 1) for item in items]
return tag.table(tag.tbody(
tag.tr(tag.td(c, class_='trac-name'),
tag.td('(%s.*)' % m, class_='trac-name'))
for m, c in parts), class_='trac-pluglist')
added.sort()
removed.sort()
notices = []
if removed:
msg = ngettext("The following component has been disabled:",
"The following components have been disabled:",
len(removed))
notices.append(tag(msg, make_list(removed)))
if added:
msg = ngettext("The following component has been enabled:",
"The following components have been enabled:",
len(added))
notices.append(tag(msg, make_list(added)))
_save_config(self.config, req, self.log, notices)
def _render_view(self, req):
plugins = get_plugin_info(self.env, include_core=True)
def safe_wiki_to_html(context, text):
try:
return format_to_html(self.env, context, text)
except Exception, e:
self.log.error("Unable to render component documentation: %s",
exception_to_unicode(e, traceback=True))
return tag.pre(text)
data = {
'plugins': plugins, 'show': req.args.get('show'),
'readonly': not os.access(get_plugins_dir(self.env),
os.F_OK + os.W_OK),
'safe_wiki_to_html': safe_wiki_to_html,
}
return 'admin_plugins.html', data
1.1.2dev: Follow-on to [12624] - renamed variable. Refs #11519.
git-svn-id: 0d96b0c1a6983ccc08b3732614f4d6bfcf9cbb42@12753 af82e41b-90c4-0310-8c96-b1721e28e2e2
# -*- coding: utf-8 -*-
#
# Copyright (C) 2005-2009 Edgewall Software
# Copyright (C) 2005 Jonas Borgström <jonas@edgewall.com>
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.com/license.html.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/.
#
# Author: Jonas Borgström <jonas@edgewall.com>
from functools import partial
import os
import pkg_resources
import re
import shutil
try:
from babel.core import Locale
except ImportError:
Locale = None
from genshi import HTML
from genshi.builder import tag
from trac.admin.api import IAdminPanelProvider
from trac.core import *
from trac.loader import get_plugin_info, get_plugins_dir
from trac.perm import PermissionSystem, IPermissionRequestor
from trac.util.datefmt import all_timezones, pytz
from trac.util.text import exception_to_unicode, \
unicode_to_base64, unicode_from_base64
from trac.util.translation import _, get_available_locales, ngettext
from trac.web import HTTPNotFound, IRequestHandler
from trac.web.chrome import add_notice, add_stylesheet, \
add_warning, Chrome, INavigationContributor, \
ITemplateProvider
from trac.web.api import is_valid_default_handler
from trac.wiki.formatter import format_to_html
try:
from webadmin import IAdminPageProvider
except ImportError:
IAdminPageProvider = None
class AdminModule(Component):
    """Web administration interface provider and panel manager.

    Routes /admin requests to the panel provider responsible for the
    selected category/panel and contributes the 'Admin' item to the
    main navigation bar.
    """
    implements(INavigationContributor, IRequestHandler, ITemplateProvider)
    # Panels contributed through the modern admin API.
    panel_providers = ExtensionPoint(IAdminPanelProvider)
    # Legacy WebAdmin plugins are only supported when the optional
    # webadmin package imported successfully at module load time.
    if IAdminPageProvider:
        old_providers = ExtensionPoint(IAdminPageProvider)
    else:
        old_providers = None
    # INavigationContributor methods
    def get_active_navigation_item(self, req):
        """Name of this module's entry in the main navigation bar."""
        return 'admin'
    def get_navigation_items(self, req):
        """Yield the 'Admin' mainnav item.

        The 'Admin' navigation item is only visible if at least one
        admin panel is available to the current user.
        """
        panels, providers = self._get_panels(req)
        if panels:
            yield 'mainnav', 'admin', tag.a(_("Admin"), href=req.href.admin(),
                                            title=_("Administration"))
    # IRequestHandler methods
    def match_request(self, req):
        """Match /admin[/<cat>[/<panel>[/<path_info>]]] URLs.

        The captured groups (any of which may be None) are stored in
        ``req.args`` for ``process_request`` to pick up.
        """
        match = re.match('/admin(?:/([^/]+)(?:/([^/]+)(?:/(.+))?)?)?$',
                         req.path_info)
        if match:
            req.args['cat_id'] = match.group(1)
            req.args['panel_id'] = match.group(2)
            req.args['path_info'] = match.group(3)
            return True
    def process_request(self, req):
        """Dispatch the request to the selected admin panel provider.

        :raises HTTPNotFound: if no panels are available or the requested
                              category/panel pair is unknown.
        """
        panels, providers = self._get_panels(req)
        if not panels:
            raise HTTPNotFound(_("No administration panels available"))
        def _panel_order(p1, p2):
            # Python 2 cmp-style comparator: 'general/basics' always
            # sorts first, then the remaining 'general' panels, then
            # everything else in natural tuple order.
            if p1[::2] == ('general', 'basics'):
                return -1
            elif p2[::2] == ('general', 'basics'):
                return 1
            elif p1[0] == 'general':
                if p2[0] == 'general':
                    return cmp(p1[1:], p2[1:])
                return -1
            elif p2[0] == 'general':
                # NOTE(review): p1[0] != 'general' here (previous branch
                # failed), so this inner test can never succeed and this
                # branch always returns 1.
                if p1[0] == 'general':
                    return cmp(p1[1:], p2[1:])
                return 1
            return cmp(p1, p2)
        panels.sort(_panel_order)
        # Default to the first category, and to that category's first
        # panel, when the URL did not specify them.
        cat_id = req.args.get('cat_id') or panels[0][0]
        panel_id = req.args.get('panel_id')
        path_info = req.args.get('path_info')
        if not panel_id:
            try:
                panel_id = \
                    filter(lambda panel: panel[0] == cat_id, panels)[0][2]
            except IndexError:
                raise HTTPNotFound(_("Unknown administration panel"))
        provider = providers.get((cat_id, panel_id), None)
        if not provider:
            raise HTTPNotFound(_("Unknown administration panel"))
        if hasattr(provider, 'render_admin_panel'):
            template, data = provider.render_admin_panel(req, cat_id, panel_id,
                                                         path_info)
        else:  # support for legacy WebAdmin panels
            data = {}
            cstmpl, ct = provider.process_admin_request(req, cat_id, panel_id,
                                                        path_info)
            output = cstmpl.render()
            title = _("Untitled")
            for panel in panels:
                if (panel[0], panel[2]) == (cat_id, panel_id):
                    title = panel[3]
            data.update({'page_title': title, 'page_body': HTML(output)})
            template = 'admin_legacy.html'
        data.update({
            'active_cat': cat_id, 'active_panel': panel_id,
            'panel_href': partial(req.href, 'admin', cat_id, panel_id),
            'panels': [{
                'category': {'id': panel[0], 'label': panel[1]},
                'panel': {'id': panel[2], 'label': panel[3]}
            } for panel in panels]
        })
        add_stylesheet(req, 'common/css/admin.css')
        return template, data, None
    # ITemplateProvider methods
    def get_htdocs_dirs(self):
        """This module contributes no static resources."""
        return []
    def get_templates_dirs(self):
        """Expose the trac.admin package's template directory."""
        return [pkg_resources.resource_filename('trac.admin', 'templates')]
    # Internal methods
    def _get_panels(self, req):
        """Return a list of available admin panels.

        Returns ``(panels, providers)`` where ``panels`` is a list of
        ``(category, cat_label, panel, panel_label)`` tuples and
        ``providers`` maps ``(category, panel)`` to the owning provider.
        """
        panels = []
        providers = {}
        for provider in self.panel_providers:
            p = list(provider.get_admin_panels(req) or [])
            for panel in p:
                providers[(panel[0], panel[2])] = provider
            panels += p
        # Add panels contributed by legacy WebAdmin plugins
        if IAdminPageProvider:
            for provider in self.old_providers:
                p = list(provider.get_admin_pages(req))
                for page in p:
                    providers[(page[0], page[2])] = provider
                panels += p
        return panels, providers
def _save_config(config, req, log, notices=None):
    """Try to save the config, and display either a success notice or a
    failure warning.

    :param config: configuration object whose ``save()`` is attempted
    :param req: current request, used to attach notices/warnings
    :param log: logger that records a failed save
    :param notices: optional list of success messages; defaults to a
                    single generic "changes saved" notice
    """
    try:
        config.save()
        if notices is None:
            notices = [_("Your changes have been saved.")]
        for notice in notices:
            add_notice(req, notice)
    except Exception, e:  # Python 2 syntax; any write failure is reported
        log.error("Error writing to trac.ini: %s", exception_to_unicode(e))
        add_warning(req, _("Error writing to trac.ini, make sure it is "
                           "writable by the web server. Your changes have "
                           "not been saved."))
class BasicsAdminPanel(Component):
    """Admin panel for the basic project settings stored in trac.ini."""
    implements(IAdminPanelProvider)
    request_handlers = ExtensionPoint(IRequestHandler)
    # IAdminPanelProvider methods
    def get_admin_panels(self, req):
        """Yield the General/Basic Settings panel to TRAC_ADMIN users."""
        if 'TRAC_ADMIN' in req.perm('admin', 'general/basics'):
            yield ('general', _("General"), 'basics', _("Basic Settings"))
    def render_admin_panel(self, req, cat, page, path_info):
        """Display and update project name/URL/description and defaults.

        On POST, any submitted value that fails validation (unknown
        handler, timezone, language or date format) is reset to the
        empty string, i.e. back to the built-in default, before saving.
        """
        valid_handlers = [handler.__class__.__name__
                          for handler in self.request_handlers
                          if is_valid_default_handler(handler)]
        if Locale:
            locale_ids = get_available_locales()
            locales = [Locale.parse(locale) for locale in locale_ids]
            # don't use str(locale) to prevent storing expanded locale
            # identifier, see #11258
            languages = sorted((id, locale.display_name)
                               for id, locale in zip(locale_ids, locales))
        else:
            # Babel is not installed: no language selection available.
            locale_ids, locales, languages = [], [], []
        if req.method == 'POST':
            for option in ('name', 'url', 'descr'):
                self.config.set('project', option, req.args.get(option))
            default_handler = req.args.get('default_handler')
            if default_handler not in valid_handlers:
                default_handler = ''
            self.config.set('trac', 'default_handler', default_handler)
            default_timezone = req.args.get('default_timezone')
            if default_timezone not in all_timezones:
                default_timezone = ''
            self.config.set('trac', 'default_timezone', default_timezone)
            default_language = req.args.get('default_language')
            if default_language not in locale_ids:
                default_language = ''
            self.config.set('trac', 'default_language', default_language)
            default_date_format = req.args.get('default_date_format')
            # Only 'iso8601' is a recognized explicit format choice.
            if default_date_format != 'iso8601':
                default_date_format = ''
            self.config.set('trac', 'default_date_format', default_date_format)
            _save_config(self.config, req, self.log)
            req.redirect(req.href.admin(cat, page))
        default_handler = self.config.get('trac', 'default_handler')
        default_timezone = self.config.get('trac', 'default_timezone')
        default_language = self.config.get('trac', 'default_language')
        default_date_format = self.config.get('trac', 'default_date_format')
        data = {
            'default_handler': default_handler,
            'handlers': sorted(valid_handlers),
            'default_timezone': default_timezone,
            'timezones': all_timezones,
            'has_pytz': pytz is not None,
            'default_language': default_language.replace('-', '_'),
            'languages': languages,
            'default_date_format': default_date_format,
            'has_babel': Locale is not None,
        }
        Chrome(self.env).add_textarea_grips(req)
        return 'admin_basics.html', data
class LoggingAdminPanel(Component):
    """Admin panel for configuring the environment's logging facility."""
    implements(IAdminPanelProvider)
    # IAdminPanelProvider methods
    def get_admin_panels(self, req):
        """Yield the General/Logging panel to TRAC_ADMIN users."""
        if 'TRAC_ADMIN' in req.perm('admin', 'general/logging'):
            yield ('general', _("General"), 'logging', _("Logging"))
    def render_admin_panel(self, req, cat, page, path_info):
        """Display and update the [logging] section of trac.ini.

        On POST, validates the submitted log type, level and file name,
        persists any changes via ``_save_config`` and redirects back to
        the panel; on GET, renders the current settings.

        :raises TracError: for an unknown log type or level, or when
                           type 'file' is selected without a file name.
        """
        log_type = self.env.log_type
        log_level = self.env.log_level
        log_file = self.env.log_file
        log_dir = os.path.join(self.env.path, 'log')
        # Available sinks; platform-specific ones are disabled when the
        # host OS does not support them.
        log_types = [
            dict(name='none', label=_("None"),
                 selected=log_type == 'none', disabled=False),
            dict(name='stderr', label=_("Console"),
                 selected=log_type == 'stderr', disabled=False),
            dict(name='file', label=_("File"),
                 selected=log_type == 'file', disabled=False),
            dict(name='syslog', label=_("Syslog"),
                 selected=log_type in ('unix', 'syslog'),
                 disabled=os.name != 'posix'),
            dict(name='eventlog', label=_("Windows event log"),
                 selected=log_type in ('winlog', 'eventlog', 'nteventlog'),
                 disabled=os.name != 'nt'),
        ]
        log_levels = ['CRITICAL', 'ERROR', 'WARNING', 'INFO', 'DEBUG']
        if req.method == 'POST':
            changed = False
            new_type = req.args.get('log_type')
            if new_type not in [t['name'] for t in log_types]:
                raise TracError(
                    _("Unknown log type %(type)s", type=new_type),
                    _("Invalid log type")
                )
            if new_type != log_type:
                self.config.set('logging', 'log_type', new_type)
                changed = True
                log_type = new_type
            if log_type == 'none':
                # No logging: the level setting becomes irrelevant.
                self.config.remove('logging', 'log_level')
                changed = True
            else:
                new_level = req.args.get('log_level')
                if new_level not in log_levels:
                    raise TracError(
                        _("Unknown log level %(level)s", level=new_level),
                        _("Invalid log level"))
                if new_level != log_level:
                    self.config.set('logging', 'log_level', new_level)
                    changed = True
                    log_level = new_level
            if log_type == 'file':
                new_file = req.args.get('log_file', 'trac.log')
                if new_file != log_file:
                    self.config.set('logging', 'log_file', new_file)
                    changed = True
                    log_file = new_file
                if not log_file:
                    raise TracError(_("You must specify a log file"),
                                    _("Missing field"))
            else:
                # Non-file sinks never use a log file setting.
                self.config.remove('logging', 'log_file')
                changed = True
            if changed:
                # Fixed: a stray trailing comma previously turned this
                # call statement into a one-element tuple expression.
                _save_config(self.config, req, self.log)
            req.redirect(req.href.admin(cat, page))
        data = {
            'type': log_type, 'types': log_types,
            'level': log_level, 'levels': log_levels,
            'file': log_file, 'dir': log_dir
        }
        return 'admin_logging.html', {'log': data}
class PermissionAdminPanel(Component):
    """Admin panel for granting, copying and revoking permissions."""
    implements(IAdminPanelProvider, IPermissionRequestor)
    # IPermissionRequestor methods
    def get_permission_actions(self):
        """Declare the permission actions this panel introduces.

        PERMISSION_ADMIN is a meta-action implying both GRANT and REVOKE.
        """
        actions = ['PERMISSION_GRANT', 'PERMISSION_REVOKE']
        return actions + [('PERMISSION_ADMIN', actions)]
    # IAdminPanelProvider methods
    def get_admin_panels(self, req):
        """Yield the Permissions panel to users who may grant or revoke."""
        perm = req.perm('admin', 'general/perm')
        if 'PERMISSION_GRANT' in perm or 'PERMISSION_REVOKE' in perm:
            yield ('general', _("General"), 'perm', _("Permissions"))
    def render_admin_panel(self, req, cat, page, path_info):
        """Handle the permission grid.

        POST actions: 'add' (grant an action to a subject, or add a
        subject to a group), 'copy' (copy one subject's permissions to a
        target subject) and 'remove' (revoke the selected permissions).
        Successful changes redirect back to the panel.
        """
        perm = PermissionSystem(self.env)
        all_permissions = perm.get_all_permissions()
        all_actions = perm.get_actions()
        if req.method == 'POST':
            subject = req.args.get('subject', '').strip()
            target = req.args.get('target', '').strip()
            action = req.args.get('action')
            group = req.args.get('group', '').strip()
            # Upper-case tokens denote permission actions, so they may
            # not be used as subject, group or target names.
            if subject and subject.isupper() or \
                    group and group.isupper() or \
                    target and target.isupper():
                raise TracError(_("All upper-cased tokens are reserved for "
                                  "permission names."))
            # Grant permission to subject
            if req.args.get('add') and subject and action:
                req.perm('admin', 'general/perm').require('PERMISSION_GRANT')
                if action not in all_actions:
                    raise TracError(_("Unknown action"))
                # Users may only grant permissions they themselves hold.
                req.perm.require(action)
                if (subject, action) not in all_permissions:
                    perm.grant_permission(subject, action)
                    add_notice(req, _("The subject %(subject)s has been "
                                      "granted the permission %(action)s.",
                                      subject=subject, action=action))
                    req.redirect(req.href.admin(cat, page))
                else:
                    add_warning(req, _("The permission %(action)s was already "
                                       "granted to %(subject)s.",
                                       action=action, subject=subject))
            # Add subject to group
            elif req.args.get('add') and subject and group:
                req.perm('admin', 'general/perm').require('PERMISSION_GRANT')
                # Group membership implies every permission the group
                # holds, so require each of them from the granting user.
                for action in perm.get_user_permissions(group):
                    if not action in all_actions:  # plugin disabled?
                        self.env.log.warn("Adding %s to group %s: "
                            "Permission %s unavailable, skipping perm check.",
                            subject, group, action)
                    else:
                        req.perm.require(action,
                            message=_("The subject %(subject)s was not added "
                                      "to the group %(group)s because the "
                                      "group has %(perm)s permission and "
                                      "users cannot grant permissions they "
                                      "don't possess.", subject=subject,
                                      group=group, perm=action))
                if (subject, group) not in all_permissions:
                    perm.grant_permission(subject, group)
                    add_notice(req, _("The subject %(subject)s has been added "
                                      "to the group %(group)s.",
                                      subject=subject, group=group))
                    req.redirect(req.href.admin(cat, page))
                else:
                    add_warning(req, _("The subject %(subject)s was already "
                                       "added to the group %(group)s.",
                                       subject=subject, group=group))
            # Copy permissions to subject
            elif req.args.get('copy') and subject and target:
                req.perm.require('PERMISSION_GRANT')
                # Only copy real (upper-cased) actions, not group links.
                subject_permissions = [i[1] for i in all_permissions
                                       if i[0] == subject and
                                          i[1].isupper()]
                if not subject_permissions:
                    add_warning(req,_("The subject %(subject)s does not "
                                      "have any permissions.",
                                      subject=subject))
                for action in subject_permissions:
                    if (target, action) in all_permissions:
                        continue
                    if not action in all_actions:  # plugin disabled?
                        self.env.log.warn("Skipped granting %s to %s: "
                                          "permission unavailable.",
                                          action, target)
                    else:
                        if action not in req.perm:
                            add_warning(req,
                                        _("The permission %(action)s was "
                                          "not granted to %(subject)s "
                                          "because users cannot grant "
                                          "permissions they don't possess.",
                                          action=action, subject=subject))
                            continue
                        perm.grant_permission(target, action)
                        add_notice(req, _("The subject %(subject)s has "
                                          "been granted the permission "
                                          "%(action)s.",
                                          subject=target, action=action))
                req.redirect(req.href.admin(cat, page))
            # Remove permissions action
            elif req.args.get('remove') and req.args.get('sel'):
                req.perm('admin', 'general/perm').require('PERMISSION_REVOKE')
                sel = req.args.get('sel')
                # A single checkbox arrives as a scalar, not a list.
                sel = sel if isinstance(sel, list) else [sel]
                for key in sel:
                    # Keys are 'subject:action', both base64-encoded.
                    subject, action = key.split(':', 1)
                    subject = unicode_from_base64(subject)
                    action = unicode_from_base64(action)
                    if (subject, action) in perm.get_all_permissions():
                        perm.revoke_permission(subject, action)
                add_notice(req, _("The selected permissions have been "
                                  "revoked."))
                req.redirect(req.href.admin(cat, page))
        # Upper-cased targets are direct permissions; the rest are groups.
        perms = [perm for perm in all_permissions if perm[1].isupper()]
        groups = [perm for perm in all_permissions if not perm[1].isupper()]
        return 'admin_perms.html', {
            'actions': all_actions, 'perms': perms, 'groups': groups,
            'unicode_to_base64': unicode_to_base64
        }
class PluginAdminPanel(Component):
    """Admin panel for installing, removing and toggling plugins."""
    implements(IAdminPanelProvider)
    # IAdminPanelProvider methods
    def get_admin_panels(self, req):
        """Yield the General/Plugins panel to TRAC_ADMIN users."""
        if 'TRAC_ADMIN' in req.perm('admin', 'general/plugin'):
            yield ('general', _("General"), 'plugin', _("Plugins"))
    def render_admin_panel(self, req, cat, page, path_info):
        """Dispatch POST actions (install/uninstall/update) or render."""
        if req.method == 'POST':
            if 'install' in req.args:
                self._do_install(req)
            elif 'uninstall' in req.args:
                self._do_uninstall(req)
            else:
                self._do_update(req)
            anchor = ''
            if 'plugin' in req.args:
                # Jump back to the plugin entry that was acted upon.
                anchor = '#no%d' % (int(req.args.get('plugin')) + 1)
            req.redirect(req.href.admin(cat, page) + anchor)
        return self._render_view(req)
    # Internal methods
    def _do_install(self, req):
        """Install a plugin.

        Validates the uploaded file name (.py or .egg only, stripped of
        any path components) and writes it into the environment's
        plugins directory, refusing to overwrite an existing file.
        """
        if 'plugin_file' not in req.args:
            raise TracError(_("No file uploaded"))
        upload = req.args['plugin_file']
        # A plain unicode value means the field had no file attached.
        if isinstance(upload, unicode) or not upload.filename:
            raise TracError(_("No file uploaded"))
        # Normalize client-side separators so basename() strips any path.
        plugin_filename = upload.filename.replace('\\', '/').replace(':', '/')
        plugin_filename = os.path.basename(plugin_filename)
        if not plugin_filename:
            raise TracError(_("No file uploaded"))
        if not plugin_filename.endswith('.egg') and \
                not plugin_filename.endswith('.py'):
            raise TracError(_("Uploaded file is not a Python source file or "
                              "egg"))
        target_path = os.path.join(self.env.path, 'plugins', plugin_filename)
        if os.path.isfile(target_path):
            raise TracError(_("Plugin %(name)s already installed",
                              name=plugin_filename))
        self.log.info("Installing plugin %s", plugin_filename)
        # O_EXCL guards against racing a concurrent install of the
        # same file.
        flags = os.O_CREAT + os.O_WRONLY + os.O_EXCL
        try:
            flags += os.O_BINARY
        except AttributeError:
            # OS_BINARY not available on every platform
            pass
        with os.fdopen(os.open(target_path, flags, 0666), 'w') as target_file:
            shutil.copyfileobj(upload.file, target_file)
        self.log.info("Plugin %s installed to %s", plugin_filename,
                      target_path)
        # TODO: Validate that the uploaded file is actually a valid Trac plugin
        # Make the environment reset itself on the next request
        self.env.config.touch()
    def _do_uninstall(self, req):
        """Uninstall a plugin by deleting its file, if it exists."""
        plugin_filename = req.args.get('plugin_filename')
        if not plugin_filename:
            return
        plugin_path = os.path.join(self.env.path, 'plugins', plugin_filename)
        if not os.path.isfile(plugin_path):
            return
        self.log.info("Uninstalling plugin %s", plugin_filename)
        os.remove(plugin_path)
        # Make the environment reset itself on the next request
        self.env.config.touch()
    def _do_update(self, req):
        """Update component enablement from the submitted form state."""
        components = req.args.getlist('component')
        enabled = req.args.getlist('enable')
        added, removed = [], []
        # FIXME: this needs to be more intelligent and minimize multiple
        # component names to prefix rules
        for component in components:
            is_enabled = bool(self.env.is_component_enabled(component))
            must_enable = component in enabled
            if is_enabled != must_enable:
                self.config.set('components', component,
                                'disabled' if is_enabled else 'enabled')
                self.log.info("%sabling component %s",
                              "Dis" if is_enabled else "En", component)
                if must_enable:
                    added.append(component)
                else:
                    removed.append(component)
        if added or removed:
            def make_list(items):
                # Render 'module.Class' items as a two-column table of
                # class name and its module prefix rule.
                parts = [item.rsplit('.', 1) for item in items]
                return tag.table(tag.tbody(
                    tag.tr(tag.td(c, class_='trac-name'),
                           tag.td('(%s.*)' % m, class_='trac-name'))
                    for m, c in parts), class_='trac-pluglist')
            added.sort()
            removed.sort()
            notices = []
            if removed:
                msg = ngettext("The following component has been disabled:",
                               "The following components have been disabled:",
                               len(removed))
                notices.append(tag(msg, make_list(removed)))
            if added:
                msg = ngettext("The following component has been enabled:",
                               "The following components have been enabled:",
                               len(added))
                notices.append(tag(msg, make_list(added)))
            _save_config(self.config, req, self.log, notices)
    def _render_view(self, req):
        """Collect plugin information and render the plugin list page."""
        plugins = get_plugin_info(self.env, include_core=True)
        def safe_wiki_to_html(context, text):
            # Never let a rendering failure break the whole panel; fall
            # back to preformatted plain text instead.
            try:
                return format_to_html(self.env, context, text)
            except Exception, e:
                self.log.error("Unable to render component documentation: %s",
                               exception_to_unicode(e, traceback=True))
                return tag.pre(text)
        data = {
            'plugins': plugins, 'show': req.args.get('show'),
            'readonly': not os.access(get_plugins_dir(self.env),
                                      os.F_OK + os.W_OK),
            'safe_wiki_to_html': safe_wiki_to_html,
        }
        return 'admin_plugins.html', data
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import api, fields, models
class MailingTrace(models.Model):
    """ MailingTrace models the statistics collected about emails. Those statistics
    are stored in a separated model and table to avoid bloating the mail_mail table
    with statistics values. This also allows to delete emails sent with mass mailing
    without losing the statistics about them. """
    _name = 'mailing.trace'
    _description = 'Mailing Statistics'
    _rec_name = 'id'
    _order = 'scheduled DESC'
    trace_type = fields.Selection([('mail', 'Mail')], string='Type', default='mail', required=True)
    display_name = fields.Char(compute='_compute_display_name')
    # mail data
    mail_mail_id = fields.Many2one('mail.mail', string='Mail', index=True)
    mail_mail_id_int = fields.Integer(
        string='Mail ID (tech)',
        help='ID of the related mail_mail. This field is an integer field because '
             'the related mail_mail can be deleted separately from its statistics. '
             'However the ID is needed for several action and controllers.',
        index=True,
    )
    email = fields.Char(string="Email", help="Normalized email address")
    message_id = fields.Char(string='Message-ID')
    # document
    model = fields.Char(string='Document model')
    res_id = fields.Integer(string='Document ID')
    # campaign / wave data
    mass_mailing_id = fields.Many2one('mailing.mailing', string='Mailing', index=True, ondelete='cascade')
    campaign_id = fields.Many2one(
        related='mass_mailing_id.campaign_id',
        string='Campaign',
        store=True, readonly=True, index=True)
    # Bounce and tracking
    ignored = fields.Datetime(help='Date when the email has been invalidated. '
                                   'Invalid emails are blacklisted, opted-out or invalid email format')
    scheduled = fields.Datetime(help='Date when the email has been created', default=fields.Datetime.now)
    sent = fields.Datetime(help='Date when the email has been sent')
    exception = fields.Datetime(help='Date of technical error leading to the email not being sent')
    opened = fields.Datetime(help='Date when the email has been opened the first time')
    replied = fields.Datetime(help='Date when this email has been replied for the first time.')
    bounced = fields.Datetime(help='Date when this email has bounced.')
    # Link tracking
    links_click_ids = fields.One2many('link.tracker.click', 'mailing_trace_id', string='Links click')
    clicked = fields.Datetime(help='Date when customer clicked on at least one tracked link')
    # Status
    state = fields.Selection(compute="_compute_state",
                             selection=[('outgoing', 'Outgoing'),
                                        ('exception', 'Exception'),
                                        ('sent', 'Sent'),
                                        ('opened', 'Opened'),
                                        ('replied', 'Replied'),
                                        ('bounced', 'Bounced'),
                                        ('ignored', 'Ignored')], store=True)
    failure_type = fields.Selection(selection=[
        ("SMTP", "Connection failed (outgoing mail server problem)"),
        ("RECIPIENT", "Invalid email address"),
        ("BOUNCE", "Email address rejected by destination"),
        ("UNKNOWN", "Unknown error"),
    ], string='Failure type')
    state_update = fields.Datetime(compute="_compute_state", string='State Update',
                                   help='Last state update of the mail',
                                   store=True)

    @api.depends('trace_type', 'mass_mailing_id')
    def _compute_display_name(self):
        """Build a readable name of the form '<type>: <mailing> (<id>)'."""
        for trace in self:
            trace.display_name = '%s: %s (%s)' % (trace.trace_type, trace.mass_mailing_id.name, trace.id)

    @api.depends('sent', 'opened', 'clicked', 'replied', 'bounced', 'exception', 'ignored')
    def _compute_state(self):
        """Derive ``state`` from the per-action datetimes.

        Priority, highest first: ignored, exception, replied,
        opened/clicked, bounced, sent, outgoing.

        Bug fix: 'replied' is now tested before 'opened'. Replying to a
        mail also marks it as opened, so testing 'opened' first made
        replied traces show up as merely opened.
        """
        self.update({'state_update': fields.Datetime.now()})
        for stat in self:
            if stat.ignored:
                stat.state = 'ignored'
            elif stat.exception:
                stat.state = 'exception'
            elif stat.replied:
                stat.state = 'replied'
            elif stat.opened or stat.clicked:
                stat.state = 'opened'
            elif stat.bounced:
                stat.state = 'bounced'
            elif stat.sent:
                stat.state = 'sent'
            else:
                stat.state = 'outgoing'

    @api.model_create_multi
    def create(self, values_list):
        """Mirror mail_mail_id into the plain integer field at creation.

        The integer copy survives deletion of the related mail.mail.
        """
        for values in values_list:
            if 'mail_mail_id' in values:
                values['mail_mail_id_int'] = values['mail_mail_id']
        return super(MailingTrace, self).create(values_list)

    def _get_records(self, mail_mail_ids=None, mail_message_ids=None, domain=None):
        """Select traces by mail ids, message ids, or the current recordset.

        ``domain`` is AND-ed with the base selection when provided.
        """
        if not self.ids and mail_mail_ids:
            base_domain = [('mail_mail_id_int', 'in', mail_mail_ids)]
        elif not self.ids and mail_message_ids:
            base_domain = [('message_id', 'in', mail_message_ids)]
        else:
            base_domain = [('id', 'in', self.ids)]
        if domain:
            base_domain = ['&'] + domain + base_domain
        return self.search(base_domain)

    def set_opened(self, mail_mail_ids=None, mail_message_ids=None):
        """Mark matching not-yet-opened traces as opened; clear bounced."""
        traces = self._get_records(mail_mail_ids, mail_message_ids, [('opened', '=', False)])
        traces.write({'opened': fields.Datetime.now(), 'bounced': False})
        return traces

    def set_clicked(self, mail_mail_ids=None, mail_message_ids=None):
        """Mark matching not-yet-clicked traces as clicked."""
        traces = self._get_records(mail_mail_ids, mail_message_ids, [('clicked', '=', False)])
        traces.write({'clicked': fields.Datetime.now()})
        return traces

    def set_replied(self, mail_mail_ids=None, mail_message_ids=None):
        """Mark matching not-yet-replied traces as replied."""
        traces = self._get_records(mail_mail_ids, mail_message_ids, [('replied', '=', False)])
        traces.write({'replied': fields.Datetime.now()})
        return traces

    def set_bounced(self, mail_mail_ids=None, mail_message_ids=None):
        """Mark matching traces as bounced, unless they were opened."""
        traces = self._get_records(mail_mail_ids, mail_message_ids, [('bounced', '=', False), ('opened', '=', False)])
        traces.write({'bounced': fields.Datetime.now()})
        return traces
[FIX] mass_mailing: fix statistic replied state computation
Trace status is a computed field based on other fields. Those fields store the
datetime at which a specific action was performed (opened, replied,
bounced, ...), and the status of the trace is computed from them.
A previous backport fixed mailing trace management to match the heuristics
used in 12.0. However, when replying, the trace was considered as opened
instead of replied. This is now fixed: "replied" takes precedence over "opened".
Task ID 2257717
PR #51445
Forward-port-of: #51319
Forward-port-of: #51247
X-original-commit: 8df2b0b8baabb35fdd44f5cbcc1cf72ebb03cc79
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import api, fields, models
class MailingTrace(models.Model):
    """ MailingTrace models the statistics collected about emails. Those statistics
    are stored in a separated model and table to avoid bloating the mail_mail table
    with statistics values. This also allows to delete emails sent with mass mailing
    without losing the statistics about them. """
    _name = 'mailing.trace'
    _description = 'Mailing Statistics'
    _rec_name = 'id'
    _order = 'scheduled DESC'
    trace_type = fields.Selection([('mail', 'Mail')], string='Type', default='mail', required=True)
    display_name = fields.Char(compute='_compute_display_name')
    # mail data
    mail_mail_id = fields.Many2one('mail.mail', string='Mail', index=True)
    mail_mail_id_int = fields.Integer(
        string='Mail ID (tech)',
        help='ID of the related mail_mail. This field is an integer field because '
             'the related mail_mail can be deleted separately from its statistics. '
             'However the ID is needed for several action and controllers.',
        index=True,
    )
    email = fields.Char(string="Email", help="Normalized email address")
    message_id = fields.Char(string='Message-ID')
    # document
    model = fields.Char(string='Document model')
    res_id = fields.Integer(string='Document ID')
    # campaign / wave data
    mass_mailing_id = fields.Many2one('mailing.mailing', string='Mailing', index=True, ondelete='cascade')
    campaign_id = fields.Many2one(
        related='mass_mailing_id.campaign_id',
        string='Campaign',
        store=True, readonly=True, index=True)
    # Bounce and tracking
    ignored = fields.Datetime(help='Date when the email has been invalidated. '
                                   'Invalid emails are blacklisted, opted-out or invalid email format')
    scheduled = fields.Datetime(help='Date when the email has been created', default=fields.Datetime.now)
    sent = fields.Datetime(help='Date when the email has been sent')
    exception = fields.Datetime(help='Date of technical error leading to the email not being sent')
    opened = fields.Datetime(help='Date when the email has been opened the first time')
    replied = fields.Datetime(help='Date when this email has been replied for the first time.')
    bounced = fields.Datetime(help='Date when this email has bounced.')
    # Link tracking
    links_click_ids = fields.One2many('link.tracker.click', 'mailing_trace_id', string='Links click')
    clicked = fields.Datetime(help='Date when customer clicked on at least one tracked link')
    # Status
    state = fields.Selection(compute="_compute_state",
                             selection=[('outgoing', 'Outgoing'),
                                        ('exception', 'Exception'),
                                        ('sent', 'Sent'),
                                        ('opened', 'Opened'),
                                        ('replied', 'Replied'),
                                        ('bounced', 'Bounced'),
                                        ('ignored', 'Ignored')], store=True)
    failure_type = fields.Selection(selection=[
        ("SMTP", "Connection failed (outgoing mail server problem)"),
        ("RECIPIENT", "Invalid email address"),
        ("BOUNCE", "Email address rejected by destination"),
        ("UNKNOWN", "Unknown error"),
    ], string='Failure type')
    state_update = fields.Datetime(compute="_compute_state", string='State Update',
                                   help='Last state update of the mail',
                                   store=True)
    @api.depends('trace_type', 'mass_mailing_id')
    def _compute_display_name(self):
        """Build a readable name of the form '<type>: <mailing> (<id>)'."""
        for trace in self:
            trace.display_name = '%s: %s (%s)' % (trace.trace_type, trace.mass_mailing_id.name, trace.id)
    @api.depends('sent', 'opened', 'clicked', 'replied', 'bounced', 'exception', 'ignored')
    def _compute_state(self):
        """Derive ``state`` from the per-action datetimes.

        Priority, highest first: ignored, exception, replied,
        opened/clicked, bounced, sent, outgoing. 'replied' is tested
        before 'opened' because a reply also marks the mail opened and
        the stronger interaction must win.
        """
        self.update({'state_update': fields.Datetime.now()})
        for stat in self:
            if stat.ignored:
                stat.state = 'ignored'
            elif stat.exception:
                stat.state = 'exception'
            elif stat.replied:
                stat.state = 'replied'
            elif stat.opened or stat.clicked:
                stat.state = 'opened'
            elif stat.bounced:
                stat.state = 'bounced'
            elif stat.sent:
                stat.state = 'sent'
            else:
                stat.state = 'outgoing'
    @api.model_create_multi
    def create(self, values_list):
        """Mirror mail_mail_id into the plain integer field at creation.

        The integer copy survives deletion of the related mail.mail.
        """
        for values in values_list:
            if 'mail_mail_id' in values:
                values['mail_mail_id_int'] = values['mail_mail_id']
        return super(MailingTrace, self).create(values_list)
    def _get_records(self, mail_mail_ids=None, mail_message_ids=None, domain=None):
        """Select traces by mail ids, message ids, or the current recordset.

        ``domain`` is AND-ed with the base selection when provided.
        """
        if not self.ids and mail_mail_ids:
            base_domain = [('mail_mail_id_int', 'in', mail_mail_ids)]
        elif not self.ids and mail_message_ids:
            base_domain = [('message_id', 'in', mail_message_ids)]
        else:
            base_domain = [('id', 'in', self.ids)]
        if domain:
            base_domain = ['&'] + domain + base_domain
        return self.search(base_domain)
    def set_opened(self, mail_mail_ids=None, mail_message_ids=None):
        """Mark matching not-yet-opened traces as opened; clear bounced."""
        traces = self._get_records(mail_mail_ids, mail_message_ids, [('opened', '=', False)])
        traces.write({'opened': fields.Datetime.now(), 'bounced': False})
        return traces
    def set_clicked(self, mail_mail_ids=None, mail_message_ids=None):
        """Mark matching not-yet-clicked traces as clicked."""
        traces = self._get_records(mail_mail_ids, mail_message_ids, [('clicked', '=', False)])
        traces.write({'clicked': fields.Datetime.now()})
        return traces
    def set_replied(self, mail_mail_ids=None, mail_message_ids=None):
        """Mark matching not-yet-replied traces as replied."""
        traces = self._get_records(mail_mail_ids, mail_message_ids, [('replied', '=', False)])
        traces.write({'replied': fields.Datetime.now()})
        return traces
    def set_bounced(self, mail_mail_ids=None, mail_message_ids=None):
        """Mark matching traces as bounced, unless they were opened."""
        traces = self._get_records(mail_mail_ids, mail_message_ids, [('bounced', '=', False), ('opened', '=', False)])
        traces.write({'bounced': fields.Datetime.now()})
        return traces
|
from roglick.dungeon.maps import SimpleDungeon,ConwayDungeon
from roglick.dungeon import Feature,features
from roglick.engine import event,random
from roglick.events import MoveEvent,ClimbDownEvent,ClimbUpEvent
from roglick.components import PositionComponent
class WorldManager(object):
    """Top-level owner of all world state.

    Holds the entity manager and the dungeon currently in play, and
    forwards map-related events to that dungeon.
    """
    def __init__(self, entity_manager):
        self._em = entity_manager
        dungeon_seed = random.get_int()
        self._dungeon = DungeonManager(self, dungeon_seed)
    @property
    def current_dungeon(self):
        """The DungeonManager for the active dungeon."""
        return self._dungeon
    @property
    def current_map(self):
        """Map of the level the player is currently on."""
        level = self.current_dungeon.current_level
        return level.map
    @event.event_handler(MoveEvent, ClimbDownEvent, ClimbUpEvent)
    def map_handler(self, myevent):
        """Delegate movement and climb events to the active dungeon."""
        self.current_dungeon.map_handler(myevent)
class DungeonManager(object):
    """This object manages all data for a particular dungeon.

    Level seeds and stair locations are generated lazily and cached, so
    revisiting a level reproduces the same layout.
    """
    def __init__(self, world_manager, dungeon_seed):
        self._wm = world_manager
        # Dedicated RNG keyed on the dungeon seed for reproducibility.
        self._random = random.Random(dungeon_seed)
        self._seeds = []
        self._stairs = []
        self._current_level = 0
        self.create_level()
    def create_level(self):
        """(Re)build the LevelManager for the current depth.

        Maps that fail validation (stairs landing on impassable tiles)
        are regenerated until a valid one is produced.
        """
        seed = self.get_level_seed(self._current_level)
        self._level = LevelManager(self, seed)
        maps = 1
        while not self._level_valid():
            # Didn't get a valid map, re-build it
            print("Discarding map {} and trying again...".format(maps))
            self._level.map.make_map()
            maps += 1
        # stairs[0] are this level's down stairs; stairs[1] are the up
        # stairs shared with the level above (none on the top level).
        stairs = self.get_level_stairs(self._current_level)
        self._level.add_stairs_down(stairs=stairs[0])
        if 0 < self._current_level:
            self._level.add_stairs_up(stairs=stairs[1])
    @property
    def current_level(self):
        """The LevelManager for the level currently in play."""
        return self._level
    def _level_valid(self):
        """Return False if any stair location lands on an impassable tile."""
        # Ensure all stairs go on valid tiles
        stairs = self.get_level_stairs(self._current_level)
        for stair_set in stairs:
            for x,y in stair_set:
                if not self._level.map.tiles[x][y].is_passable:
                    return False
        return True
    def get_level_stairs(self, level):
        """Return the cached stairs for ``level``, building as needed.

        Returns ``[down_stairs, up_stairs]``; for a negative level both
        entries are None.
        """
        if level < 0:
            return [None,None]
        while len(self._stairs) <= level:
            self._stairs.append(self.build_level_stairs(len(self._stairs)))
        return self._stairs[level]
    def build_level_stairs(self, level):
        """Generate the stair locations for ``level``.

        A level's up stairs reuse the down stairs of the level above so
        that stairwells connect; the top level (0) has no up stairs.
        """
        if level == 0:
            up_stairs = []
        else:
            up_stairs = self._stairs[level - 1][0]
        down_stairs = []
        # Three down staircases at random cells; _level_valid() later
        # rejects maps where any of them is impassable.
        for n in range(2,5):
            x,y = self.current_level.map.get_random_cell()
            down_stairs.append((x,y))
        return [down_stairs, up_stairs]
    def get_level_seed(self, level):
        """Return the cached RNG seed for ``level``, generating as needed."""
        while len(self._seeds) <= level:
            self._seeds.append(self._random.get_int())
        return self._seeds[level]
    def map_handler(self, myevent):
        """Forward the event to the level, then handle level transitions."""
        self.current_level.map_handler(myevent)
        # Now we can try to handle stairs, if not stopped
        if myevent.propagate:
            pcpos = self._wm._em.get_component(self._wm._em.pc, PositionComponent)
            if myevent.__class__ == ClimbDownEvent:
                self._current_level += 1
                self.create_level()
                # Now make sure we don't embed the PC in a wall...
                # TODO: We'll want to make sure stairs line up, else regen map
                pcpos.x,pcpos.y = self.current_level.map.get_random_cell()
            if myevent.__class__ == ClimbUpEvent:
                # Clamp at the surface: can't climb above level 0.
                self._current_level = max(0, self._current_level - 1)
                self.create_level()
                # Now make sure we don't embed the PC in a wall...
                # TODO: We'll want to make sure stairs line up, else regen map
                pcpos.x,pcpos.y = self.current_level.map.get_random_cell()
class LevelManager(object):
    """This object manages a single level of a dungeon."""
    def __init__(self, dungeon_manager, level_seed):
        self._dm = dungeon_manager
        self._seed = level_seed
        # Per-level RNG keyed on the level seed for reproducible maps.
        self._random = random.Random(self._seed)
        self._stairs_down = []
        self._stairs_up = []
        # Randomly pick one of the two available map generators.
        if self._random.flip_coin():
            self._map = SimpleDungeon(80, 50, self._random)
        else:
            self._map = ConwayDungeon(80, 50, self._random)
    def add_stairs_down(self, stairs):
        """Record and place down-stair features at the given (x, y) cells."""
        self._stairs_down = stairs
        for x,y in stairs:
            self._map.tiles[x][y].add_feature(features.StairsDown)
    def add_stairs_up(self, stairs):
        """Record and place up-stair features at the given (x, y) cells."""
        self._stairs_up = stairs
        for x,y in stairs:
            self._map.tiles[x][y].add_feature(features.StairsUp)
    @property
    def map(self):
        # The generated map object for this level.
        return self._map
    @property
    def stairs_down(self):
        # List of (x, y) positions holding down stairs.
        return self._stairs_down
    @property
    def stairs_up(self):
        # List of (x, y) positions holding up stairs.
        return self._stairs_up
    def map_handler(self, myevent):
        """Vet movement and climb events against the level's tiles.

        Stops the event if the move target is impassable, or if a climb
        is attempted without matching stairs at the entity's position.
        """
        epos = self._dm._wm._em.get_component(myevent.entity_source, PositionComponent)
        if myevent.__class__ == MoveEvent:
            tx = epos.x + myevent.dx
            ty = epos.y + myevent.dy
            if not self.map.tiles[tx][ty].is_passable:
                # Illegal move, prevent this event from continuing
                myevent.stop()
        elif myevent.__class__ == ClimbDownEvent:
            # NOTE(review): this compares the Tile object itself to the
            # StairsDown feature class, while stairs are attached via
            # add_feature(); unless Tile.__eq__ checks its features this
            # can never match and climbing is always stopped — confirm
            # against the Tile implementation.
            if self.map.tiles[epos.x][epos.y] != features.StairsDown:
                # Can't descend without stairs, dummy!
                myevent.stop()
        elif myevent.__class__ == ClimbUpEvent:
            # NOTE(review): same Tile-vs-feature comparison concern as above.
            if self.map.tiles[epos.x][epos.y] != features.StairsUp:
                # Can't ascend without stairs, dummy!
                myevent.stop()
Improved stair generation, map validation
from roglick.dungeon.maps import SimpleDungeon,ConwayDungeon
from roglick.dungeon import Feature,features
from roglick.engine import event,random
from roglick.events import MoveEvent,ClimbDownEvent,ClimbUpEvent
from roglick.components import PositionComponent
class WorldManager(object):
    """The central manager for maintaining all world data."""
    def __init__(self, entity_manager):
        # EntityManager shared with the rest of the engine.
        self._em = entity_manager
        # Single dungeon for now, seeded from the global RNG.
        self._dungeon = DungeonManager(self, random.get_int())
    @property
    def current_dungeon(self):
        # The active DungeonManager.
        return self._dungeon
    @property
    def current_map(self):
        # Convenience accessor for the map of the dungeon's current level.
        return self.current_dungeon.current_level.map
    @event.event_handler(MoveEvent, ClimbDownEvent, ClimbUpEvent)
    def map_handler(self, myevent):
        # Delegate all map-related events to the dungeon.
        self.current_dungeon.map_handler(myevent)
class DungeonManager(object):
    """This object manages all data for a particular dungeon.

    Per-level seeds and stair positions are generated lazily and cached so
    revisiting a depth reproduces the same level.
    """
    def __init__(self, world_manager, dungeon_seed):
        self._wm = world_manager
        # Dungeon-wide RNG; per-level seeds are drawn from it on demand.
        self._random = random.Random(dungeon_seed)
        self._seeds = []
        self._stairs = []
        self._current_level = 0
        self.create_level()
    def create_level(self):
        """Build (or rebuild) the LevelManager for the current depth."""
        seed = self.get_level_seed(self._current_level)
        self._level = LevelManager(self, seed)
        maps = 1
        while not self._level_valid():
            # Didn't get a valid map, re-build it
            print("Discarding map {} and trying again...".format(maps))
            self._level.map.make_map()
            maps += 1
        # Up-stairs mirror the previous level's down-stairs so levels connect.
        stairs_up = self.get_level_stairs(self._current_level-1)
        stairs_down = self.get_level_stairs(self._current_level)
        self._level.add_stairs_up(stairs=stairs_up)
        self._level.add_stairs_down(stairs=stairs_down)
    @property
    def current_level(self):
        # The active LevelManager.
        return self._level
    def _level_valid(self):
        """Return False if a connecting stair would land on an impassable tile."""
        # Ensure all stairs go on valid tiles
        # NOTE(review): only the up-stairs (previous level's down-stairs) are
        # checked here, not the current level's own down-stairs -- confirm
        # whether that is intentional.
        stairs = self.get_level_stairs(self._current_level-1)
        for x,y in stairs:
            if not self._level.map.tiles[x][y].is_passable:
                return False
        return True
    def get_level_stairs(self, level):
        """Return cached stair positions for `level`, generating as needed."""
        if level < 0:
            # Nothing exists above the first level.
            return []
        while len(self._stairs) <= level:
            self._stairs.append(self.build_level_stairs(len(self._stairs)))
        return self._stairs[level]
    def build_level_stairs(self, level):
        """Pick random cells to serve as down-stairs for `level`."""
        down_stairs = []
        # range(2,5) yields exactly three stair locations.
        # NOTE(review): cells are drawn from the *current* level's map even
        # when stairs are being built for a different level -- TODO confirm.
        for n in range(2,5):
            x,y = self.current_level.map.get_random_cell()
            down_stairs.append((x,y))
        return down_stairs
    def get_level_seed(self, level):
        """Return the cached RNG seed for `level`, generating as needed."""
        while len(self._seeds) <= level:
            self._seeds.append(self._random.get_int())
        return self._seeds[level]
    def map_handler(self, myevent):
        """Delegate to the current level, then apply level transitions for stairs."""
        self.current_level.map_handler(myevent)
        # Now we can try to handle stairs, if not stopped
        if myevent.propagate:
            pcpos = self._wm._em.get_component(self._wm._em.pc, PositionComponent)
            if myevent.__class__ == ClimbDownEvent:
                self._current_level += 1
                self.create_level()
                # Now make sure we don't embed the PC in a wall...
                # TODO: We'll want to make sure stairs line up, else regen map
                pcpos.x,pcpos.y = self.current_level.map.get_random_cell()
            if myevent.__class__ == ClimbUpEvent:
                # Level 0 is the top; you cannot climb above it.
                self._current_level = max(0, self._current_level - 1)
                self.create_level()
                # Now make sure we don't embed the PC in a wall...
                # TODO: We'll want to make sure stairs line up, else regen map
                pcpos.x,pcpos.y = self.current_level.map.get_random_cell()
class LevelManager(object):
    """This object manages a single level of a dungeon.

    Holds the generated map plus the stair positions connecting this level
    to its neighbors, and validates movement/stair events against the map.
    """
    def __init__(self, dungeon_manager, level_seed):
        # Parent DungeonManager; used to reach the world's EntityManager.
        self._dm = dungeon_manager
        self._seed = level_seed
        # Level-local RNG so the same seed always regenerates the same level.
        self._random = random.Random(self._seed)
        self._stairs_down = []
        self._stairs_up = []
        # Coin flip chooses between the two 80x50 map generator algorithms.
        if self._random.flip_coin():
            self._map = SimpleDungeon(80, 50, self._random)
        else:
            self._map = ConwayDungeon(80, 50, self._random)
    def add_stairs_down(self, stairs):
        """Record (x, y) down-stair positions and stamp the feature on the map."""
        self._stairs_down = stairs
        for x,y in stairs:
            self._map.tiles[x][y].add_feature(features.StairsDown)
    def add_stairs_up(self, stairs):
        """Record (x, y) up-stair positions and stamp the feature on the map."""
        self._stairs_up = stairs
        for x,y in stairs:
            self._map.tiles[x][y].add_feature(features.StairsUp)
    @property
    def map(self):
        # The generated dungeon map for this level.
        return self._map
    @property
    def stairs_down(self):
        return self._stairs_down
    @property
    def stairs_up(self):
        return self._stairs_up
    def map_handler(self, myevent):
        """Validate movement and stair events; stop() events that are illegal here."""
        epos = self._dm._wm._em.get_component(myevent.entity_source, PositionComponent)
        if myevent.__class__ == MoveEvent:
            tx = epos.x + myevent.dx
            ty = epos.y + myevent.dy
            if not self.map.tiles[tx][ty].is_passable:
                # Illegal move, prevent this event from continuing
                myevent.stop()
        elif myevent.__class__ == ClimbDownEvent:
            # NOTE(review): this compares the Tile object itself against the
            # StairsDown feature class; it looks like it should test whether
            # the tile *contains* the feature -- confirm Tile equality semantics.
            if self.map.tiles[epos.x][epos.y] != features.StairsDown:
                # Can't descend without stairs, dummy!
                myevent.stop()
        elif myevent.__class__ == ClimbUpEvent:
            if self.map.tiles[epos.x][epos.y] != features.StairsUp:
                # Can't ascend without stairs, dummy!
                myevent.stop()
|
from __future__ import print_function
import logging
logger = logging.getLogger(__name__)
import io
import itertools
import os
import time
import warnings
from . import browserlib
from . import _glyph_functions as gf
from .document import Document
from .embed import notebook_div, file_html, autoload_server
from .objects import Axis, Grid, GridPlot, Legend, Plot
from .palettes import brewer
from .plotting_helpers import (
get_default_color, get_default_alpha, _handle_1d_data_args, _list_attr_splat
)
from .resources import Resources
from .session import DEFAULT_SERVER_URL, Session
from .utils import decode_utf8, publish_display_data
# extra imports -- just thigns to add to 'from plotting import *'
from bokeh.objects import ColumnDataSource
# Module-level defaults for the implicit plotting state: the active
# Document plus the (optional) server session, file, and notebook outputs.
_default_document = Document()
_default_session = None
_default_file = None
_default_notebook = None
def curdoc():
    ''' Return the current document.
    Returns:
        doc : the current default document object.
    '''
    # When running inside the Bokeh server within a Flask request context
    # (applets do this, for example), the plotting API should still work but
    # must use the request-scoped document rather than the module-level one.
    try:
        from flask import request
        doc = request.bokeh_server_document
    except (ImportError, RuntimeError, AttributeError):
        return _default_document
    logger.debug("returning config from flask request")
    return doc
def curplot():
    ''' Return the current default plot object.
    Returns:
        plot : the current default plot (or None)
    '''
    # Delegates to the active Document, which tracks the current plot.
    return curdoc().curplot()
def cursession():
    ''' Return the current session, if there is one.
    Returns:
        session : the current default session object (or None)
    '''
    # The default session is set (only) by output_server()/output_notebook().
    return _default_session
def reset_output():
    ''' Deactivate all currently active output modes.
    Subsequent calls to show() will not render until a new output mode is
    activated.
    Returns:
        None
    '''
    global _default_document
    global _default_session
    global _default_file
    global _default_notebook
    # Replace the document outright and clear every output target.
    _default_document = Document()
    _default_session = None
    _default_file = None
    _default_notebook = None
def hold(value=True):
    ''' Set or clear the plot hold status on the current document.
    This is a convenience function that acts on the current document, and is equivalent to curdoc().hold(...)
    Args:
        value (bool, optional) : whether hold should be turned on or off (default: True)
    Returns:
        None
    '''
    curdoc().hold(value)
def figure(**kwargs):
    ''' Activate a new figure for plotting.
    All subsequent plotting operations will affect the new figure.
    This function accepts all plot style keyword parameters.
    Returns:
        None
    '''
    curdoc().figure(**kwargs)
def output_server(docname, session=None, url="default", name=None):
    """ Cause plotting commands to automatically persist plots to a Bokeh server.
    Can use explicitly provided Session for persistence, or the default
    session.
    Args:
        docname (str) : name of document to push on Bokeh server
            An existing document with the same name will be overwritten.
        session (Session, optional) : An explicit session to use (default: None)
            If session is None, use the default session
        url (str, optional) : URL of the Bokeh server (default: "default")
            if url is "default" use session.DEFAULT_SERVER_URL
        name (str, optional) :
            if name is None, use the server URL as the name
    Additional keyword arguments like **username**, **userapikey**,
    and **base_url** can also be supplied.
    Returns:
        None
    .. note:: Generally, this should be called at the beginning of an
        interactive session or the top of a script.
    .. note:: Calling this function replaces any existing default Server session.
    """
    # NOTE(review): the docstring mentions extra keyword arguments, but the
    # signature accepts none -- confirm against the Session constructor.
    global _default_session
    if url == "default":
        url = DEFAULT_SERVER_URL
    if name is None:
        name = url
    # Lazily create (and cache) the default session when none was given.
    if not session:
        if not _default_session:
            _default_session = Session(name=name, root_url=url)
        session = _default_session
    session.use_doc(docname)
    # Sync the current document with its server-side copy.
    session.load_document(curdoc())
def output_notebook(url=None, docname=None, session=None, name=None,
        force=False):
    """ Cause plotting commands to render inline in the IPython notebook.
    If any server parameter (`url`, `session`, or `name`) is supplied, server
    output is configured as well via output_server().
    Args:
        url (str, optional) : URL of a Bokeh server to also persist plots to
        docname (str, optional) : server document name
            (default: a name derived from the current time)
        session (Session, optional) : an explicit server session to use
        name (str, optional) : a name for the server session
        force (bool, optional) : passed to load_notebook when loading BokehJS
    Returns:
        None
    """
    if session or url or name:
        if docname is None:
            docname = "IPython Session at %s" % time.ctime()
        output_server(docname, url=url, session=session, name=name)
    else:
        from . import load_notebook
        load_notebook(force=force)
    global _default_notebook
    _default_notebook = True
def output_file(filename, title="Bokeh Plot", autosave=False, mode="inline", root_dir=None):
    """ Outputs to a static HTML file.
    .. note:: This file will be overwritten each time show() or save() is invoked.
    Args:
        filename (str) : a filename for saving the HTML document
        title (str, optional) : a title for the HTML document (default: "Bokeh Plot")
        autosave (bool, optional) : whether to automatically save (default: False)
            If **autosave** is True, then every time plot() or one of the other
            visual functions is called, this causes the file to be saved. If it
            is False, then the file is only saved upon calling show().
        mode (str, optional) : how to include BokehJS (default: "inline")
            **mode** can be 'inline', 'cdn', 'relative(-dev)' or 'absolute(-dev)'.
            In the 'relative(-dev)' case, **root_dir** can be specified to indicate the
            base directory from which the path to the various static files should be
            computed.
    .. note:: Generally, this should be called at the beginning of an
        interactive session or the top of a script.
    """
    global _default_file
    _default_file = {
        'filename'  : filename,
        'resources' : Resources(mode=mode, root_dir=root_dir, minified=False),
        'autosave'  : autosave,
        'title'     : title,
    }
    # Warn (via print) rather than fail if the target already exists; it
    # will simply be replaced on the next save()/show().
    if os.path.isfile(filename):
        print("Session output file '%s' already exists, will be overwritten." % filename)
def show(obj=None, browser=None, new="tab", url=None):
    """ 'shows' a plot object or the current plot, by auto-raising the window or tab
    displaying the current plot (for file/server output modes) or displaying
    it in an output cell (IPython notebook).
    Args:
        obj (plot object, optional): it accepts a plot object and just shows it.
        browser (str, optional) : browser to show with (default: None)
            For systems that support it, the **browser** argument allows specifying
            which browser to display in, e.g. "safari", "firefox", "opera",
            "windows-default". (See the webbrowser module documentation in the
            standard lib for more details.)
        new (str, optional) : new file output mode (default: "tab")
            For file-based output, opens or raises the browser window
            showing the current output file. If **new** is 'tab', then
            opens a new tab. If **new** is 'window', then opens a new window.
        url (str, optional) : in server output mode, a URL to open instead of
            the link to the session's document.
    """
    filename = _default_file['filename'] if _default_file else None
    session = cursession()
    notebook = _default_notebook
    # Map our string argument to the webbrowser.open argument
    new_param = {'tab': 2, 'window': 1}[new]
    controller = browserlib.get_browser_controller(browser=browser)
    # Default to the current plot (notebook) or the whole document (otherwise).
    if obj is None:
        if notebook:
            plot = curplot()
        else:
            plot = curdoc()
    else:
        plot = obj
    if not plot:
        warnings.warn("No current plot to show. Use renderer functions (circle, rect, etc.) to create a current plot (see http://bokeh.pydata.org/index.html)")
        return
    if notebook and session:
        # Notebook + server: push, then embed an autoload snippet.
        push(session=session)
        snippet = autoload_server(plot, cursession())
        publish_display_data({'text/html': snippet})
    elif notebook:
        # Notebook only: embed the rendered plot div directly.
        publish_display_data({'text/html': notebook_div(plot)})
    elif session:
        # Server only: push, then open the document in a browser.
        push()
        if url:
            controller.open(url, new=new_param)
        else:
            controller.open(session.object_link(curdoc().context))
    elif filename:
        # File output: save, then open the file in a browser.
        save(filename, obj=plot)
        controller.open("file://" + os.path.abspath(filename), new=new_param)
def save(filename=None, resources=None, obj=None):
    """ Updates the file with the data for the current document.
    If a filename is supplied, or output_file(...) has been called, this will
    save the plot to the given filename.
    Args:
        filename (str, optional) : filename to save document under (default: None)
            if `filename` is None, the current output_file(...) filename is used if present
        resources (Resources, optional) : BokehJS resource config to use
            if `resources` is None, the current default resource config is used
        obj (Document or Plot object, optional)
            if provided, then this is the object to save instead of curdoc()
            and its curplot()
    Raises:
        RuntimeError : if `obj` is neither a Document nor a Plot
    Returns:
        None
    """
    # Fall back to the output_file(...) configuration for anything omitted.
    if filename is None and _default_file:
        filename = _default_file['filename']
    if resources is None and _default_file:
        resources = _default_file['resources']
    if not filename:
        warnings.warn("save() called but no filename was supplied and output_file(...) was never called, nothing saved")
        return
    if not resources:
        warnings.warn("save() called but no resources was supplied and output_file(...) was never called, nothing saved")
        return
    if obj is None:
        if not curplot():
            warnings.warn("No current plot to save. Use renderer functions (circle, rect, etc.) to create a current plot (see http://bokeh.pydata.org/index.html)")
            return
        doc = curdoc()
    elif isinstance(obj, Plot):
        # Wrap a bare Plot in a fresh single-plot Document.
        doc = Document()
        doc.add(obj)
    elif isinstance(obj, Document):
        doc = obj
    else:
        raise RuntimeError("Unable to save object of type '%s'" % type(obj))
    # BUG FIX: the title was previously read unconditionally from
    # _default_file, which raised TypeError when filename/resources were
    # passed explicitly without output_file() ever being called.
    title = _default_file['title'] if _default_file else "Bokeh Plot"
    html = file_html(doc, resources, title)
    with io.open(filename, "w", encoding="utf-8") as f:
        f.write(decode_utf8(html))
def push(session=None, document=None):
    """ Updates the server with the data for the current document.
    Args:
        session (Session, optional) : server session to push to (default: None)
            if `session` is None, the current output_server(...) session is used if present
        document (Document, optional) : BokehJS document to push
            if `document` is None, the current default document is pushed
    Returns:
        None
    """
    if not session:
        session = cursession()
    if not document:
        document = curdoc()
    if session:
        # BUG FIX: previously this pushed curdoc() unconditionally, silently
        # ignoring an explicitly supplied `document` argument.
        return session.store_document(document)
    else:
        warnings.warn("push() called but no session was supplied and output_server(...) was never called, nothing pushed")
def _doc_wrap(func):
    # Decorator: copy the docstring from the matching _glyph_functions entry
    # and append a note that this wrapper acts on the current document.
    extra_doc = "\nThis is a convenience function that acts on the current document, and is equivalent to curdoc().%s(...)" % func.__name__
    func.__doc__ = getattr(gf, func.__name__).__doc__ + extra_doc
    return func
def _plot_function(__func__, *args, **kwargs):
    # Invoke the glyph function against the current document, then honor any
    # active auto-store (server) / auto-save (file) output settings.
    retval = __func__(curdoc(), *args, **kwargs)
    if cursession() and curdoc().autostore:
        push()
    if _default_file and _default_file['autosave']:
        save()
    return retval
# ---------------------------------------------------------------------------
# Module-level glyph functions.  Each is a thin wrapper that forwards to the
# matching function in _glyph_functions via _plot_function (so it renders on
# the current document and honors autosave/autostore); @_doc_wrap copies the
# underlying glyph function's docstring onto each wrapper.
# ---------------------------------------------------------------------------
@_doc_wrap
def annular_wedge(x, y, inner_radius, outer_radius, start_angle, end_angle, **kwargs):
    return _plot_function(gf.annular_wedge, x, y, inner_radius, outer_radius, start_angle, end_angle, **kwargs)
@_doc_wrap
def annulus(x, y, inner_radius, outer_radius, **kwargs):
    return _plot_function(gf.annulus, x, y, inner_radius, outer_radius, **kwargs)
@_doc_wrap
def arc(x, y, radius, start_angle, end_angle, **kwargs):
    return _plot_function(gf.arc, x, y, radius, start_angle, end_angle, **kwargs)
@_doc_wrap
def asterisk(x, y, **kwargs):
    return _plot_function(gf.asterisk, x, y, **kwargs)
@_doc_wrap
def bezier(x0, y0, x1, y1, cx0, cy0, cx1, cy1, **kwargs):
    return _plot_function(gf.bezier, x0, y0, x1, y1, cx0, cy0, cx1, cy1, **kwargs)
@_doc_wrap
def circle(x, y, **kwargs):
    return _plot_function(gf.circle, x, y, **kwargs)
@_doc_wrap
def circle_cross(x, y, **kwargs):
    return _plot_function(gf.circle_cross, x, y, **kwargs)
@_doc_wrap
def circle_x(x, y, **kwargs):
    return _plot_function(gf.circle_x, x, y, **kwargs)
@_doc_wrap
def cross(x, y, **kwargs):
    return _plot_function(gf.cross, x, y, **kwargs)
@_doc_wrap
def diamond(x, y, **kwargs):
    return _plot_function(gf.diamond, x, y, **kwargs)
@_doc_wrap
def diamond_cross(x, y, **kwargs):
    return _plot_function(gf.diamond_cross, x, y, **kwargs)
@_doc_wrap
def image(image, x, y, dw, dh, palette, **kwargs):
    return _plot_function(gf.image, image, x, y, dw, dh, palette, **kwargs)
@_doc_wrap
def image_rgba(image, x, y, dw, dh, **kwargs):
    return _plot_function(gf.image_rgba, image, x, y, dw, dh, **kwargs)
@_doc_wrap
def image_url(url, x, y, angle, **kwargs):
    return _plot_function(gf.image_url, url, x, y, angle, **kwargs)
@_doc_wrap
def inverted_triangle(x, y, **kwargs):
    return _plot_function(gf.inverted_triangle, x, y, **kwargs)
@_doc_wrap
def line(x, y, **kwargs):
    return _plot_function(gf.line, x, y, **kwargs)
@_doc_wrap
def multi_line(xs, ys, **kwargs):
    return _plot_function(gf.multi_line, xs, ys, **kwargs)
@_doc_wrap
def oval(x, y, width, height, **kwargs):
    return _plot_function(gf.oval, x, y, width, height, **kwargs)
@_doc_wrap
def patch(x, y, **kwargs):
    return _plot_function(gf.patch, x, y, **kwargs)
@_doc_wrap
def patches(xs, ys, **kwargs):
    return _plot_function(gf.patches, xs, ys, **kwargs)
@_doc_wrap
def quad(left, right, top, bottom, **kwargs):
    return _plot_function(gf.quad, left, right, top, bottom, **kwargs)
@_doc_wrap
def quadratic(x0, y0, x1, y1, cx, cy, **kwargs):
    return _plot_function(gf.quadratic, x0, y0, x1, y1, cx, cy, **kwargs)
@_doc_wrap
def ray(x, y, length, angle, **kwargs):
    return _plot_function(gf.ray, x, y, length, angle, **kwargs)
@_doc_wrap
def rect(x, y, width, height, **kwargs):
    return _plot_function(gf.rect, x, y, width, height, **kwargs)
@_doc_wrap
def segment(x0, y0, x1, y1, **kwargs):
    return _plot_function(gf.segment, x0, y0, x1, y1, **kwargs)
@_doc_wrap
def square(x, y, **kwargs):
    return _plot_function(gf.square, x, y, **kwargs)
@_doc_wrap
def square_cross(x, y, **kwargs):
    return _plot_function(gf.square_cross, x, y, **kwargs)
@_doc_wrap
def square_x(x, y, **kwargs):
    return _plot_function(gf.square_x, x, y, **kwargs)
@_doc_wrap
def text(x, y, text, angle, **kwargs):
    return _plot_function(gf.text, x, y, text, angle, **kwargs)
@_doc_wrap
def triangle(x, y, **kwargs):
    return _plot_function(gf.triangle, x, y, **kwargs)
@_doc_wrap
def wedge(x, y, radius, start_angle, end_angle, **kwargs):
    return _plot_function(gf.wedge, x, y, radius, start_angle, end_angle, **kwargs)
@_doc_wrap
def x(x, y, **kwargs):
    return _plot_function(gf.x, x, y, **kwargs)
# Registry mapping marker-type names (and their single-character plotting
# shorthands such as "*", "+", "o") to the marker wrapper functions above;
# consumed by scatter() and markers().
_marker_types = {
    "asterisk": asterisk,
    "circle": circle,
    "circle_cross": circle_cross,
    "circle_x": circle_x,
    "cross": cross,
    "diamond": diamond,
    "diamond_cross": diamond_cross,
    "inverted_triangle": inverted_triangle,
    "square": square,
    "square_x": square_x,
    "square_cross": square_cross,
    "triangle": triangle,
    "x": x,
    "*": asterisk,
    "+": cross,
    "o": circle,
    "ox": circle_x,
    "o+": circle_cross,
}
def markers():
    """ Prints a list of valid marker types for scatter()
    Returns:
        None
    """
    # sorted() already returns a list, so the previous list() wrapper was
    # redundant; output is identical.
    print(sorted(_marker_types.keys()))
# Keyword names that count as explicit color/alpha settings; scatter()
# injects defaults only when none of these were supplied by the caller.
_color_fields = set(["color", "fill_color", "line_color"])
_alpha_fields = set(["alpha", "fill_alpha", "line_alpha"])
def scatter(*args, **kwargs):
    """ Creates a scatter plot of the given x and y items.
    Args:
        *args : The data to plot. Can be of several forms:
            (X, Y)
                Two 1D arrays or iterables
            (XNAME, YNAME)
                Two bokeh DataSource/ColumnsRef
        marker (str, optional): a valid marker_type, defaults to "circle"
        color (color value, optional): shorthand to set both fill and line color
    All the :ref:`userguide_objects_line_properties` and :ref:`userguide_objects_fill_properties` are
    also accepted as keyword parameters.
    Raises:
        ValueError : if `marker` is not a known marker type
    Examples:
        >>> scatter([1,2,3],[4,5,6], fill_color="red")
        >>> scatter("data1", "data2", source=data_source, ...)
    """
    ds = kwargs.get("source", None)
    # Normalize the positional data into column names plus a datasource.
    # NOTE(review): `names` is unused; the original positional args are
    # forwarded to the marker function below.
    names, datasource = _handle_1d_data_args(args, datasource=ds)
    kwargs["source"] = datasource
    markertype = kwargs.get("marker", "circle")
    # TODO: How to handle this? Just call curplot()?
    # Inject default color/alpha only when the caller supplied no explicit
    # color- or alpha-related keyword.
    if not len(_color_fields.intersection(set(kwargs.keys()))):
        kwargs['color'] = get_default_color()
    if not len(_alpha_fields.intersection(set(kwargs.keys()))):
        kwargs['alpha'] = get_default_alpha()
    if markertype not in _marker_types:
        raise ValueError("Invalid marker type '%s'. Use markers() to see a list of valid marker types." % markertype)
    return _marker_types[markertype](*args, **kwargs)
def gridplot(plot_arrangement, name=None, **kwargs):
    """ Generate a plot that arranges several subplots into a grid.
    Args:
        plot_arrangement (list[:class:`Plot <bokeh.objects.Plot>`]) : plots to arrange in a grid
        name (str) : name for this plot
        **kwargs : additional attributes to pass through to the
            :class:`GridPlot <bokeh.objects.GridPlot>` being created
    .. note:: `plot_arrangement` can be nested, e.g [[p1, p2], [p3, p4]]
    Returns:
        grid_plot: the current :class:`GridPlot <bokeh.objects.GridPlot>`
    """
    # Generalized: extra keyword arguments are now forwarded to GridPlot,
    # so grid-level properties can be set directly from this call.
    grid = GridPlot(children=plot_arrangement, **kwargs)
    if name:
        grid._id = name
    # Walk the plot_arrangement and remove them from the plotcontext,
    # so they don't show up twice
    subplots = itertools.chain.from_iterable(plot_arrangement)
    curdoc().context.children = list(set(curdoc().context.children) - set(subplots))
    curdoc().add(grid)
    curdoc()._current_plot = grid # TODO (bev) don't use private attrs
    # Honor active server/file output modes, as the glyph functions do.
    if _default_session:
        push()
    if _default_file and _default_file['autosave']:
        save()
    return grid
def _axis(*sides):
    """Collect Axis renderers attached to the given sides of the current plot."""
    plot = curplot()
    if plot is None:
        return None
    found = [renderer
             for side in sides
             for renderer in getattr(plot, side, [])
             if isinstance(renderer, Axis)]
    return _list_attr_splat(found)
def xaxis():
    """ Get the current `x` axis object(s)
    Returns:
        Returns x-axis object or splattable list of x-axis objects on the current plot
    """
    return _axis("above", "below")
def yaxis():
    """ Get the current `y` axis object(s)
    Returns:
        Returns y-axis object or splattable list of y-axis objects on the current plot
    """
    return _axis("left", "right")
def axis():
    """ Get all the current axis objects
    Returns:
        Returns axis object or splattable list of axis objects on the current plot
    """
    return _list_attr_splat(xaxis() + yaxis())
def legend():
    """ Get the current :class:`legend <bokeh.objects.Legend>` object(s)
    Returns:
        Returns legend object or splattable list of legend objects on the current plot
    """
    p = curplot()
    if p is None:
        return None
    # Legends live among the plot's renderers.
    legends = [obj for obj in p.renderers if isinstance(obj, Legend)]
    return _list_attr_splat(legends)
def _grid(dimension):
    """Collect Grid renderers with the given dimension from the current plot."""
    plot = curplot()
    if plot is None:
        return None
    matches = [renderer for renderer in plot.renderers
               if isinstance(renderer, Grid) and renderer.dimension == dimension]
    return _list_attr_splat(matches)
def xgrid():
    """ Get the current `x` :class:`grid <bokeh.objects.Grid>` object(s)
    Returns:
        Returns x-grid object or splattable list of x-grid objects on the current plot
    """
    return _grid(0)
def ygrid():
    """ Get the current `y` :class:`grid <bokeh.objects.Grid>` object(s)
    Returns:
        Returns y-grid object or splattable list of y-grid objects on the current plot
    """
    return _grid(1)
def grid():
    """ Get the current :class:`grid <bokeh.objects.Grid>` object(s)
    Returns:
        Returns grid object or splattable list of grid objects on the current plot
    """
    return _list_attr_splat(xgrid() + ygrid())
def load_object(obj):
    """updates object from the server
    """
    # Refresh `obj` in place from the current session's server document.
    cursession().load_object(obj, curdoc())
Added support for passing additional keyword arguments through gridplot()
from __future__ import print_function
import logging
logger = logging.getLogger(__name__)
import io
import itertools
import os
import time
import warnings
from . import browserlib
from . import _glyph_functions as gf
from .document import Document
from .embed import notebook_div, file_html, autoload_server
from .objects import Axis, Grid, GridPlot, Legend, Plot
from .palettes import brewer
from .plotting_helpers import (
get_default_color, get_default_alpha, _handle_1d_data_args, _list_attr_splat
)
from .resources import Resources
from .session import DEFAULT_SERVER_URL, Session
from .utils import decode_utf8, publish_display_data
# extra imports -- just thigns to add to 'from plotting import *'
from bokeh.objects import ColumnDataSource
# Module-level defaults for the implicit plotting state: the active
# Document plus the (optional) server session, file, and notebook outputs.
_default_document = Document()
_default_session = None
_default_file = None
_default_notebook = None
def curdoc():
    ''' Return the current document.
    Returns:
        doc : the current default document object.
    '''
    # When running inside the Bokeh server within a Flask request context
    # (applets do this, for example), the plotting API should still work but
    # must use the request-scoped document rather than the module-level one.
    try:
        from flask import request
        doc = request.bokeh_server_document
    except (ImportError, RuntimeError, AttributeError):
        return _default_document
    logger.debug("returning config from flask request")
    return doc
def curplot():
    ''' Return the current default plot object.
    Returns:
        plot : the current default plot (or None)
    '''
    # Delegates to the active Document, which tracks the current plot.
    return curdoc().curplot()
def cursession():
    ''' Return the current session, if there is one.
    Returns:
        session : the current default session object (or None)
    '''
    # The default session is set (only) by output_server()/output_notebook().
    return _default_session
def reset_output():
    ''' Deactivate all currently active output modes.
    Subsequent calls to show() will not render until a new output mode is
    activated.
    Returns:
        None
    '''
    global _default_document
    global _default_session
    global _default_file
    global _default_notebook
    # Replace the document outright and clear every output target.
    _default_document = Document()
    _default_session = None
    _default_file = None
    _default_notebook = None
def hold(value=True):
    ''' Set or clear the plot hold status on the current document.
    This is a convenience function that acts on the current document, and is equivalent to curdoc().hold(...)
    Args:
        value (bool, optional) : whether hold should be turned on or off (default: True)
    Returns:
        None
    '''
    curdoc().hold(value)
def figure(**kwargs):
    ''' Activate a new figure for plotting.
    All subsequent plotting operations will affect the new figure.
    This function accepts all plot style keyword parameters.
    Returns:
        None
    '''
    curdoc().figure(**kwargs)
def output_server(docname, session=None, url="default", name=None):
    """ Cause plotting commands to automatically persist plots to a Bokeh server.
    Can use explicitly provided Session for persistence, or the default
    session.
    Args:
        docname (str) : name of document to push on Bokeh server
            An existing document with the same name will be overwritten.
        session (Session, optional) : An explicit session to use (default: None)
            If session is None, use the default session
        url (str, optional) : URL of the Bokeh server (default: "default")
            if url is "default" use session.DEFAULT_SERVER_URL
        name (str, optional) :
            if name is None, use the server URL as the name
    Additional keyword arguments like **username**, **userapikey**,
    and **base_url** can also be supplied.
    Returns:
        None
    .. note:: Generally, this should be called at the beginning of an
        interactive session or the top of a script.
    .. note:: Calling this function replaces any existing default Server session.
    """
    # NOTE(review): the docstring mentions extra keyword arguments, but the
    # signature accepts none -- confirm against the Session constructor.
    global _default_session
    if url == "default":
        url = DEFAULT_SERVER_URL
    if name is None:
        name = url
    # Lazily create (and cache) the default session when none was given.
    if not session:
        if not _default_session:
            _default_session = Session(name=name, root_url=url)
        session = _default_session
    session.use_doc(docname)
    # Sync the current document with its server-side copy.
    session.load_document(curdoc())
def output_notebook(url=None, docname=None, session=None, name=None,
        force=False):
    """ Cause plotting commands to render inline in the IPython notebook.
    If any server parameter (`url`, `session`, or `name`) is supplied, server
    output is configured as well via output_server().
    Args:
        url (str, optional) : URL of a Bokeh server to also persist plots to
        docname (str, optional) : server document name
            (default: a name derived from the current time)
        session (Session, optional) : an explicit server session to use
        name (str, optional) : a name for the server session
        force (bool, optional) : passed to load_notebook when loading BokehJS
    Returns:
        None
    """
    if session or url or name:
        if docname is None:
            docname = "IPython Session at %s" % time.ctime()
        output_server(docname, url=url, session=session, name=name)
    else:
        from . import load_notebook
        load_notebook(force=force)
    global _default_notebook
    _default_notebook = True
def output_file(filename, title="Bokeh Plot", autosave=False, mode="inline", root_dir=None):
    """ Outputs to a static HTML file.
    .. note:: This file will be overwritten each time show() or save() is invoked.
    Args:
        filename (str) : a filename for saving the HTML document
        title (str, optional) : a title for the HTML document (default: "Bokeh Plot")
        autosave (bool, optional) : whether to automatically save (default: False)
            If **autosave** is True, then every time plot() or one of the other
            visual functions is called, this causes the file to be saved. If it
            is False, then the file is only saved upon calling show().
        mode (str, optional) : how to include BokehJS (default: "inline")
            **mode** can be 'inline', 'cdn', 'relative(-dev)' or 'absolute(-dev)'.
            In the 'relative(-dev)' case, **root_dir** can be specified to indicate the
            base directory from which the path to the various static files should be
            computed.
    .. note:: Generally, this should be called at the beginning of an
        interactive session or the top of a script.
    """
    global _default_file
    _default_file = {
        'filename'  : filename,
        'resources' : Resources(mode=mode, root_dir=root_dir, minified=False),
        'autosave'  : autosave,
        'title'     : title,
    }
    # Warn (via print) rather than fail if the target already exists; it
    # will simply be replaced on the next save()/show().
    if os.path.isfile(filename):
        print("Session output file '%s' already exists, will be overwritten." % filename)
def show(obj=None, browser=None, new="tab", url=None):
    """ 'shows' a plot object or the current plot, by auto-raising the window or tab
    displaying the current plot (for file/server output modes) or displaying
    it in an output cell (IPython notebook).
    Args:
        obj (plot object, optional): it accepts a plot object and just shows it.
        browser (str, optional) : browser to show with (default: None)
            For systems that support it, the **browser** argument allows specifying
            which browser to display in, e.g. "safari", "firefox", "opera",
            "windows-default". (See the webbrowser module documentation in the
            standard lib for more details.)
        new (str, optional) : new file output mode (default: "tab")
            For file-based output, opens or raises the browser window
            showing the current output file. If **new** is 'tab', then
            opens a new tab. If **new** is 'window', then opens a new window.
        url (str, optional) : in server output mode, a URL to open instead of
            the link to the session's document.
    """
    filename = _default_file['filename'] if _default_file else None
    session = cursession()
    notebook = _default_notebook
    # Map our string argument to the webbrowser.open argument
    new_param = {'tab': 2, 'window': 1}[new]
    controller = browserlib.get_browser_controller(browser=browser)
    # Default to the current plot (notebook) or the whole document (otherwise).
    if obj is None:
        if notebook:
            plot = curplot()
        else:
            plot = curdoc()
    else:
        plot = obj
    if not plot:
        warnings.warn("No current plot to show. Use renderer functions (circle, rect, etc.) to create a current plot (see http://bokeh.pydata.org/index.html)")
        return
    if notebook and session:
        # Notebook + server: push, then embed an autoload snippet.
        push(session=session)
        snippet = autoload_server(plot, cursession())
        publish_display_data({'text/html': snippet})
    elif notebook:
        # Notebook only: embed the rendered plot div directly.
        publish_display_data({'text/html': notebook_div(plot)})
    elif session:
        # Server only: push, then open the document in a browser.
        push()
        if url:
            controller.open(url, new=new_param)
        else:
            controller.open(session.object_link(curdoc().context))
    elif filename:
        # File output: save, then open the file in a browser.
        save(filename, obj=plot)
        controller.open("file://" + os.path.abspath(filename), new=new_param)
def save(filename=None, resources=None, obj=None):
    """ Updates the file with the data for the current document.
    If a filename is supplied, or output_file(...) has been called, this will
    save the plot to the given filename.
    Args:
        filename (str, optional) : filename to save document under (default: None)
            if `filename` is None, the current output_file(...) filename is used if present
        resources (Resources, optional) : BokehJS resource config to use
            if `resources` is None, the current default resource config is used
        obj (Document or Plot object, optional)
            if provided, then this is the object to save instead of curdoc()
            and its curplot()
    Raises:
        RuntimeError : if `obj` is neither a Document nor a Plot
    Returns:
        None
    """
    # Fall back to the output_file(...) configuration for anything omitted.
    if filename is None and _default_file:
        filename = _default_file['filename']
    if resources is None and _default_file:
        resources = _default_file['resources']
    if not filename:
        warnings.warn("save() called but no filename was supplied and output_file(...) was never called, nothing saved")
        return
    if not resources:
        warnings.warn("save() called but no resources was supplied and output_file(...) was never called, nothing saved")
        return
    if obj is None:
        if not curplot():
            warnings.warn("No current plot to save. Use renderer functions (circle, rect, etc.) to create a current plot (see http://bokeh.pydata.org/index.html)")
            return
        doc = curdoc()
    elif isinstance(obj, Plot):
        # Wrap a bare Plot in a fresh single-plot Document.
        doc = Document()
        doc.add(obj)
    elif isinstance(obj, Document):
        doc = obj
    else:
        raise RuntimeError("Unable to save object of type '%s'" % type(obj))
    # BUG FIX: the title was previously read unconditionally from
    # _default_file, which raised TypeError when filename/resources were
    # passed explicitly without output_file() ever being called.
    title = _default_file['title'] if _default_file else "Bokeh Plot"
    html = file_html(doc, resources, title)
    with io.open(filename, "w", encoding="utf-8") as f:
        f.write(decode_utf8(html))
def push(session=None, document=None):
    """ Updates the server with the data for the current document.
    Args:
        session (Session, optional) : server session to push to (default: None)
            if `session` is None, the current output_server(...) session is used if present
        document (Document, optional) : BokehJS document to push
            if `document` is None, the current default document is pushed
    Returns:
        None
    """
    if not session:
        session = cursession()
    if not document:
        document = curdoc()
    if session:
        # BUG FIX: previously this pushed curdoc() unconditionally, silently
        # ignoring an explicitly supplied `document` argument.
        return session.store_document(document)
    else:
        warnings.warn("push() called but no session was supplied and output_server(...) was never called, nothing pushed")
def _doc_wrap(func):
    """Decorator: copy the docstring of the same-named glyph function from
    ``gf`` onto *func*, appending a note about the implicit current-document
    behavior of this module-level convenience wrapper."""
    base = getattr(gf, func.__name__).__doc__
    note = "\nThis is a convenience function that acts on the current document, and is equivalent to curdoc().%s(...)" % func.__name__
    func.__doc__ = base + note
    return func
def _plot_function(__func__, *args, **kwargs):
    """Invoke *__func__* against the current document, then honor any
    auto-push (server session) and auto-save (output file) settings."""
    result = __func__(curdoc(), *args, **kwargs)
    should_push = cursession() and curdoc().autostore
    if should_push:
        push()
    should_save = _default_file and _default_file['autosave']
    if should_save:
        save()
    return result
# ---------------------------------------------------------------------------
# Glyph convenience wrappers.  Each function delegates to the same-named
# function in ``gf`` through _plot_function (which acts on the current
# document and triggers auto-push/auto-save), and inherits that function's
# docstring via the @_doc_wrap decorator.
# ---------------------------------------------------------------------------
@_doc_wrap
def annular_wedge(x, y, inner_radius, outer_radius, start_angle, end_angle, **kwargs):
    return _plot_function(gf.annular_wedge, x, y, inner_radius, outer_radius, start_angle, end_angle, **kwargs)
@_doc_wrap
def annulus(x, y, inner_radius, outer_radius, **kwargs):
    return _plot_function(gf.annulus, x, y, inner_radius, outer_radius, **kwargs)
@_doc_wrap
def arc(x, y, radius, start_angle, end_angle, **kwargs):
    return _plot_function(gf.arc, x, y, radius, start_angle, end_angle, **kwargs)
@_doc_wrap
def asterisk(x, y, **kwargs):
    return _plot_function(gf.asterisk, x, y, **kwargs)
@_doc_wrap
def bezier(x0, y0, x1, y1, cx0, cy0, cx1, cy1, **kwargs):
    return _plot_function(gf.bezier, x0, y0, x1, y1, cx0, cy0, cx1, cy1, **kwargs)
@_doc_wrap
def circle(x, y, **kwargs):
    return _plot_function(gf.circle, x, y, **kwargs)
@_doc_wrap
def circle_cross(x, y, **kwargs):
    return _plot_function(gf.circle_cross, x, y, **kwargs)
@_doc_wrap
def circle_x(x, y, **kwargs):
    return _plot_function(gf.circle_x, x, y, **kwargs)
@_doc_wrap
def cross(x, y, **kwargs):
    return _plot_function(gf.cross, x, y, **kwargs)
@_doc_wrap
def diamond(x, y, **kwargs):
    return _plot_function(gf.diamond, x, y, **kwargs)
@_doc_wrap
def diamond_cross(x, y, **kwargs):
    return _plot_function(gf.diamond_cross, x, y, **kwargs)
@_doc_wrap
def image(image, x, y, dw, dh, palette, **kwargs):
    return _plot_function(gf.image, image, x, y, dw, dh, palette, **kwargs)
@_doc_wrap
def image_rgba(image, x, y, dw, dh, **kwargs):
    return _plot_function(gf.image_rgba, image, x, y, dw, dh, **kwargs)
@_doc_wrap
def image_url(url, x, y, angle, **kwargs):
    return _plot_function(gf.image_url, url, x, y, angle, **kwargs)
@_doc_wrap
def inverted_triangle(x, y, **kwargs):
    return _plot_function(gf.inverted_triangle, x, y, **kwargs)
@_doc_wrap
def line(x, y, **kwargs):
    return _plot_function(gf.line, x, y, **kwargs)
@_doc_wrap
def multi_line(xs, ys, **kwargs):
    return _plot_function(gf.multi_line, xs, ys, **kwargs)
@_doc_wrap
def oval(x, y, width, height, **kwargs):
    return _plot_function(gf.oval, x, y, width, height, **kwargs)
@_doc_wrap
def patch(x, y, **kwargs):
    return _plot_function(gf.patch, x, y, **kwargs)
@_doc_wrap
def patches(xs, ys, **kwargs):
    return _plot_function(gf.patches, xs, ys, **kwargs)
@_doc_wrap
def quad(left, right, top, bottom, **kwargs):
    return _plot_function(gf.quad, left, right, top, bottom, **kwargs)
@_doc_wrap
def quadratic(x0, y0, x1, y1, cx, cy, **kwargs):
    return _plot_function(gf.quadratic, x0, y0, x1, y1, cx, cy, **kwargs)
@_doc_wrap
def ray(x, y, length, angle, **kwargs):
    return _plot_function(gf.ray, x, y, length, angle, **kwargs)
@_doc_wrap
def rect(x, y, width, height, **kwargs):
    return _plot_function(gf.rect, x, y, width, height, **kwargs)
@_doc_wrap
def segment(x0, y0, x1, y1, **kwargs):
    return _plot_function(gf.segment, x0, y0, x1, y1, **kwargs)
@_doc_wrap
def square(x, y, **kwargs):
    return _plot_function(gf.square, x, y, **kwargs)
@_doc_wrap
def square_cross(x, y, **kwargs):
    return _plot_function(gf.square_cross, x, y, **kwargs)
@_doc_wrap
def square_x(x, y, **kwargs):
    return _plot_function(gf.square_x, x, y, **kwargs)
@_doc_wrap
def text(x, y, text, angle, **kwargs):
    return _plot_function(gf.text, x, y, text, angle, **kwargs)
@_doc_wrap
def triangle(x, y, **kwargs):
    return _plot_function(gf.triangle, x, y, **kwargs)
@_doc_wrap
def wedge(x, y, radius, start_angle, end_angle, **kwargs):
    return _plot_function(gf.wedge, x, y, radius, start_angle, end_angle, **kwargs)
@_doc_wrap
def x(x, y, **kwargs):
    return _plot_function(gf.x, x, y, **kwargs)
# Mapping from marker-type name (and single-character shorthand) to the
# corresponding glyph wrapper; scatter() dispatches on its "marker" keyword
# through this table, and markers() prints its keys.
_marker_types = {
    "asterisk": asterisk,
    "circle": circle,
    "circle_cross": circle_cross,
    "circle_x": circle_x,
    "cross": cross,
    "diamond": diamond,
    "diamond_cross": diamond_cross,
    "inverted_triangle": inverted_triangle,
    "square": square,
    "square_x": square_x,
    "square_cross": square_cross,
    "triangle": triangle,
    "x": x,
    # shorthand aliases
    "*": asterisk,
    "+": cross,
    "o": circle,
    "ox": circle_x,
    "o+": circle_cross,
}
def markers():
    """ Prints a list of valid marker types for scatter()
    Returns:
        None
    """
    # sorted() already returns a list; the previous list(sorted(...)) wrapper
    # was redundant.
    print(sorted(_marker_types.keys()))
_color_fields = set(["color", "fill_color", "line_color"])
_alpha_fields = set(["alpha", "fill_alpha", "line_alpha"])
def scatter(*args, **kwargs):
    """ Creates a scatter plot of the given x and y items.
    Args:
        *args : The data to plot. Can be of several forms:
            (X, Y)
                Two 1D arrays or iterables
            (XNAME, YNAME)
                Two bokeh DataSource/ColumnsRef
        marker (str, optional): a valid marker_type, defaults to "circle"
        color (color value, optional): shorthand to set both fill and line color
    All the :ref:`userguide_objects_line_properties` and :ref:`userguide_objects_fill_properties` are
    also accepted as keyword parameters.
    Raises:
        ValueError: if the requested marker is not in _marker_types.
    Examples:
        >>> scatter([1,2,3],[4,5,6], fill_color="red")
        >>> scatter("data1", "data2", source=data_source, ...)
    """
    # Normalize the positional data into a datasource, which is re-injected
    # into kwargs for the downstream glyph wrapper.
    ds = kwargs.get("source", None)
    names, datasource = _handle_1d_data_args(args, datasource=ds)
    kwargs["source"] = datasource
    markertype = kwargs.get("marker", "circle")
    # TODO: How to handle this? Just call curplot()?
    # Inject default color/alpha only when the caller specified none of the
    # corresponding keyword variants.
    if not len(_color_fields.intersection(set(kwargs.keys()))):
        kwargs['color'] = get_default_color()
    if not len(_alpha_fields.intersection(set(kwargs.keys()))):
        kwargs['alpha'] = get_default_alpha()
    if markertype not in _marker_types:
        raise ValueError("Invalid marker type '%s'. Use markers() to see a list of valid marker types." % markertype)
    # NOTE(review): the "marker" key is left in kwargs when dispatching to the
    # glyph wrapper -- presumably it is tolerated downstream; confirm.
    return _marker_types[markertype](*args, **kwargs)
def gridplot(plot_arrangement, name=None, **kwargs):
    """ Generate a plot that arranges several subplots into a grid.
    Args:
        plot_arrangement (list[:class:`Plot <bokeh.objects.Plot>`]) : plots to arrange in a grid
        name (str) : name for this plot
        **kwargs: additional attributes to pass in to GridPlot() constructor
    .. note:: `plot_arrangement` can be nested, e.g [[p1, p2], [p3, p4]]
    Returns:
        grid_plot: the current :class:`GridPlot <bokeh.objects.GridPlot>`
    """
    grid = GridPlot(children=plot_arrangement, **kwargs)
    if name:
        grid._id = name
    # Walk the plot_arrangement and remove them from the plotcontext,
    # so they don't show up twice
    subplots = itertools.chain.from_iterable(plot_arrangement)
    curdoc().context.children = list(set(curdoc().context.children) - set(subplots))
    curdoc().add(grid)
    curdoc()._current_plot = grid # TODO (bev) don't use private attrs
    # Mirror _plot_function's auto-push/auto-save behavior.
    # NOTE(review): this checks _default_session directly while _plot_function
    # checks cursession() -- confirm the two stay in sync.
    if _default_session:
        push()
    if _default_file and _default_file['autosave']:
        save()
    return grid
def _axis(*sides):
    """Collect the Axis objects found on the given side attributes
    (e.g. "above", "left") of the current plot.

    Returns None when there is no current plot, otherwise a splattable
    list of the Axis instances found.
    """
    plot = curplot()
    if plot is None:
        return None
    found = [obj
             for side in sides
             for obj in getattr(plot, side, [])
             if isinstance(obj, Axis)]
    return _list_attr_splat(found)
def xaxis():
    """ Get the current `x` axis object(s)
    Returns:
        Returns x-axis object or splattable list of x-axis objects on the current plot
    """
    return _axis("above", "below")
def yaxis():
    """ Get the current `y` axis object(s)
    Returns:
        Returns y-axis object or splattable list of y-axis objects on the current plot
    """
    return _axis("left", "right")
def axis():
    """ Get all current axis objects (both x and y)
    Returns:
        Returns axis object or splattable list of all axis objects on the current plot
    """
    return _list_attr_splat(xaxis() + yaxis())
def legend():
    """ Get the current :class:`legend <bokeh.objects.Legend>` object(s)
    Returns:
        Returns legend object or splattable list of legend objects on the current plot
    """
    plot = curplot()
    if plot is None:
        return None
    found = [r for r in plot.renderers if isinstance(r, Legend)]
    return _list_attr_splat(found)
def _grid(dimension):
    """Return the Grid renderer(s) with the given dimension on the current
    plot, or None when no plot is current."""
    plot = curplot()
    if plot is None:
        return None
    matches = [r for r in plot.renderers
               if isinstance(r, Grid) and r.dimension == dimension]
    return _list_attr_splat(matches)
def xgrid():
    """ Get the current `x` :class:`grid <bokeh.objects.Grid>` object(s)
    Returns:
        Returns x-grid object or splattable list of x-grid objects on the current plot
    """
    return _grid(0)
def ygrid():
    """ Get the current `y` :class:`grid <bokeh.objects.Grid>` object(s)
    Returns:
        Returns y-grid object or splattable list of y-grid objects on the current plot
    """
    return _grid(1)
def grid():
    """ Get all current :class:`grid <bokeh.objects.Grid>` object(s) (both x and y)
    Returns:
        Returns grid object or splattable list of grid objects on the current plot
    """
    return _list_attr_splat(xgrid() + ygrid())
def load_object(obj):
    """updates object from the server

    Fetches the server-side state of *obj* (within the current document)
    through the current session and applies it to the local object.
    """
    cursession().load_object(obj, curdoc())
|
import unittest
import pycqed as pq
import numpy as np
import os
from pycqed.analysis_v2 import measurement_analysis as ma
class Test_flipping_analysis(unittest.TestCase):
    """Regression tests for the scale factor extracted by ma.FlippingAnalysis."""
    @classmethod
    def setUpClass(self):
        # NOTE(review): conventionally a classmethod's first argument is
        # named `cls`; here `self` is bound to the class, so the assignment
        # below creates a class attribute.
        self.datadir = os.path.join(pq.__path__[0], 'tests', 'test_data')
        ma.a_tools.datadir = self.datadir
    def test_flipping_analysis(self):
        # this test is based on an experiment with a known
        # added detuning in the amplitude. The test tests that the analysis
        # works for a range of known scale factors.
        # 20% detuning only works for coarse
        self._check_scaling('20170726_164507', 0.8, 1)
        self._check_scaling('20170726_164536', 0.9, 1)
        self._check_scaling('20170726_164550', 0.9, 1)
        self._check_scaling('20170726_164605', 0.95, 2)
        self._check_scaling('20170726_164619', 0.95, 2)
        self._check_scaling('20170726_164635', 0.99, 2)
        self._check_scaling('20170726_164649', 0.99, 2)
        self._check_scaling('20170726_164704', 1, 2)
        self._check_scaling('20170726_164718', 1, 2)
        self._check_scaling('20170726_164733', 1.01, 2)
        self._check_scaling('20170726_164747', 1.01, 2)
        self._check_scaling('20170726_164802', 1.05, 1)
        self._check_scaling('20170726_164816', 1.05, 1)
        self._check_scaling('20170726_164831', 1.1, 1)
        self._check_scaling('20170726_164845', 1.1, 1)
        # 20% detuning only works for coarse
        self._check_scaling('20170726_164901', 1.2, 1)
        # Test running it once with showing the initial fit
        ma.FlippingAnalysis(t_start='20170726_164901',
                            options_dict={'plot_init': True})
    def _check_scaling(self, timestamp, known_detuning, places):
        # scale_factor * known_detuning should be ~1 when the analysis
        # correctly recovers the deliberately applied detuning.
        a = ma.FlippingAnalysis(t_start=timestamp)
        s = a.get_scale_factor()
        self.assertAlmostEqual(s*known_detuning, 1, places=places)
        print('Scale factor {:.4f} known detuning {:.4f}'.format(
            s, known_detuning))
class Test_CZ_1QPhaseCal_Analysis(unittest.TestCase):
    """Regression tests for the zero-phase-difference intersect of
    ma.CZ_1QPhaseCal_Analysis, for both analysis channels."""
    @classmethod
    def setUpClass(self):
        # NOTE(review): classmethod arg conventionally named `cls`.
        self.datadir = os.path.join(pq.__path__[0], 'tests', 'test_data')
        ma.a_tools.datadir = self.datadir
    def test_zero_phase_diff_intersect(self):
        a = ma.CZ_1QPhaseCal_Analysis(t_start='20171126_180251',
                                      options_dict={'ch_idx':1})
        self.assertAlmostEqual(a.get_zero_phase_diff_intersect(),
                               .058, places=3)
        a = ma.CZ_1QPhaseCal_Analysis(t_start='20171126_181327',
                                      options_dict={'ch_idx':0})
        self.assertAlmostEqual(a.get_zero_phase_diff_intersect(),
                               .1218, places=3)
class Test_Idling_Error_Rate_Analyisis(unittest.TestCase):
    """Regression tests for ma.Idling_Error_Rate_Analyisis fit parameters.

    NOTE: "Analyisis" is misspelled to match the class name in the analysis
    library; renaming would break the reference.
    """
    @classmethod
    def setUpClass(self):
        # NOTE(review): classmethod arg conventionally named `cls`.
        self.datadir = os.path.join(pq.__path__[0], 'tests', 'test_data')
        ma.a_tools.datadir = self.datadir
    @unittest.skip("TODO: fix this test")
    def test_error_rates_vary_N2(self):
        a=ma.Idling_Error_Rate_Analyisis(
            t_start ='20180210_181633',
            options_dict={'close_figs':True, 'vary_N2': True})
        # Reference best-fit values for the '+', '0' and '1' preparations.
        expected_dict = {'A': 0.41685563870942149,
                         'N1': 1064.7100611208791,
                         'N2': 3644.550952436859,
                         'offset': 0.52121402524448934}
        for key, value in expected_dict.items():
            np.testing.assert_almost_equal(
                a.fit_res['fit +'].best_values[key], value, decimal=2)
        expected_dict = {'A': -0.13013585779457398,
                         'N1': 1138.3895116903586,
                         'N2': 601415.64642756886,
                         'offset': 0.14572799876310505}
        for key, value in expected_dict.items():
            np.testing.assert_almost_equal(
                a.fit_res['fit 0'].best_values[key], value, decimal=2)
        expected_dict = {'A': 0.74324542246644376,
                         'N1': 939.61974247762646,
                         'N2': 3566698.2870284803,
                         'offset': 0.18301612896797623}
        for key, value in expected_dict.items():
            np.testing.assert_almost_equal(
                a.fit_res['fit 1'].best_values[key], value, decimal=2)
    def test_error_rates_fixed_N2(self):
        a=ma.Idling_Error_Rate_Analyisis(
            t_start ='20180210_181633',
            options_dict={'close_figs':True, 'vary_N2': False})
        # With vary_N2=False the fit pins N2 at the huge fixed value 1e+21.
        expected_dict = {'A': 0.43481425072120633,
                         'N1': 1034.9644095297574,
                         'N2': 1e+21,
                         'offset': 0.50671519356947314}
        for key, value in expected_dict.items():
            np.testing.assert_almost_equal(
                a.fit_res['fit +'].best_values[key], value, decimal=2)
        expected_dict = {'A': -0.13013614484482647,
                         'N1': 1138.3896694924019,
                         'N2': 1e+21,
                         'offset': 0.1457282565842071}
        for key, value in expected_dict.items():
            np.testing.assert_almost_equal(
                a.fit_res['fit 0'].best_values[key], value, decimal=2)
        expected_dict = {'A': 0.7432454022744126,
                         'N1': 939.61870748568992,
                         'N2': 1e+21,
                         'offset': 0.18301632862249007}
        for key, value in expected_dict.items():
            np.testing.assert_almost_equal(
                a.fit_res['fit 1'].best_values[key], value, decimal=2)
class Test_Conditional_Oscillation_Analysis(unittest.TestCase):
    """Regression test for the parameters extracted by
    ma.Conditional_Oscillation_Analysis."""
    @classmethod
    def setUpClass(self):
        self.datadir = os.path.join(pq.__path__[0], 'tests', 'test_data')
        ma.a_tools.datadir = self.datadir
    def test_condition_oscillation_extracted_pars(self):
        a = ma.Conditional_Oscillation_Analysis(t_start='20180130_191229')
        # (value, stderr) pairs for each extracted quantity, flattened.
        extracted = np.array([a.proc_data_dict['phi_cond'][0],
                              a.proc_data_dict['phi_cond'][1],
                              a.proc_data_dict['phi_0'][0],
                              a.proc_data_dict['phi_0'][1],
                              a.proc_data_dict['phi_1'][0],
                              a.proc_data_dict['phi_1'][1],
                              a.proc_data_dict['osc_amp_0'][0],
                              a.proc_data_dict['osc_amp_0'][1],
                              a.proc_data_dict['osc_amp_1'][0],
                              a.proc_data_dict['osc_amp_1'][1],
                              a.proc_data_dict['offs_diff'][0],
                              a.proc_data_dict['offs_diff'][1],
                              a.proc_data_dict['osc_offs_0'][0],
                              a.proc_data_dict['osc_offs_0'][1],
                              a.proc_data_dict['osc_offs_1'][0],
                              a.proc_data_dict['osc_offs_1'][1]])
        expected = np.array([178.48651885251698,
                             2.9898741913646272,
                             3.3113606223925696,
                             1.691099641377918,
                             181.79787947490954,
                             2.4656702300023334,
                             0.3236975712917689,
                             0.009554015995104392,
                             0.27746120839251875,
                             0.011940273429767528,
                             0.00150761346297007,
                             0.010813168390439071,
                             0.4868083822944365,
                             0.0067557093031373455,
                             0.4883159957574066,
                             0.0084430446197739737])
        # Compare at 3 decimals: these are fit results, so the default
        # decimal=7 comparison is brittle (the revised copy of this test in
        # this file already uses decimal=3 -- keep both consistent).
        np.testing.assert_almost_equal(extracted, expected, decimal=3)
updated test decimal
import unittest
import pycqed as pq
import numpy as np
import os
from pycqed.analysis_v2 import measurement_analysis as ma
class Test_flipping_analysis(unittest.TestCase):
    """Regression tests for the scale factor extracted by ma.FlippingAnalysis."""
    @classmethod
    def setUpClass(self):
        # NOTE(review): conventionally a classmethod's first argument is
        # named `cls`; here `self` is bound to the class, so the assignment
        # below creates a class attribute.
        self.datadir = os.path.join(pq.__path__[0], 'tests', 'test_data')
        ma.a_tools.datadir = self.datadir
    def test_flipping_analysis(self):
        # this test is based on an experiment with a known
        # added detuning in the amplitude. The test tests that the analysis
        # works for a range of known scale factors.
        # 20% detuning only works for coarse
        self._check_scaling('20170726_164507', 0.8, 1)
        self._check_scaling('20170726_164536', 0.9, 1)
        self._check_scaling('20170726_164550', 0.9, 1)
        self._check_scaling('20170726_164605', 0.95, 2)
        self._check_scaling('20170726_164619', 0.95, 2)
        self._check_scaling('20170726_164635', 0.99, 2)
        self._check_scaling('20170726_164649', 0.99, 2)
        self._check_scaling('20170726_164704', 1, 2)
        self._check_scaling('20170726_164718', 1, 2)
        self._check_scaling('20170726_164733', 1.01, 2)
        self._check_scaling('20170726_164747', 1.01, 2)
        self._check_scaling('20170726_164802', 1.05, 1)
        self._check_scaling('20170726_164816', 1.05, 1)
        self._check_scaling('20170726_164831', 1.1, 1)
        self._check_scaling('20170726_164845', 1.1, 1)
        # 20% detuning only works for coarse
        self._check_scaling('20170726_164901', 1.2, 1)
        # Test running it once with showing the initial fit
        ma.FlippingAnalysis(t_start='20170726_164901',
                            options_dict={'plot_init': True})
    def _check_scaling(self, timestamp, known_detuning, places):
        # scale_factor * known_detuning should be ~1 when the analysis
        # correctly recovers the deliberately applied detuning.
        a = ma.FlippingAnalysis(t_start=timestamp)
        s = a.get_scale_factor()
        self.assertAlmostEqual(s*known_detuning, 1, places=places)
        print('Scale factor {:.4f} known detuning {:.4f}'.format(
            s, known_detuning))
class Test_CZ_1QPhaseCal_Analysis(unittest.TestCase):
    """Regression tests for the zero-phase-difference intersect of
    ma.CZ_1QPhaseCal_Analysis, for both analysis channels."""
    @classmethod
    def setUpClass(self):
        # NOTE(review): classmethod arg conventionally named `cls`.
        self.datadir = os.path.join(pq.__path__[0], 'tests', 'test_data')
        ma.a_tools.datadir = self.datadir
    def test_zero_phase_diff_intersect(self):
        a = ma.CZ_1QPhaseCal_Analysis(t_start='20171126_180251',
                                      options_dict={'ch_idx':1})
        self.assertAlmostEqual(a.get_zero_phase_diff_intersect(),
                               .058, places=3)
        a = ma.CZ_1QPhaseCal_Analysis(t_start='20171126_181327',
                                      options_dict={'ch_idx':0})
        self.assertAlmostEqual(a.get_zero_phase_diff_intersect(),
                               .1218, places=3)
class Test_Idling_Error_Rate_Analyisis(unittest.TestCase):
    """Regression tests for ma.Idling_Error_Rate_Analyisis fit parameters.

    NOTE: "Analyisis" is misspelled to match the class name in the analysis
    library; renaming would break the reference.
    """
    @classmethod
    def setUpClass(self):
        # NOTE(review): classmethod arg conventionally named `cls`.
        self.datadir = os.path.join(pq.__path__[0], 'tests', 'test_data')
        ma.a_tools.datadir = self.datadir
    @unittest.skip("TODO: fix this test")
    def test_error_rates_vary_N2(self):
        a=ma.Idling_Error_Rate_Analyisis(
            t_start ='20180210_181633',
            options_dict={'close_figs':True, 'vary_N2': True})
        # Reference best-fit values for the '+', '0' and '1' preparations.
        expected_dict = {'A': 0.41685563870942149,
                         'N1': 1064.7100611208791,
                         'N2': 3644.550952436859,
                         'offset': 0.52121402524448934}
        for key, value in expected_dict.items():
            np.testing.assert_almost_equal(
                a.fit_res['fit +'].best_values[key], value, decimal=2)
        expected_dict = {'A': -0.13013585779457398,
                         'N1': 1138.3895116903586,
                         'N2': 601415.64642756886,
                         'offset': 0.14572799876310505}
        for key, value in expected_dict.items():
            np.testing.assert_almost_equal(
                a.fit_res['fit 0'].best_values[key], value, decimal=2)
        expected_dict = {'A': 0.74324542246644376,
                         'N1': 939.61974247762646,
                         'N2': 3566698.2870284803,
                         'offset': 0.18301612896797623}
        for key, value in expected_dict.items():
            np.testing.assert_almost_equal(
                a.fit_res['fit 1'].best_values[key], value, decimal=2)
    def test_error_rates_fixed_N2(self):
        a=ma.Idling_Error_Rate_Analyisis(
            t_start ='20180210_181633',
            options_dict={'close_figs':True, 'vary_N2': False})
        # With vary_N2=False the fit pins N2 at the huge fixed value 1e+21.
        expected_dict = {'A': 0.43481425072120633,
                         'N1': 1034.9644095297574,
                         'N2': 1e+21,
                         'offset': 0.50671519356947314}
        for key, value in expected_dict.items():
            np.testing.assert_almost_equal(
                a.fit_res['fit +'].best_values[key], value, decimal=2)
        expected_dict = {'A': -0.13013614484482647,
                         'N1': 1138.3896694924019,
                         'N2': 1e+21,
                         'offset': 0.1457282565842071}
        for key, value in expected_dict.items():
            np.testing.assert_almost_equal(
                a.fit_res['fit 0'].best_values[key], value, decimal=2)
        expected_dict = {'A': 0.7432454022744126,
                         'N1': 939.61870748568992,
                         'N2': 1e+21,
                         'offset': 0.18301632862249007}
        for key, value in expected_dict.items():
            np.testing.assert_almost_equal(
                a.fit_res['fit 1'].best_values[key], value, decimal=2)
class Test_Conditional_Oscillation_Analysis(unittest.TestCase):
    """Regression test for the parameters extracted by
    ma.Conditional_Oscillation_Analysis."""
    @classmethod
    def setUpClass(self):
        # NOTE(review): classmethod arg conventionally named `cls`.
        self.datadir = os.path.join(pq.__path__[0], 'tests', 'test_data')
        ma.a_tools.datadir = self.datadir
    def test_condition_oscillation_extracted_pars(self):
        a=ma.Conditional_Oscillation_Analysis(t_start='20180130_191229')
        # (value, stderr) pairs for each extracted quantity, flattened.
        extracted = np.array([a.proc_data_dict['phi_cond'][0],
                              a.proc_data_dict['phi_cond'][1],
                              a.proc_data_dict['phi_0'][0],
                              a.proc_data_dict['phi_0'][1],
                              a.proc_data_dict['phi_1'][0],
                              a.proc_data_dict['phi_1'][1],
                              a.proc_data_dict['osc_amp_0'][0],
                              a.proc_data_dict['osc_amp_0'][1],
                              a.proc_data_dict['osc_amp_1'][0],
                              a.proc_data_dict['osc_amp_1'][1],
                              a.proc_data_dict['offs_diff'][0],
                              a.proc_data_dict['offs_diff'][1],
                              a.proc_data_dict['osc_offs_0'][0],
                              a.proc_data_dict['osc_offs_0'][1],
                              a.proc_data_dict['osc_offs_1'][0],
                              a.proc_data_dict['osc_offs_1'][1]])
        expected = np.array([178.48651885251698,
                             2.9898741913646272,
                             3.3113606223925696,
                             1.691099641377918,
                             181.79787947490954,
                             2.4656702300023334,
                             0.3236975712917689,
                             0.009554015995104392,
                             0.27746120839251875,
                             0.011940273429767528,
                             0.00150761346297007,
                             0.010813168390439071,
                             0.4868083822944365,
                             0.0067557093031373455,
                             0.4883159957574066,
                             0.0084430446197739737])
        # decimal=3 because the reference values are fit results and exact
        # (decimal=7) comparison proved too strict.
        np.testing.assert_almost_equal(extracted, expected, decimal=3)
|
"""Certbot main entry point."""
from __future__ import print_function
import functools
import logging.handlers
import os
import sys
import configobj
import zope.component
from acme import jose
from acme import errors as acme_errors
import certbot
from certbot import account
from certbot import cert_manager
from certbot import cli
from certbot import client
from certbot import configuration
from certbot import constants
from certbot import crypto_util
from certbot import eff
from certbot import errors
from certbot import hooks
from certbot import interfaces
from certbot import log
from certbot import renewal
from certbot import reporter
from certbot import storage
from certbot import util
from certbot.display import util as display_util, ops as display_ops
from certbot.plugins import disco as plugins_disco
from certbot.plugins import selection as plug_sel
USER_CANCELLED = ("User chose to cancel the operation and may "
"reinvoke the client.")
logger = logging.getLogger(__name__)
def _suggest_donation_if_appropriate(config):
    """Potentially suggest a donation to support Certbot."""
    assert config.verb != "renew"
    # --dry-run implies --staging; no suggestion for test-only runs.
    if config.staging:
        return
    donation_msg = ("If you like Certbot, please consider supporting our work by:\n\n"
                    "Donating to ISRG / Let's Encrypt: https://letsencrypt.org/donate\n"
                    "Donating to EFF: https://eff.org/donate-le\n\n")
    reporter = zope.component.getUtility(interfaces.IReporter)
    reporter.add_message(donation_msg, reporter.LOW_PRIORITY)
def _report_successful_dry_run(config):
    """Tell the user, via the reporter, that the dry run succeeded."""
    assert config.verb != "renew"
    reporter = zope.component.getUtility(interfaces.IReporter)
    reporter.add_message("The dry run was successful.",
                         reporter.HIGH_PRIORITY, on_crash=False)
def _get_and_save_cert(le_client, config, domains=None, certname=None, lineage=None):
    """Authenticate and enroll certificate.
    This method finds the relevant lineage, figures out what to do with it,
    then performs that action. Includes calls to hooks, various reports,
    checks, and requests for user input.
    :param le_client: client used to obtain / renew the certificate
    :param config: parsed configuration
    :param domains: domain names to enroll; required when `lineage` is None
    :param certname: name for a newly created lineage
    :param lineage: existing `storage.RenewableCert` to renew, if any
    :returns: the issued certificate or `None` if doing a dry run
    :rtype: `storage.RenewableCert` or `None`
    """
    hooks.pre_hook(config)
    try:
        if lineage is not None:
            # Renewal, where we already know the specific lineage we're
            # interested in
            logger.info("Renewing an existing certificate")
            renewal.renew_cert(config, domains, le_client, lineage)
        else:
            # TREAT AS NEW REQUEST
            assert domains is not None
            logger.info("Obtaining a new certificate")
            lineage = le_client.obtain_and_enroll_certificate(domains, certname)
            # obtain_and_enroll_certificate returns False on failure and
            # None on a dry run.
            if lineage is False:
                raise errors.Error("Certificate could not be obtained")
            elif lineage is not None:
                hooks.deploy_hook(config, lineage.names(), lineage.live_dir)
    finally:
        # post hook runs whether or not issuance succeeded
        hooks.post_hook(config)
    return lineage
def _handle_subset_cert_request(config, domains, cert):
    """Figure out what to do if a previous cert had a subset of the names now requested
    :param config: parsed configuration
    :param list domains: domain names now being requested
    :param storage.RenewableCert cert: existing cert covering a subset of `domains`
    :returns: Tuple of (str action, cert_or_None) as per _find_lineage_for_domains_and_certname
        action can be: "newcert" | "renew" | "reinstall"
    :rtype: tuple
    :raises errors.Error: if the user declines to expand the certificate
    """
    existing = ", ".join(cert.names())
    question = (
        "You have an existing certificate that contains a portion of "
        "the domains you requested (ref: {0}){br}{br}It contains these "
        "names: {1}{br}{br}You requested these names for the new "
        "certificate: {2}.{br}{br}Do you want to expand and replace this existing "
        "certificate with the new certificate?"
    ).format(cert.configfile.filename,
             existing,
             ", ".join(domains),
             br=os.linesep)
    # Expand automatically with --expand / --renew-by-default; otherwise ask.
    if config.expand or config.renew_by_default or zope.component.getUtility(
            interfaces.IDisplay).yesno(question, "Expand", "Cancel",
                                       cli_flag="--expand",
                                       force_interactive=True):
        return "renew", cert
    else:
        reporter_util = zope.component.getUtility(interfaces.IReporter)
        reporter_util.add_message(
            "To obtain a new certificate that contains these names without "
            "replacing your existing certificate for {0}, you must use the "
            "--duplicate option.{br}{br}"
            "For example:{br}{br}{1} --duplicate {2}".format(
                existing,
                sys.argv[0], " ".join(sys.argv[1:]),
                br=os.linesep
            ),
            reporter_util.HIGH_PRIORITY)
        raise errors.Error(USER_CANCELLED)
def _handle_identical_cert_request(config, lineage):
    """Figure out what to do if a lineage has the same names as a previously obtained one
    :param config: parsed configuration
    :param storage.RenewableCert lineage: existing lineage with identical names
    :returns: Tuple of (str action, cert_or_None) as per _find_lineage_for_domains_and_certname
        action can be: "newcert" | "renew" | "reinstall"
    :rtype: tuple
    :raises errors.Error: if the user cancels at the interactive menu
    """
    # If the cert files were never fully deployed, just finish deployment.
    if not lineage.ensure_deployed():
        return "reinstall", lineage
    if renewal.should_renew(config, lineage):
        return "renew", lineage
    if config.reinstall:
        # Set with --reinstall, force an identical certificate to be
        # reinstalled without further prompting.
        return "reinstall", lineage
    question = (
        "You have an existing certificate that has exactly the same "
        "domains or certificate name you requested and isn't close to expiry."
        "{br}(ref: {0}){br}{br}What would you like to do?"
    ).format(lineage.configfile.filename, br=os.linesep)
    if config.verb == "run":
        keep_opt = "Attempt to reinstall this existing certificate"
    elif config.verb == "certonly":
        keep_opt = "Keep the existing certificate for now"
    # NOTE(review): for any verb other than "run"/"certonly", keep_opt is
    # unbound and the next statement raises NameError -- presumably this path
    # is only reached for those two verbs; confirm with callers.
    choices = [keep_opt,
               "Renew & replace the cert (limit ~5 per 7 days)"]
    display = zope.component.getUtility(interfaces.IDisplay)
    response = display.menu(question, choices,
                            default=0, force_interactive=True)
    if response[0] == display_util.CANCEL:
        # TODO: Add notification related to command-line options for
        # skipping the menu for this case.
        raise errors.Error(
            "Operation canceled. You may re-run the client.")
    elif response[1] == 0:
        return "reinstall", lineage
    elif response[1] == 1:
        return "renew", lineage
    else:
        assert False, "This is impossible"
def _find_lineage_for_domains(config, domains):
    """Determine whether there are duplicated names and how to handle
    them (renew, reinstall, newcert, or raising an error to stop
    the client run if the user chooses to cancel the operation when
    prompted).
    :param config: parsed configuration
    :param list domains: requested domain names
    :returns: Two-element tuple containing desired new-certificate behavior as
        a string token ("reinstall", "renew", or "newcert"), plus either
        a RenewableCert instance or None if renewal shouldn't occur.
    :raises .Error: If the user would like to rerun the client again.
    """
    # Considering the possibility that the requested certificate is
    # related to an existing certificate.  (config.duplicate, which
    # is set with --duplicate, skips all of this logic and forces any
    # kind of certificate to be obtained with renewal = False.)
    if config.duplicate:
        return "newcert", None
    # TODO: Also address superset case
    ident_names_cert, subset_names_cert = cert_manager.find_duplicative_certs(config, domains)
    # XXX ^ schoen is not sure whether that correctly reads the systemwide
    # configuration file.
    if ident_names_cert is None and subset_names_cert is None:
        return "newcert", None
    if ident_names_cert is not None:
        return _handle_identical_cert_request(config, ident_names_cert)
    elif subset_names_cert is not None:
        return _handle_subset_cert_request(config, domains, subset_names_cert)
def _find_cert(config, domains, certname):
    """Finds an existing certificate object given domains and/or a certificate name.
    :param config: parsed configuration
    :param domains: requested domain names, may be None
    :param certname: certificate name, may be None
    :returns: Two-element tuple of a boolean that indicates if this function should be
        followed by a call to fetch a certificate from the server, and either a
        RenewableCert instance or None.
    """
    action, lineage = _find_lineage_for_domains_and_certname(config, domains, certname)
    if action == "reinstall":
        logger.info("Keeping the existing certificate")
    # "reinstall" means no new fetch is needed; every other action needs one.
    return (action != "reinstall"), lineage
def _find_lineage_for_domains_and_certname(config, domains, certname):
    """Find appropriate lineage based on given domains and/or certname.
    :param config: parsed configuration
    :param domains: requested domain names, may be None
    :param certname: certificate name, may be None
    :returns: Two-element tuple containing desired new-certificate behavior as
        a string token ("reinstall", "renew", or "newcert"), plus either
        a RenewableCert instance or None if renewal shouldn't occur.
    :raises .Error: If the user would like to rerun the client again.
    :raises errors.ConfigurationError: if `certname` names no lineage and no
        domains were given.
    """
    if not certname:
        return _find_lineage_for_domains(config, domains)
    else:
        lineage = cert_manager.lineage_for_certname(config, certname)
        if lineage:
            if domains:
                # Renew under the same name, but confirm any domain changes.
                if set(cert_manager.domains_for_certname(config, certname)) != set(domains):
                    _ask_user_to_confirm_new_names(config, domains, certname,
                                                   lineage.names()) # raises if no
                return "renew", lineage
            # unnecessarily specified domains or no domains specified
            return _handle_identical_cert_request(config, lineage)
        else:
            if domains:
                return "newcert", None
            else:
                raise errors.ConfigurationError("No certificate with name {0} found. "
                    "Use -d to specify domains, or run certbot --certificates to see "
                    "possible certificate names.".format(certname))
def _ask_user_to_confirm_new_names(config, new_domains, certname, old_domains):
    """Ask user to confirm update cert certname to contain new_domains.

    :raises errors.ConfigurationError: if the user declines the change.
    """
    # --renew-with-new-domains skips the confirmation entirely.
    if config.renew_with_new_domains:
        return
    msg = ("You are updating certificate {0} to include domains: {1}{br}{br}"
           "It previously included domains: {2}{br}{br}"
           "Did you intend to make this change?".format(
               certname,
               ", ".join(new_domains),
               ", ".join(old_domains),
               br=os.linesep))
    obj = zope.component.getUtility(interfaces.IDisplay)
    if not obj.yesno(msg, "Update cert", "Cancel", default=True):
        raise errors.ConfigurationError("Specified mismatched cert name and domains.")
def _find_domains_or_certname(config, installer):
    """Retrieve domains and certname from config or user input.

    :returns: tuple of (domains, certname)
    :raises errors.Error: if neither domains nor a certname can be determined.
    """
    domains = None
    certname = config.certname
    # first, try to get domains from the config
    if config.domains:
        domains = config.domains
    # if we can't do that but we have a certname, get the domains
    # with that certname
    elif certname:
        domains = cert_manager.domains_for_certname(config, certname)
    # that certname might not have existed, or there was a problem.
    # try to get domains from the user.
    if not domains:
        domains = display_ops.choose_names(installer)
    if not domains and not certname:
        raise errors.Error("Please specify --domains, or --installer that "
                           "will help in domain names autodiscovery, or "
                           "--cert-name for an existing certificate name.")
    return domains, certname
def _report_new_cert(config, cert_path, fullchain_path, key_path=None):
    """Reports the creation of a new certificate to the user.
    :param config: parsed configuration
    :param str cert_path: path to cert
    :param str fullchain_path: path to full chain
    :param str key_path: path to private key, if available
    """
    # A dry run produces no files, so only report success.
    if config.dry_run:
        _report_successful_dry_run(config)
        return
    assert cert_path and fullchain_path, "No certificates saved to report."
    expiry = crypto_util.notAfter(cert_path).date()
    reporter_util = zope.component.getUtility(interfaces.IReporter)
    # Print the path to fullchain.pem because that's what modern webservers
    # (Nginx and Apache2.4) will want.
    verbswitch = ' with the "certonly" option' if config.verb == "run" else ""
    privkey_statement = 'Your key file has been saved at:{br}{0}{br}'.format(
        key_path, br=os.linesep) if key_path else ""
    # XXX Perhaps one day we could detect the presence of known old webservers
    # and say something more informative here.
    msg = ('Congratulations! Your certificate and chain have been saved at:{br}'
           '{0}{br}{1}'
           'Your cert will expire on {2}. To obtain a new or tweaked version of this '
           'certificate in the future, simply run {3} again{4}. '
           'To non-interactively renew *all* of your certificates, run "{3} renew"'
           .format(fullchain_path, privkey_statement, expiry, cli.cli_command, verbswitch,
                   br=os.linesep))
    reporter_util.add_message(msg, reporter_util.MEDIUM_PRIORITY)
def _determine_account(config):
    """Determine which account to use.
    In order to make the renewer (configuration de/serialization) happy,
    if ``config.account`` is ``None``, it will be updated based on the
    user input. Same for ``config.email``.
    :param certbot.interface.IConfig config: Configuration object
    :returns: Account and optionally ACME client API (biproduct of new
        registration).
    :rtype: `tuple` of `certbot.account.Account` and
        `acme.client.Client`
    :raises errors.Error: if a new account cannot be registered.
    """
    account_storage = account.AccountFileStorage(config)
    acme = None
    if config.account is not None:
        acc = account_storage.load(config.account)
    else:
        accounts = account_storage.find_all()
        if len(accounts) > 1:
            acc = display_ops.choose_account(accounts)
        elif len(accounts) == 1:
            acc = accounts[0]
        else:  # no account registered yet
            if config.email is None and not config.register_unsafely_without_email:
                config.email = display_ops.get_email()
            def _tos_cb(regr):
                # Terms-of-service callback: --agree-tos short-circuits the prompt.
                if config.tos:
                    return True
                msg = ("Please read the Terms of Service at {0}. You "
                       "must agree in order to register with the ACME "
                       "server at {1}".format(
                           regr.terms_of_service, config.server))
                obj = zope.component.getUtility(interfaces.IDisplay)
                return obj.yesno(msg, "Agree", "Cancel",
                                 cli_flag="--agree-tos", force_interactive=True)
            try:
                acc, acme = client.register(
                    config, account_storage, tos_cb=_tos_cb)
            except errors.MissingCommandlineFlag:
                raise
            except errors.Error as error:
                logger.debug(error, exc_info=True)
                raise errors.Error(
                    "Unable to register an account with ACME server")
    # Persist the chosen account id so the renewer can reuse it.
    config.account = acc.id
    return acc, acme
def _delete_if_appropriate(config):  # pylint: disable=too-many-locals,too-many-branches
    """Does the user want to delete their now-revoked certs? If run in non-interactive mode,
    deleting happens automatically, unless if both `--cert-name` and `--cert-path` were
    specified with conflicting values.

    :param `configuration.NamespaceConfig` config: parsed command line arguments

    :raises errors.Error: If anything goes wrong, including bad user input, if an overlapping
        archive dir is found for the specified lineage, etc ...
    """
    display = zope.component.getUtility(interfaces.IDisplay)
    reporter_util = zope.component.getUtility(interfaces.IReporter)

    msg = ("Would you like to delete the cert(s) you just revoked?")
    attempt_deletion = display.yesno(msg, yes_label="Yes (recommended)", no_label="No",
                                     force_interactive=True, default=True)
    if not attempt_deletion:
        reporter_util.add_message("Not deleting revoked certs.", reporter_util.LOW_PRIORITY)
        return

    if not (config.certname or config.cert_path):
        raise errors.Error('At least one of --cert-path or --cert-name must be specified.')

    if config.certname and config.cert_path:
        # first, check if certname and cert_path imply the same certs
        implied_cert_name = cert_manager.cert_path_to_lineage(config)

        if implied_cert_name != config.certname:
            # They conflict: build a human-readable summary of each lineage
            # and let the user pick which one to delete.
            cert_path_implied_cert_name = cert_manager.cert_path_to_lineage(config)
            cert_path_implied_conf = storage.renewal_file_for_certname(config,
                cert_path_implied_cert_name)
            cert_path_cert = storage.RenewableCert(cert_path_implied_conf, config)
            cert_path_info = cert_manager.human_readable_cert_info(config, cert_path_cert,
                skip_filter_checks=True)

            cert_name_implied_conf = storage.renewal_file_for_certname(config, config.certname)
            cert_name_cert = storage.RenewableCert(cert_name_implied_conf, config)
            cert_name_info = cert_manager.human_readable_cert_info(config, cert_name_cert)

            msg = ("You specified conflicting values for --cert-path and --cert-name. "
                   "Which did you mean to select?")
            choices = [cert_path_info, cert_name_info]
            try:
                code, index = display.menu(msg,
                    choices, ok_label="Select", force_interactive=True)
            except errors.MissingCommandlineFlag:
                error_msg = ('To run in non-interactive mode, you must either specify only one of '
                    '--cert-path or --cert-name, or both must point to the same certificate lineages.')
                raise errors.Error(error_msg)

            if code != display_util.OK or not index in range(0, len(choices)):
                raise errors.Error("User ended interaction.")

            if index == 0:
                # Keep the lineage implied by --cert-path.
                config.certname = cert_path_implied_cert_name
            else:
                # Keep the lineage implied by --cert-name.
                config.cert_path = storage.cert_path_for_cert_name(config, config.certname)

    elif config.cert_path:
        config.certname = cert_manager.cert_path_to_lineage(config)

    else:  # if only config.certname was specified
        config.cert_path = storage.cert_path_for_cert_name(config, config.certname)

    # don't delete if the archive_dir is used by some other lineage
    archive_dir = storage.full_archive_path(
        configobj.ConfigObj(storage.renewal_file_for_certname(config, config.certname)),
        config, config.certname)
    try:
        cert_manager.match_and_check_overlaps(config, [lambda x: archive_dir],
            lambda x: x.archive_dir, lambda x: x)
    except errors.OverlappingMatchFound:
        msg = ('Not deleting revoked certs due to overlapping archive dirs. More than '
               'one lineage is using {0}'.format(archive_dir))
        # NOTE(review): ''.join(msg) on a str is a no-op (re-joins its own
        # characters); plain `msg` would be equivalent and clearer.
        reporter_util.add_message(''.join(msg), reporter_util.MEDIUM_PRIORITY)
        return
    except Exception as e:
        msg = ('config.default_archive_dir: {0}, config.live_dir: {1}, archive_dir: {2},'
               'original exception: {3}')
        msg = msg.format(config.default_archive_dir, config.live_dir, archive_dir, e)
        raise errors.Error(msg)

    cert_manager.delete(config)
def _init_le_client(config, authenticator, installer):
    """Construct a `client.Client`, resolving an ACME account only if needed.

    :param authenticator: authenticator plugin, or ``None`` for
        install-only flows (no account is looked up in that case)
    :param installer: installer plugin, or ``None``

    :rtype: `certbot.client.Client`
    """
    if authenticator is not None:
        # if authenticator was given, then we will need account...
        acc, acme = _determine_account(config)
        logger.debug("Picked account: %r", acc)
        # XXX
        #crypto_util.validate_key_csr(acc.key)
    else:
        acc, acme = None, None

    return client.Client(config, acc, authenticator, installer, acme=acme)
def unregister(config, unused_plugins):
    """Deactivate account on server.

    Deactivates the ACME registration on the server, then deletes the
    local account files.

    :returns: an error/abort message string for the caller to report, or
        ``None`` on success
    """
    account_storage = account.AccountFileStorage(config)
    accounts = account_storage.find_all()
    reporter_util = zope.component.getUtility(interfaces.IReporter)

    if not accounts:
        return "Could not find existing account to deactivate."
    yesno = zope.component.getUtility(interfaces.IDisplay).yesno
    prompt = ("Are you sure you would like to irrevocably deactivate "
              "your account?")
    wants_deactivate = yesno(prompt, yes_label='Deactivate', no_label='Abort',
                             default=True)

    if not wants_deactivate:
        return "Deactivation aborted."

    acc, acme = _determine_account(config)
    cb_client = client.Client(config, acc, None, None, acme=acme)

    # delete on boulder
    cb_client.acme.deactivate_registration(acc.regr)
    account_files = account.AccountFileStorage(config)
    # delete local account files
    account_files.delete(config.account)

    reporter_util.add_message("Account deactivated.", reporter_util.MEDIUM_PRIORITY)
def register(config, unused_plugins):
    """Create or modify accounts on the server.

    Without ``--update-registration``, registers a new account (refusing
    if one already exists). With it, updates the contact e-mail of the
    existing account.

    :returns: an error message string for the caller to report, or
        ``None`` on success
    """
    # Portion of _determine_account logic to see whether accounts already
    # exist or not.
    account_storage = account.AccountFileStorage(config)
    accounts = account_storage.find_all()
    reporter_util = zope.component.getUtility(interfaces.IReporter)

    def add_msg(message):
        # Queue a medium-priority line for the final report.
        reporter_util.add_message(message, reporter_util.MEDIUM_PRIORITY)

    # registering a new account
    if not config.update_registration:
        if accounts:
            # TODO: add a flag to register a duplicate account (this will
            # also require extending _determine_account's behavior
            # or else extracting the registration code from there)
            return ("There is an existing account; registration of a "
                    "duplicate account with this command is currently "
                    "unsupported.")
        # _determine_account will register an account
        _determine_account(config)
        return

    # --update-registration
    if not accounts:
        return "Could not find an existing account to update."
    if config.email is None:
        if config.register_unsafely_without_email:
            return ("--register-unsafely-without-email provided, however, a "
                    "new e-mail address must\ncurrently be provided when "
                    "updating a registration.")
        config.email = display_ops.get_email(optional=False)

    acc, acme = _determine_account(config)
    cb_client = client.Client(config, acc, None, None, acme=acme)
    # We rely on an exception to interrupt this process if it didn't work.
    acc.regr = cb_client.acme.update_registration(acc.regr.update(
        body=acc.regr.body.update(contact=('mailto:' + config.email,))))
    account_storage.save_regr(acc, cb_client.acme)
    eff.handle_subscription(config)
    add_msg("Your e-mail address was updated to {0}.".format(config.email))
def _install_cert(config, le_client, domains, lineage=None):
path_provider = lineage if lineage else config
assert path_provider.cert_path is not None
le_client.deploy_certificate(domains, path_provider.key_path,
path_provider.cert_path, path_provider.chain_path, path_provider.fullchain_path)
le_client.enhance_config(domains, path_provider.chain_path)
def install(config, plugins):
    """Install a previously obtained cert in a server.

    :returns: an error message string if plugin selection fails,
        otherwise ``None``
    """
    # XXX: Update for renewer/RenewableCert
    # FIXME: be consistent about whether errors are raised or returned from
    # this function ...
    try:
        installer, _ = plug_sel.choose_configurator_plugins(config, plugins, "install")
    except errors.PluginSelectionError as e:
        return str(e)

    domains, _ = _find_domains_or_certname(config, installer)
    # authenticator=None: install-only flow, no new certificate is requested.
    le_client = _init_le_client(config, authenticator=None, installer=installer)
    _install_cert(config, le_client, domains)
def plugins_cmd(config, plugins):
    """List server software plugins.

    Progressively narrows the set — visible, interface-filtered,
    initialized/verified, prepared/available — and reports at the depth
    requested by ``--init`` / ``--prepare``.
    """
    logger.debug("Expected interfaces: %s", config.ifaces)

    ifaces = [] if config.ifaces is None else config.ifaces
    filtered = plugins.visible().ifaces(ifaces)
    logger.debug("Filtered plugins: %r", filtered)

    notify = functools.partial(zope.component.getUtility(
        interfaces.IDisplay).notification, pause=False)
    if not config.init and not config.prepare:
        # Neither --init nor --prepare: just show the filtered set.
        notify(str(filtered))
        return

    filtered.init(config)
    verified = filtered.verify(ifaces)
    logger.debug("Verified plugins: %r", verified)
    if not config.prepare:
        notify(str(verified))
        return

    verified.prepare()
    available = verified.available()
    logger.debug("Prepared plugins: %s", available)
    notify(str(available))
def rollback(config, plugins):
    """Rollback server configuration changes made during install.

    Delegates to :func:`client.rollback`, passing the configured installer
    name and the number of checkpoints (``config.checkpoints``) to undo.
    """
    client.rollback(config.installer, config.checkpoints, config, plugins)
def config_changes(config, unused_plugins):
    """Show changes made to server config during installation

    View checkpoints and associated configuration changes.
    """
    # config.num limits how many checkpoints are displayed.
    client.view_config_changes(config, num=config.num)
def update_symlinks(config, unused_plugins):
    """Update the certificate file family symlinks

    Use the information in the config file to make symlinks point to
    the correct archive directory.

    :param configuration.NamespaceConfig config: parsed command line arguments
    """
    cert_manager.update_live_symlinks(config)
def rename(config, unused_plugins):
    """Rename a certificate

    Use the information in the config file to rename an existing
    lineage.

    :param configuration.NamespaceConfig config: parsed command line arguments
    """
    cert_manager.rename_lineage(config)
def delete(config, unused_plugins):
    """Delete a certificate

    Use the information in the config file to delete an existing
    lineage.

    :param configuration.NamespaceConfig config: parsed command line arguments
    """
    cert_manager.delete(config)
def certificates(config, unused_plugins):
    """Display information about certs configured with Certbot

    :param configuration.NamespaceConfig config: parsed command line arguments
    """
    cert_manager.certificates(config)
def revoke(config, unused_plugins):  # TODO: coop with renewal config
    """Revoke a previously obtained certificate.

    Signs the revocation with the cert's own private key when
    ``--key-path`` was given, otherwise with the account key. On success,
    offers to delete the revoked lineage.

    :returns: an error message string on ACME client failure, else ``None``
    """
    # For user-agent construction
    config.installer = config.authenticator = "None"
    if config.key_path is not None:  # revocation by cert key
        # config.cert_path / config.key_path appear to be
        # (filename, contents) pairs: [0] is logged as a path and [1] is
        # parsed below — TODO confirm against the CLI parser.
        logger.debug("Revoking %s using cert key %s",
                     config.cert_path[0], config.key_path[0])
        crypto_util.verify_cert_matches_priv_key(config.cert_path[0], config.key_path[0])
        key = jose.JWK.load(config.key_path[1])
    else:  # revocation by account key
        logger.debug("Revoking %s using Account Key", config.cert_path[0])
        acc, _ = _determine_account(config)
        key = acc.key
    acme = client.acme_from_config_key(config, key)
    cert = crypto_util.pyopenssl_load_certificate(config.cert_path[1])[0]
    logger.debug("Reason code for revocation: %s", config.reason)

    try:
        acme.revoke(jose.ComparableX509(cert), config.reason)
        # Offer to delete the now-revoked lineage as well.
        _delete_if_appropriate(config)
    except acme_errors.ClientError as e:
        return str(e)

    display_ops.success_revocation(config.cert_path[0])
def run(config, plugins):  # pylint: disable=too-many-branches,too-many-locals
    """Obtain a certificate and install.

    :returns: an error message string if plugin selection fails,
        otherwise ``None``
    """
    # TODO: Make run as close to auth + install as possible
    # Possible difficulties: config.csr was hacked into auth
    try:
        installer, authenticator = plug_sel.choose_configurator_plugins(config, plugins, "run")
    except errors.PluginSelectionError as e:
        return str(e)

    # TODO: Handle errors from _init_le_client?
    le_client = _init_le_client(config, authenticator, installer)

    domains, certname = _find_domains_or_certname(config, installer)
    should_get_cert, lineage = _find_cert(config, domains, certname)

    new_lineage = lineage
    if should_get_cert:
        new_lineage = _get_and_save_cert(le_client, config, domains,
                                         certname, lineage)

    # new_lineage may be None (nothing enrolled, e.g. a dry run),
    # so guard every path access.
    cert_path = new_lineage.cert_path if new_lineage else None
    fullchain_path = new_lineage.fullchain_path if new_lineage else None
    key_path = new_lineage.key_path if new_lineage else None
    _report_new_cert(config, cert_path, fullchain_path, key_path)

    _install_cert(config, le_client, domains, new_lineage)

    if lineage is None or not should_get_cert:
        # Either a brand-new certificate or a reinstall: report as install.
        display_ops.success_installation(domains)
    else:
        display_ops.success_renewal(domains)

    _suggest_donation_if_appropriate(config)
def _csr_get_and_save_cert(config, le_client):
    """Obtain a cert using a user-supplied CSR

    This works differently in the CSR case (for now) because we don't
    have the privkey, and therefore can't construct the files for a lineage.
    So we just save the cert & chain to disk :/

    :returns: ``(cert_path, fullchain_path)``; both ``None`` on a dry run
    """
    csr, _ = config.actual_csr
    certr, chain = le_client.obtain_certificate_from_csr(config.domains, csr)
    if config.dry_run:
        logger.debug(
            "Dry run: skipping saving certificate to %s", config.cert_path)
        return None, None
    cert_path, _, fullchain_path = le_client.save_certificate(
        certr, chain, config.cert_path, config.chain_path, config.fullchain_path)
    return cert_path, fullchain_path
def renew_cert(config, plugins, lineage):
    """Renew & save an existing cert. Do not install it.

    :param storage.RenewableCert lineage: lineage being renewed

    :raises errors.PluginSelectionError: if no appropriate plugin is found
    """
    try:
        # installers are used in auth mode to determine domain names
        installer, auth = plug_sel.choose_configurator_plugins(config, plugins, "certonly")
    except errors.PluginSelectionError as e:
        logger.info("Could not choose appropriate plugin: %s", e)
        raise

    le_client = _init_le_client(config, auth, installer)

    _get_and_save_cert(le_client, config, lineage=lineage)

    notify = zope.component.getUtility(interfaces.IDisplay).notification
    if installer is None:
        notify("new certificate deployed without reload, fullchain is {0}".format(
               lineage.fullchain), pause=False)
    else:
        # In case of a renewal, reload server to pick up new certificate.
        # In principle we could have a configuration option to inhibit this
        # from happening.
        installer.restart()
        notify("new certificate deployed with reload of {0} server; fullchain is {1}".format(
               config.installer, lineage.fullchain), pause=False)
def certonly(config, plugins):
    """Authenticate & obtain cert, but do not install it.

    This implements the 'certonly' subcommand."""
    # SETUP: Select plugins and construct a client instance
    try:
        # installers are used in auth mode to determine domain names
        installer, auth = plug_sel.choose_configurator_plugins(config, plugins, "certonly")
    except errors.PluginSelectionError as e:
        logger.info("Could not choose appropriate plugin: %s", e)
        raise

    le_client = _init_le_client(config, auth, installer)

    if config.csr:
        # --csr flow: no lineage is created, only cert & chain are saved.
        cert_path, fullchain_path = _csr_get_and_save_cert(config, le_client)
        _report_new_cert(config, cert_path, fullchain_path)
        _suggest_donation_if_appropriate(config)
        return

    domains, certname = _find_domains_or_certname(config, installer)
    should_get_cert, lineage = _find_cert(config, domains, certname)

    if not should_get_cert:
        notify = zope.component.getUtility(interfaces.IDisplay).notification
        notify("Certificate not yet due for renewal; no action taken.", pause=False)
        return

    lineage = _get_and_save_cert(le_client, config, domains, certname, lineage)

    # lineage may be None (e.g. dry run); guard each path access.
    cert_path = lineage.cert_path if lineage else None
    fullchain_path = lineage.fullchain_path if lineage else None
    key_path = lineage.key_path if lineage else None
    _report_new_cert(config, cert_path, fullchain_path, key_path)
    _suggest_donation_if_appropriate(config)
def renew(config, unused_plugins):
    """Renew previously-obtained certificates."""
    try:
        renewal.handle_renewal_request(config)
    finally:
        # Saved post-hooks run even if the renewal attempt raised.
        hooks.run_saved_post_hooks()
def make_or_verify_needed_dirs(config):
    """Create or verify existence of config, work, and hook directories.

    :param configuration.NamespaceConfig config: parsed command line arguments
    """
    util.set_up_core_dir(config.config_dir, constants.CONFIG_DIRS_MODE,
                         os.geteuid(), config.strict_permissions)
    util.set_up_core_dir(config.work_dir, constants.CONFIG_DIRS_MODE,
                         os.geteuid(), config.strict_permissions)

    hook_dirs = (config.renewal_pre_hooks_dir,
                 config.renewal_deploy_hooks_dir,
                 config.renewal_post_hooks_dir,)
    for hook_dir in hook_dirs:
        util.make_or_verify_dir(hook_dir,
                                uid=os.geteuid(),
                                strict=config.strict_permissions)
def set_displayer(config):
    """Set the displayer

    Chooses a display implementation based on the quiet/non-interactive
    flags and registers it as the zope ``IDisplay`` utility.
    """
    if config.quiet:
        # --quiet implies non-interactive mode; all output is discarded.
        config.noninteractive_mode = True
        # NOTE(review): this devnull handle is never explicitly closed.
        displayer = display_util.NoninteractiveDisplay(open(os.devnull, "w"))
    elif config.noninteractive_mode:
        displayer = display_util.NoninteractiveDisplay(sys.stdout)
    else:
        displayer = display_util.FileDisplay(sys.stdout,
                                             config.force_interactive)
    zope.component.provideUtility(displayer)
def main(cli_args=sys.argv[1:]):
    """Command line argument parsing and main script execution.

    :returns: whatever the selected subcommand function returns (often an
        error message string or ``None``)

    NOTE(review): the ``sys.argv[1:]`` default is evaluated once at import
    time; callers that mutate ``sys.argv`` later should pass args explicitly.
    """
    log.pre_arg_parse_setup()

    plugins = plugins_disco.PluginsRegistry.find_all()
    logger.debug("certbot version: %s", certbot.__version__)
    # do not log `config`, as it contains sensitive data (e.g. revoke --key)!
    logger.debug("Arguments: %r", cli_args)
    logger.debug("Discovered plugins: %r", plugins)

    # note: arg parser internally handles --help (and exits afterwards)
    args = cli.prepare_and_parse_args(plugins, cli_args)
    config = configuration.NamespaceConfig(args)
    zope.component.provideUtility(config)

    try:
        log.post_arg_parse_setup(config)
        make_or_verify_needed_dirs(config)
    except errors.Error:
        # Let plugins_cmd be run as un-privileged user.
        if config.func != plugins_cmd:
            raise

    set_displayer(config)

    # Reporter
    report = reporter.Reporter(config)
    zope.component.provideUtility(report)
    util.atexit_register(report.print_messages)

    # Dispatch to the subcommand handler selected by the arg parser.
    return config.func(config, plugins)
if __name__ == "__main__":
    err_string = main()
    if err_string:
        logger.warning("Exiting with message %s", err_string)
    # sys.exit with a string prints it to stderr and exits with status 1.
    sys.exit(err_string)  # pragma: no cover
Show a diff when re-creating a certificate, instead of the full list of domains (#5274)
"""Certbot main entry point."""
from __future__ import print_function
import functools
import logging.handlers
import os
import sys
import configobj
import zope.component
from acme import jose
from acme import errors as acme_errors
import certbot
from certbot import account
from certbot import cert_manager
from certbot import cli
from certbot import client
from certbot import configuration
from certbot import constants
from certbot import crypto_util
from certbot import eff
from certbot import errors
from certbot import hooks
from certbot import interfaces
from certbot import log
from certbot import renewal
from certbot import reporter
from certbot import storage
from certbot import util
from certbot.display import util as display_util, ops as display_ops
from certbot.plugins import disco as plugins_disco
from certbot.plugins import selection as plug_sel
USER_CANCELLED = ("User chose to cancel the operation and may "
"reinvoke the client.")
logger = logging.getLogger(__name__)
def _suggest_donation_if_appropriate(config):
    """Potentially suggest a donation to support Certbot."""
    assert config.verb != "renew"
    if config.staging:
        # --dry-run implies --staging
        return
    reporter_obj = zope.component.getUtility(interfaces.IReporter)
    reporter_obj.add_message(
        "If you like Certbot, please consider supporting our work by:\n\n"
        "Donating to ISRG / Let's Encrypt: https://letsencrypt.org/donate\n"
        "Donating to EFF: https://eff.org/donate-le\n\n",
        reporter_obj.LOW_PRIORITY)
def _report_successful_dry_run(config):
    """Queue a high-priority message telling the user the dry run succeeded."""
    reporter_obj = zope.component.getUtility(interfaces.IReporter)
    assert config.verb != "renew"
    reporter_obj.add_message(
        "The dry run was successful.", reporter_obj.HIGH_PRIORITY, on_crash=False)
def _get_and_save_cert(le_client, config, domains=None, certname=None, lineage=None):
    """Authenticate and enroll certificate.

    This method finds the relevant lineage, figures out what to do with it,
    then performs that action. Includes calls to hooks, various reports,
    checks, and requests for user input.

    :returns: the issued certificate or `None` if doing a dry run
    :rtype: `storage.RenewableCert` or `None`
    """
    hooks.pre_hook(config)
    try:
        if lineage is not None:
            # Renewal, where we already know the specific lineage we're
            # interested in
            logger.info("Renewing an existing certificate")
            renewal.renew_cert(config, domains, le_client, lineage)
        else:
            # TREAT AS NEW REQUEST
            assert domains is not None
            logger.info("Obtaining a new certificate")
            lineage = le_client.obtain_and_enroll_certificate(domains, certname)
            if lineage is False:
                raise errors.Error("Certificate could not be obtained")
            elif lineage is not None:
                # Deploy hooks only fire when a lineage actually exists.
                hooks.deploy_hook(config, lineage.names(), lineage.live_dir)
    finally:
        # Post hooks run whether enrollment succeeded or raised.
        hooks.post_hook(config)

    return lineage
def _handle_subset_cert_request(config, domains, cert):
    """Figure out what to do if a previous cert had a subset of the names now requested

    :param storage.RenewableCert cert:

    :returns: Tuple of (str action, cert_or_None) as per _find_lineage_for_domains_and_certname
        action can be: "newcert" | "renew" | "reinstall"
    :rtype: tuple

    :raises errors.Error: if the user declines to expand the existing cert
    """
    existing = ", ".join(cert.names())
    question = (
        "You have an existing certificate that contains a portion of "
        "the domains you requested (ref: {0}){br}{br}It contains these "
        "names: {1}{br}{br}You requested these names for the new "
        "certificate: {2}.{br}{br}Do you want to expand and replace this existing "
        "certificate with the new certificate?"
    ).format(cert.configfile.filename,
             existing,
             ", ".join(domains),
             br=os.linesep)
    # Expand without prompting when --expand or --renew-by-default is set.
    if config.expand or config.renew_by_default or zope.component.getUtility(
            interfaces.IDisplay).yesno(question, "Expand", "Cancel",
                                       cli_flag="--expand",
                                       force_interactive=True):
        return "renew", cert
    else:
        reporter_util = zope.component.getUtility(interfaces.IReporter)
        reporter_util.add_message(
            "To obtain a new certificate that contains these names without "
            "replacing your existing certificate for {0}, you must use the "
            "--duplicate option.{br}{br}"
            "For example:{br}{br}{1} --duplicate {2}".format(
                existing,
                sys.argv[0], " ".join(sys.argv[1:]),
                br=os.linesep
            ),
            reporter_util.HIGH_PRIORITY)
        raise errors.Error(USER_CANCELLED)
def _handle_identical_cert_request(config, lineage):
    """Figure out what to do if a lineage has the same names as a previously obtained one

    :param storage.RenewableCert lineage:

    :returns: Tuple of (str action, cert_or_None) as per _find_lineage_for_domains_and_certname
        action can be: "newcert" | "renew" | "reinstall"
    :rtype: tuple
    """
    if not lineage.ensure_deployed():
        # Live files are missing; just put the cert back in place.
        return "reinstall", lineage
    if renewal.should_renew(config, lineage):
        return "renew", lineage
    if config.reinstall:
        # Set with --reinstall, force an identical certificate to be
        # reinstalled without further prompting.
        return "reinstall", lineage
    question = (
        "You have an existing certificate that has exactly the same "
        "domains or certificate name you requested and isn't close to expiry."
        "{br}(ref: {0}){br}{br}What would you like to do?"
    ).format(lineage.configfile.filename, br=os.linesep)

    # NOTE(review): keep_opt is only bound for the "run" and "certonly"
    # verbs; any other verb reaching this point would raise NameError —
    # presumably those are the only callers. TODO confirm.
    if config.verb == "run":
        keep_opt = "Attempt to reinstall this existing certificate"
    elif config.verb == "certonly":
        keep_opt = "Keep the existing certificate for now"
    choices = [keep_opt,
               "Renew & replace the cert (limit ~5 per 7 days)"]

    display = zope.component.getUtility(interfaces.IDisplay)
    response = display.menu(question, choices,
                            default=0, force_interactive=True)
    if response[0] == display_util.CANCEL:
        # TODO: Add notification related to command-line options for
        # skipping the menu for this case.
        raise errors.Error(
            "Operation canceled. You may re-run the client.")
    elif response[1] == 0:
        return "reinstall", lineage
    elif response[1] == 1:
        return "renew", lineage
    else:
        assert False, "This is impossible"
def _find_lineage_for_domains(config, domains):
    """Determine whether there are duplicated names and how to handle
    them (renew, reinstall, newcert, or raising an error to stop
    the client run if the user chooses to cancel the operation when
    prompted).

    :returns: Two-element tuple containing desired new-certificate behavior as
        a string token ("reinstall", "renew", or "newcert"), plus either
        a RenewableCert instance or None if renewal shouldn't occur.

    :raises .Error: If the user would like to rerun the client again.
    """
    # Considering the possibility that the requested certificate is
    # related to an existing certificate. (config.duplicate, which
    # is set with --duplicate, skips all of this logic and forces any
    # kind of certificate to be obtained with renewal = False.)
    if config.duplicate:
        return "newcert", None
    # TODO: Also address superset case
    ident_names_cert, subset_names_cert = cert_manager.find_duplicative_certs(config, domains)
    # XXX ^ schoen is not sure whether that correctly reads the systemwide
    # configuration file.
    if ident_names_cert is None and subset_names_cert is None:
        return "newcert", None

    if ident_names_cert is not None:
        # Exact same set of names: ask whether to renew or reinstall.
        return _handle_identical_cert_request(config, ident_names_cert)
    elif subset_names_cert is not None:
        # Existing cert covers only some requested names: offer to expand.
        return _handle_subset_cert_request(config, domains, subset_names_cert)
def _find_cert(config, domains, certname):
    """Finds an existing certificate object given domains and/or a certificate name.

    :returns: Two-element tuple of a boolean that indicates if this function should be
        followed by a call to fetch a certificate from the server, and either a
        RenewableCert instance or None.
    """
    action, lineage = _find_lineage_for_domains_and_certname(config, domains, certname)
    if action == "reinstall":
        logger.info("Keeping the existing certificate")
    # Only the "reinstall" action means no new certificate is needed.
    return (action != "reinstall"), lineage
def _find_lineage_for_domains_and_certname(config, domains, certname):
    """Find appropriate lineage based on given domains and/or certname.

    :returns: Two-element tuple containing desired new-certificate behavior as
        a string token ("reinstall", "renew", or "newcert"), plus either
        a RenewableCert instance or None if renewal shouldn't occur.

    :raises .Error: If the user would like to rerun the client again.
    :raises errors.ConfigurationError: if certname names no known lineage
        and no domains were given.
    """
    if not certname:
        return _find_lineage_for_domains(config, domains)
    else:
        lineage = cert_manager.lineage_for_certname(config, certname)
        if lineage:
            if domains:
                if set(cert_manager.domains_for_certname(config, certname)) != set(domains):
                    # Domain set changed: confirm with the user before
                    # renewing the lineage with the new names.
                    _ask_user_to_confirm_new_names(config, domains, certname,
                                                   lineage.names())  # raises if no
                    return "renew", lineage
            # unnecessarily specified domains or no domains specified
            return _handle_identical_cert_request(config, lineage)
        else:
            if domains:
                return "newcert", None
            else:
                raise errors.ConfigurationError("No certificate with name {0} found. "
                    "Use -d to specify domains, or run certbot --certificates to see "
                    "possible certificate names.".format(certname))
def _get_added_removed(after, before):
"""Get lists of items removed from `before`
and a lists of items added to `after`
"""
added = list(set(after) - set(before))
removed = list(set(before) - set(after))
added.sort()
removed.sort()
return added, removed
def _format_list(character, list):
"""Format list with given character
"""
formatted = "{br}{ch} " + "{br}{ch} ".join(list)
return formatted.format(
ch=character,
br=os.linesep
)
def _ask_user_to_confirm_new_names(config, new_domains, certname, old_domains):
    """Ask user to confirm update cert certname to contain new_domains.

    :raises errors.ConfigurationError: if the user rejects the change
    """
    if config.renew_with_new_domains:
        # --renew-with-new-domains skips the confirmation entirely.
        return
    added, removed = _get_added_removed(new_domains, old_domains)

    msg = ("You are updating certificate {0} to include new domain(s): {1}{br}{br}"
           "You are also removing previously included domain(s): {2}{br}{br}"
           "Did you intend to make this change?".format(
               certname,
               _format_list("+", added),
               _format_list("-", removed),
               br=os.linesep))
    obj = zope.component.getUtility(interfaces.IDisplay)
    if not obj.yesno(msg, "Update cert", "Cancel", default=True):
        raise errors.ConfigurationError("Specified mismatched cert name and domains.")
def _find_domains_or_certname(config, installer):
    """Retrieve domains and certname from config or user input.

    :returns: ``(domains, certname)``; ``certname`` may be ``None``

    :raises errors.Error: if neither can be determined
    """
    domains = None
    certname = config.certname
    # first, try to get domains from the config
    if config.domains:
        domains = config.domains
    # if we can't do that but we have a certname, get the domains
    # with that certname
    elif certname:
        domains = cert_manager.domains_for_certname(config, certname)

    # that certname might not have existed, or there was a problem.
    # try to get domains from the user.
    if not domains:
        domains = display_ops.choose_names(installer)

    if not domains and not certname:
        raise errors.Error("Please specify --domains, or --installer that "
                           "will help in domain names autodiscovery, or "
                           "--cert-name for an existing certificate name.")

    return domains, certname
def _report_new_cert(config, cert_path, fullchain_path, key_path=None):
    """Reports the creation of a new certificate to the user.

    :param str cert_path: path to cert
    :param str fullchain_path: path to full chain
    :param str key_path: path to private key, if available
    """
    if config.dry_run:
        # Nothing was written to disk; just report the successful dry run.
        _report_successful_dry_run(config)
        return

    assert cert_path and fullchain_path, "No certificates saved to report."

    expiry = crypto_util.notAfter(cert_path).date()
    reporter_util = zope.component.getUtility(interfaces.IReporter)
    # Print the path to fullchain.pem because that's what modern webservers
    # (Nginx and Apache2.4) will want.

    verbswitch = ' with the "certonly" option' if config.verb == "run" else ""
    privkey_statement = 'Your key file has been saved at:{br}{0}{br}'.format(
        key_path, br=os.linesep) if key_path else ""
    # XXX Perhaps one day we could detect the presence of known old webservers
    # and say something more informative here.
    msg = ('Congratulations! Your certificate and chain have been saved at:{br}'
           '{0}{br}{1}'
           'Your cert will expire on {2}. To obtain a new or tweaked version of this '
           'certificate in the future, simply run {3} again{4}. '
           'To non-interactively renew *all* of your certificates, run "{3} renew"'
           .format(fullchain_path, privkey_statement, expiry, cli.cli_command, verbswitch,
               br=os.linesep))
    reporter_util.add_message(msg, reporter_util.MEDIUM_PRIORITY)
def _determine_account(config):
    """Determine which ACME account to use.

    In order to make the renewer (configuration de/serialization) happy,
    if ``config.account`` is ``None``, it will be updated based on the
    user input. Same for ``config.email``.

    :param configuration.NamespaceConfig config: parsed command line arguments

    :returns: Account and optionally ACME client API (byproduct of new
        registration).
    :rtype: `tuple` of `certbot.account.Account` and `acme.client.Client`
    """
    account_storage = account.AccountFileStorage(config)
    # Stays None unless a brand-new registration happens below.
    acme = None

    if config.account is not None:
        # Explicit account id supplied (e.g. from a renewal config file).
        acc = account_storage.load(config.account)
    else:
        accounts = account_storage.find_all()
        if len(accounts) > 1:
            # Multiple accounts on disk: let the user pick one.
            acc = display_ops.choose_account(accounts)
        elif len(accounts) == 1:
            acc = accounts[0]
        else:  # no account registered yet
            if config.email is None and not config.register_unsafely_without_email:
                config.email = display_ops.get_email()

            def _tos_cb(regr):
                # Terms-of-service callback: auto-agree when --agree-tos was
                # given, otherwise prompt interactively.
                if config.tos:
                    return True
                msg = ("Please read the Terms of Service at {0}. You "
                       "must agree in order to register with the ACME "
                       "server at {1}".format(
                           regr.terms_of_service, config.server))
                obj = zope.component.getUtility(interfaces.IDisplay)
                return obj.yesno(msg, "Agree", "Cancel",
                                 cli_flag="--agree-tos", force_interactive=True)

            try:
                acc, acme = client.register(
                    config, account_storage, tos_cb=_tos_cb)
            except errors.MissingCommandlineFlag:
                # Re-raise unchanged so the user sees which flag was needed.
                raise
            except errors.Error as error:
                logger.debug(error, exc_info=True)
                raise errors.Error(
                    "Unable to register an account with ACME server")

    # Record the chosen account id so later (de)serialization can find it.
    config.account = acc.id
    return acc, acme
def _delete_if_appropriate(config):  # pylint: disable=too-many-locals,too-many-branches
    """Does the user want to delete their now-revoked certs? If run in non-interactive mode,
    deleting happens automatically, unless if both `--cert-name` and `--cert-path` were
    specified with conflicting values.

    :param `configuration.NamespaceConfig` config: parsed command line arguments

    :raises `errors.Error`: If anything goes wrong, including bad user input, if an overlapping
        archive dir is found for the specified lineage, etc ...
    """
    display = zope.component.getUtility(interfaces.IDisplay)
    reporter_util = zope.component.getUtility(interfaces.IReporter)
    msg = "Would you like to delete the cert(s) you just revoked?"
    attempt_deletion = display.yesno(msg, yes_label="Yes (recommended)", no_label="No",
                                     force_interactive=True, default=True)
    if not attempt_deletion:
        reporter_util.add_message("Not deleting revoked certs.", reporter_util.LOW_PRIORITY)
        return
    if not (config.certname or config.cert_path):
        raise errors.Error('At least one of --cert-path or --cert-name must be specified.')
    if config.certname and config.cert_path:
        # first, check if certname and cert_path imply the same certs
        implied_cert_name = cert_manager.cert_path_to_lineage(config)
        if implied_cert_name != config.certname:
            # Conflicting flags: show both lineages and let the user pick.
            # BUGFIX: reuse the lineage name computed above instead of running
            # cert_manager.cert_path_to_lineage(config) a second time.
            cert_path_implied_cert_name = implied_cert_name
            cert_path_implied_conf = storage.renewal_file_for_certname(
                config, cert_path_implied_cert_name)
            cert_path_cert = storage.RenewableCert(cert_path_implied_conf, config)
            cert_path_info = cert_manager.human_readable_cert_info(config, cert_path_cert,
                                                                   skip_filter_checks=True)
            cert_name_implied_conf = storage.renewal_file_for_certname(config, config.certname)
            cert_name_cert = storage.RenewableCert(cert_name_implied_conf, config)
            cert_name_info = cert_manager.human_readable_cert_info(config, cert_name_cert)
            msg = ("You specified conflicting values for --cert-path and --cert-name. "
                   "Which did you mean to select?")
            choices = [cert_path_info, cert_name_info]
            try:
                code, index = display.menu(msg,
                                           choices, ok_label="Select", force_interactive=True)
            except errors.MissingCommandlineFlag:
                error_msg = ('To run in non-interactive mode, you must either specify only one of '
                             '--cert-path or --cert-name, or both must point to the same certificate lineages.')
                raise errors.Error(error_msg)
            if code != display_util.OK or index not in range(0, len(choices)):
                raise errors.Error("User ended interaction.")
            if index == 0:
                config.certname = cert_path_implied_cert_name
            else:
                config.cert_path = storage.cert_path_for_cert_name(config, config.certname)
    elif config.cert_path:
        config.certname = cert_manager.cert_path_to_lineage(config)
    else:  # if only config.certname was specified
        config.cert_path = storage.cert_path_for_cert_name(config, config.certname)
    # don't delete if the archive_dir is used by some other lineage
    archive_dir = storage.full_archive_path(
        configobj.ConfigObj(storage.renewal_file_for_certname(config, config.certname)),
        config, config.certname)
    try:
        cert_manager.match_and_check_overlaps(config, [lambda x: archive_dir],
                                              lambda x: x.archive_dir, lambda x: x)
    except errors.OverlappingMatchFound:
        msg = ('Not deleting revoked certs due to overlapping archive dirs. More than '
               'one lineage is using {0}'.format(archive_dir))
        # BUGFIX: pass the message string directly; ''.join(msg) on a str was a no-op.
        reporter_util.add_message(msg, reporter_util.MEDIUM_PRIORITY)
        return
    except Exception as e:
        msg = ('config.default_archive_dir: {0}, config.live_dir: {1}, archive_dir: {2},'
               'original exception: {3}')
        msg = msg.format(config.default_archive_dir, config.live_dir, archive_dir, e)
        raise errors.Error(msg)
    cert_manager.delete(config)
def _init_le_client(config, authenticator, installer):
    """Construct a ``client.Client``, resolving an ACME account only when needed.

    An account (and possibly a fresh registration) is required only when an
    authenticator plugin is in play; install-only flows skip it entirely.
    """
    if authenticator is None:
        # Install-only: no account or ACME API handle is required.
        return client.Client(config, None, authenticator, installer, acme=None)
    acc, acme = _determine_account(config)
    logger.debug("Picked account: %r", acc)
    # XXX
    #crypto_util.validate_key_csr(acc.key)
    return client.Client(config, acc, authenticator, installer, acme=acme)
def unregister(config, unused_plugins):
    """Deactivate account on server.

    Confirms with the user, deactivates the registration on the ACME server,
    then removes the local account files.

    :returns: an error/abort message string, or None on success
    """
    account_storage = account.AccountFileStorage(config)
    accounts = account_storage.find_all()
    reporter_util = zope.component.getUtility(interfaces.IReporter)
    if not accounts:
        return "Could not find existing account to deactivate."
    yesno = zope.component.getUtility(interfaces.IDisplay).yesno
    prompt = ("Are you sure you would like to irrevocably deactivate "
              "your account?")
    wants_deactivate = yesno(prompt, yes_label='Deactivate', no_label='Abort',
                             default=True)
    if not wants_deactivate:
        return "Deactivation aborted."
    acc, acme = _determine_account(config)
    cb_client = client.Client(config, acc, None, None, acme=acme)
    # delete on boulder
    cb_client.acme.deactivate_registration(acc.regr)
    # delete local account files
    # BUGFIX: reuse the AccountFileStorage created above rather than
    # constructing a second, identical instance.
    account_storage.delete(config.account)
    reporter_util.add_message("Account deactivated.", reporter_util.MEDIUM_PRIORITY)
def register(config, unused_plugins):
    """Create or modify accounts on the server.

    Without ``--update-registration`` a brand-new account is registered
    (refusing if one already exists); with it, the existing account's
    contact e-mail address is updated.

    :returns: an error message string on failure, or None on success
    """
    # Portion of _determine_account logic to see whether accounts already
    # exist or not.
    account_storage = account.AccountFileStorage(config)
    accounts = account_storage.find_all()
    reporter_util = zope.component.getUtility(interfaces.IReporter)
    add_msg = lambda m: reporter_util.add_message(m, reporter_util.MEDIUM_PRIORITY)
    # registering a new account
    if not config.update_registration:
        if len(accounts) > 0:
            # TODO: add a flag to register a duplicate account (this will
            # also require extending _determine_account's behavior
            # or else extracting the registration code from there)
            return ("There is an existing account; registration of a "
                    "duplicate account with this command is currently "
                    "unsupported.")
        # _determine_account will register an account
        _determine_account(config)
        return
    # --update-registration
    if len(accounts) == 0:
        return "Could not find an existing account to update."
    if config.email is None:
        if config.register_unsafely_without_email:
            return ("--register-unsafely-without-email provided, however, a "
                    "new e-mail address must\ncurrently be provided when "
                    "updating a registration.")
        # An e-mail address is mandatory when updating: prompt for it.
        config.email = display_ops.get_email(optional=False)
    acc, acme = _determine_account(config)
    cb_client = client.Client(config, acc, None, None, acme=acme)
    # We rely on an exception to interrupt this process if it didn't work.
    acc.regr = cb_client.acme.update_registration(acc.regr.update(
        body=acc.regr.body.update(contact=('mailto:' + config.email,))))
    account_storage.save_regr(acc, cb_client.acme)
    eff.handle_subscription(config)
    add_msg("Your e-mail address was updated to {0}.".format(config.email))
def _install_cert(config, le_client, domains, lineage=None):
    """Deploy an existing certificate and enable installer enhancements.

    :param lineage: lineage whose key/cert/chain paths should be deployed;
        when ``None``, the paths on ``config`` are used instead.
    :raises errors.Error: if no certificate path is available
    """
    path_provider = lineage if lineage else config
    # BUGFIX: an ``assert`` is silently stripped under ``python -O``;
    # raise an explicit error instead.
    if path_provider.cert_path is None:
        raise errors.Error("No certificate path available to install.")
    le_client.deploy_certificate(domains, path_provider.key_path,
                                 path_provider.cert_path, path_provider.chain_path,
                                 path_provider.fullchain_path)
    le_client.enhance_config(domains, path_provider.chain_path)
def install(config, plugins):
    """Install a previously obtained cert in a server.

    :returns: an error message string if plugin selection fails, else None
    """
    # XXX: Update for renewer/RenewableCert
    # FIXME: be consistent about whether errors are raised or returned from
    # this function ...
    try:
        installer, _ = plug_sel.choose_configurator_plugins(config, plugins, "install")
    except errors.PluginSelectionError as e:
        # Plugin-selection failures are reported via the return value here.
        return str(e)
    domains, _ = _find_domains_or_certname(config, installer)
    # No authenticator needed: we only deploy an already-obtained cert.
    le_client = _init_le_client(config, authenticator=None, installer=installer)
    _install_cert(config, le_client, domains)
def plugins_cmd(config, plugins):
    """List server software plugins.

    Progressively narrows the plugin set (visible -> interface-filtered ->
    initialized/verified -> prepared) and prints the survivors at the
    stage requested via ``--init`` / ``--prepare``.
    """
    logger.debug("Expected interfaces: %s", config.ifaces)
    ifaces = [] if config.ifaces is None else config.ifaces
    filtered = plugins.visible().ifaces(ifaces)
    logger.debug("Filtered plugins: %r", filtered)
    notify = functools.partial(zope.component.getUtility(
        interfaces.IDisplay).notification, pause=False)
    if not config.init and not config.prepare:
        # Listing only: no plugin initialization requested.
        notify(str(filtered))
        return
    filtered.init(config)
    verified = filtered.verify(ifaces)
    logger.debug("Verified plugins: %r", verified)
    if not config.prepare:
        notify(str(verified))
        return
    verified.prepare()
    available = verified.available()
    logger.debug("Prepared plugins: %s", available)
    notify(str(available))
def rollback(config, plugins):
    """Roll back server configuration changes made during install.

    Replays ``config.checkpoints`` saved checkpoints in reverse using the
    configured installer plugin.
    """
    client.rollback(config.installer, config.checkpoints, config, plugins)
def config_changes(config, unused_plugins):
    """Show changes made to server config during installation.

    Displays up to ``config.num`` checkpoints together with the
    configuration changes each one recorded.
    """
    client.view_config_changes(config, num=config.num)
def update_symlinks(config, unused_plugins):
    """Update the certificate file family symlinks.

    Uses the renewal configuration to point each ``live/`` symlink back at
    the correct file in its archive directory.
    """
    cert_manager.update_live_symlinks(config)
def rename(config, unused_plugins):
    """Rename a certificate lineage.

    Source and target names are taken from the renewal configuration
    referenced by ``config``.
    """
    cert_manager.rename_lineage(config)
def delete(config, unused_plugins):
    """Delete a certificate lineage.

    Removes the lineage identified by the renewal configuration referenced
    by ``config``.
    """
    cert_manager.delete(config)
def certificates(config, unused_plugins):
    """Print a report of the certificates Certbot currently manages."""
    cert_manager.certificates(config)
def revoke(config, unused_plugins):  # TODO: coop with renewal config
    """Revoke a previously obtained certificate.

    Revocation is signed either with the certificate's own private key
    (``--key-path``) or with the account key; afterwards the user may be
    offered local deletion of the lineage.

    :returns: an error message string on ACME client failure, else None
    """
    # For user-agent construction
    config.installer = config.authenticator = "None"
    if config.key_path is not None:  # revocation by cert key
        # NOTE(review): cert_path/key_path appear to be (filename, contents)
        # pairs — [0] the path, [1] the file data; confirm against CLI parsing.
        logger.debug("Revoking %s using cert key %s",
                     config.cert_path[0], config.key_path[0])
        crypto_util.verify_cert_matches_priv_key(config.cert_path[0], config.key_path[0])
        key = jose.JWK.load(config.key_path[1])
    else:  # revocation by account key
        logger.debug("Revoking %s using Account Key", config.cert_path[0])
        acc, _ = _determine_account(config)
        key = acc.key
    acme = client.acme_from_config_key(config, key)
    cert = crypto_util.pyopenssl_load_certificate(config.cert_path[1])[0]
    logger.debug("Reason code for revocation: %s", config.reason)
    try:
        acme.revoke(jose.ComparableX509(cert), config.reason)
        _delete_if_appropriate(config)
    except acme_errors.ClientError as e:
        return str(e)
    display_ops.success_revocation(config.cert_path[0])
def run(config, plugins):  # pylint: disable=too-many-branches,too-many-locals
    """Obtain a certificate and install."""
    # TODO: Make run as close to auth + install as possible
    # Possible difficulties: config.csr was hacked into auth
    try:
        installer, authenticator = plug_sel.choose_configurator_plugins(config, plugins, "run")
    except errors.PluginSelectionError as e:
        return str(e)
    # TODO: Handle errors from _init_le_client?
    le_client = _init_le_client(config, authenticator, installer)
    domains, certname = _find_domains_or_certname(config, installer)
    should_get_cert, lineage = _find_cert(config, domains, certname)
    new_lineage = lineage
    if should_get_cert:
        # Obtain (or renew) the cert before installing it.
        new_lineage = _get_and_save_cert(le_client, config, domains,
                                         certname, lineage)
    cert_path = new_lineage.cert_path if new_lineage else None
    fullchain_path = new_lineage.fullchain_path if new_lineage else None
    key_path = new_lineage.key_path if new_lineage else None
    _report_new_cert(config, cert_path, fullchain_path, key_path)
    _install_cert(config, le_client, domains, new_lineage)
    # Fresh lineage (or nothing fetched) reads as an installation; an
    # existing lineage that was refreshed reads as a renewal.
    if lineage is None or not should_get_cert:
        display_ops.success_installation(domains)
    else:
        display_ops.success_renewal(domains)
    _suggest_donation_if_appropriate(config)
def _csr_get_and_save_cert(config, le_client):
    """Obtain a cert using a user-supplied CSR.

    This works differently in the CSR case (for now) because we don't
    have the privkey, and therefore can't construct the files for a lineage.
    So we just save the cert & chain to disk :/

    :returns: ``(cert_path, fullchain_path)``; both ``None`` on a dry run
    """
    csr, _ = config.actual_csr
    certr, chain = le_client.obtain_certificate_from_csr(config.domains, csr)
    if config.dry_run:
        logger.debug(
            "Dry run: skipping saving certificate to %s", config.cert_path)
        return None, None
    cert_path, _, fullchain_path = le_client.save_certificate(
        certr, chain, config.cert_path, config.chain_path, config.fullchain_path)
    return cert_path, fullchain_path
def renew_cert(config, plugins, lineage):
    """Renew & save an existing cert. Do not install it.

    :param lineage: the existing cert lineage to renew
    :raises errors.PluginSelectionError: if no appropriate plugin is found
    """
    try:
        # installers are used in auth mode to determine domain names
        installer, auth = plug_sel.choose_configurator_plugins(config, plugins, "certonly")
    except errors.PluginSelectionError as e:
        logger.info("Could not choose appropriate plugin: %s", e)
        raise
    le_client = _init_le_client(config, auth, installer)
    _get_and_save_cert(le_client, config, lineage=lineage)
    notify = zope.component.getUtility(interfaces.IDisplay).notification
    if installer is None:
        # No installer: nothing to restart; just report the new fullchain.
        notify("new certificate deployed without reload, fullchain is {0}".format(
            lineage.fullchain), pause=False)
    else:
        # In case of a renewal, reload server to pick up new certificate.
        # In principle we could have a configuration option to inhibit this
        # from happening.
        installer.restart()
        notify("new certificate deployed with reload of {0} server; fullchain is {1}".format(
            config.installer, lineage.fullchain), pause=False)
def certonly(config, plugins):
    """Authenticate & obtain cert, but do not install it.

    This implements the 'certonly' subcommand.
    """
    # SETUP: Select plugins and construct a client instance
    try:
        # installers are used in auth mode to determine domain names
        installer, auth = plug_sel.choose_configurator_plugins(config, plugins, "certonly")
    except errors.PluginSelectionError as e:
        logger.info("Could not choose appropriate plugin: %s", e)
        raise
    le_client = _init_le_client(config, auth, installer)
    if config.csr:
        # User supplied their own CSR: no lineage can be built (we lack the
        # private key), so take the simplified save-to-disk path.
        cert_path, fullchain_path = _csr_get_and_save_cert(config, le_client)
        _report_new_cert(config, cert_path, fullchain_path)
        _suggest_donation_if_appropriate(config)
        return
    domains, certname = _find_domains_or_certname(config, installer)
    should_get_cert, lineage = _find_cert(config, domains, certname)
    if not should_get_cert:
        notify = zope.component.getUtility(interfaces.IDisplay).notification
        notify("Certificate not yet due for renewal; no action taken.", pause=False)
        return
    lineage = _get_and_save_cert(le_client, config, domains, certname, lineage)
    cert_path = lineage.cert_path if lineage else None
    fullchain_path = lineage.fullchain_path if lineage else None
    key_path = lineage.key_path if lineage else None
    _report_new_cert(config, cert_path, fullchain_path, key_path)
    _suggest_donation_if_appropriate(config)
def renew(config, unused_plugins):
    """Renew previously-obtained certificates.

    Any post-renewal hooks saved during the run are always executed, even
    when the renewal itself raises.
    """
    try:
        renewal.handle_renewal_request(config)
    finally:
        hooks.run_saved_post_hooks()
def make_or_verify_needed_dirs(config):
    """Create or verify existence of config, work, and hook directories."""
    # Core directories get the stricter set_up_core_dir treatment.
    for core_dir in (config.config_dir, config.work_dir):
        util.set_up_core_dir(core_dir, constants.CONFIG_DIRS_MODE,
                             os.geteuid(), config.strict_permissions)
    # Hook directories only need to exist with sane ownership.
    for hook_dir in (config.renewal_pre_hooks_dir,
                     config.renewal_deploy_hooks_dir,
                     config.renewal_post_hooks_dir):
        util.make_or_verify_dir(hook_dir,
                                uid=os.geteuid(),
                                strict=config.strict_permissions)
def set_displayer(config):
    """Register the zope IDisplay utility matching the interactivity settings."""
    if config.quiet:
        # Quiet implies non-interactive, with all output discarded.
        config.noninteractive_mode = True
    if config.noninteractive_mode:
        stream = open(os.devnull, "w") if config.quiet else sys.stdout
        displayer = display_util.NoninteractiveDisplay(stream)
    else:
        displayer = display_util.FileDisplay(sys.stdout,
                                             config.force_interactive)
    zope.component.provideUtility(displayer)
def main(cli_args=None):
    """Command line argument parsing and main script execution.

    :param cli_args: list of command-line arguments; defaults to
        ``sys.argv[1:]``.  (BUGFIX: the old default ``sys.argv[1:]`` was
        evaluated once at import time, so callers mutating ``sys.argv``
        afterwards would silently see stale arguments.)
    :returns: the selected subcommand's return value (error string or None),
        suitable for passing to ``sys.exit``
    """
    if cli_args is None:
        cli_args = sys.argv[1:]
    log.pre_arg_parse_setup()
    plugins = plugins_disco.PluginsRegistry.find_all()
    logger.debug("certbot version: %s", certbot.__version__)
    # do not log `config`, as it contains sensitive data (e.g. revoke --key)!
    logger.debug("Arguments: %r", cli_args)
    logger.debug("Discovered plugins: %r", plugins)
    # note: arg parser internally handles --help (and exits afterwards)
    args = cli.prepare_and_parse_args(plugins, cli_args)
    config = configuration.NamespaceConfig(args)
    zope.component.provideUtility(config)
    try:
        log.post_arg_parse_setup(config)
        make_or_verify_needed_dirs(config)
    except errors.Error:
        # Let plugins_cmd be run as un-privileged user.
        if config.func != plugins_cmd:
            raise
    set_displayer(config)
    # Reporter
    report = reporter.Reporter(config)
    zope.component.provideUtility(report)
    util.atexit_register(report.print_messages)
    return config.func(config, plugins)
if __name__ == "__main__":
    # Run the CLI; main() returns an error string (or None), which is logged
    # and propagated as the process exit status.
    err_string = main()
    if err_string:
        logger.warning("Exiting with message %s", err_string)
        sys.exit(err_string)  # pragma: no cover
|
#!/bin/env python
import libkeepass
from os import environ
from optparse import OptionParser
def getEntry(obj_root, entryPath, fieldName):
    """Return a specific field from the entry given by entryPath.

    :param obj_root: root object of the opened KeePass database (must
        provide an ``xpath`` method).
    :param entryPath: path to the entry, groups separated by '/'; the last
        component is the entry title.
    :param fieldName: name of the field whose value should be returned.
    :return: the field's value, or None if the entry/field does not exist.
    """
    parts = entryPath.split('/')
    title = parts[-1]
    groups = parts[:-1]
    query = ".//Root/"
    for group in groups:
        query = query + "Group[Name='%s']/" % group
    query = query + ("Entry[String[Key='Title' and Value='%s']]/String[Key='%s']/Value"
                     % (title, fieldName))
    # BUGFIX: reuse the first query result instead of executing the same
    # XPath query a second time.
    result = obj_root.xpath(query)
    if len(result) > 0:
        return result[0]
    else:
        return None
def listEntriesInGroup(obj_root, entryPath):
    """Return the titles of all entries in the given group.

    :param obj_root: root object of the opened KeePass database.
    :param entryPath: path to the group, separated by '/'.
    :return: list of entry title values in that group.
    """
    prefix = "".join("Group[Name='%s']/" % g for g in entryPath.split('/'))
    return obj_root.xpath(".//Root/" + prefix + "Entry/String[Key='Title']/Value")
def listGroupsInGroup(obj_root, entryPath):
    """Return the names of all sub-groups of the given group.

    :param obj_root: root object of the opened KeePass database.
    :param entryPath: path to the group, separated by '/'.
    :return: list of group name values.
    """
    steps = ["Group[Name='%s']/" % g for g in entryPath.split('/')]
    return obj_root.xpath(".//Root/" + "".join(steps) + "Group/Name")
def entryTreeToObject(obj_root, entryPath, fieldNames):
    """Recursively convert the subtree at entryPath into a nested dict.

    Groups become nested dicts; each entry becomes a dict mapping every
    requested field name to its value.
    """
    tree = {}
    for sub_group in listGroupsInGroup(obj_root, entryPath):
        tree[sub_group] = entryTreeToObject(obj_root, entryPath + "/" + sub_group, fieldNames)
    for entry_title in listEntriesInGroup(obj_root, entryPath):
        tree[entry_title] = {name: getEntry(obj_root, entryPath + "/" + entry_title, name)
                             for name in fieldNames}
    return tree
def main():
    """Parse CLI options and run one of: show-entry, list-entries, to-json.

    The database password comes from --pass or, when omitted, from the
    KEEPASS_PASSWORD environment variable.
    """
    env_password = environ.get('KEEPASS_PASSWORD')
    usage = "Usage: %prog command options\n\ncommand may be one of show-entry, list-entries and to-json"
    parser = OptionParser(usage=usage)
    parser.add_option("-f", "--file", dest="filename",
                      help="keepass file", metavar="FILE")
    parser.add_option("-p", "--pass",
                      dest="password", default=env_password,
                      help="password to open keepass file. The password can also be given through the KEEPASS_PASSWORD environment variable")
    parser.add_option("-e", "--entry",
                      dest="entryPath",
                      help="field to retrieve (for example Passwordlist/MyGroup/MyEntry")
    parser.add_option("-n", "--names",
                      dest="fieldNames", default="Password",
                      help="Comma seperated list of fields to retrieve (default: Password, for show-entry only the first one is used)")
    (options, args) = parser.parse_args()
    if len(args) != 1:
        # BUGFIX: message previously read "show-entry or or list-entries" and
        # omitted the to-json command.
        parser.error("You must give exactly one command (show-entry, list-entries or to-json)")
    if args[0] == "show-entry":
        with libkeepass.open(options.filename, password=options.password) as kdb:
            # show-entry uses only the first requested field name.
            print(getEntry(kdb.obj_root, options.entryPath, options.fieldNames.split(",")[0]))
    if args[0] == "list-entries":
        with libkeepass.open(options.filename, password=options.password) as kdb:
            print(listEntriesInGroup(kdb.obj_root, options.entryPath))
    if args[0] == "to-json":
        with libkeepass.open(options.filename, password=options.password) as kdb:
            print(entryTreeToObject(kdb.obj_root, options.entryPath, options.fieldNames.split(",")))
Update keepass_cli.py
Updated help text
#!/bin/env python
import libkeepass
from os import environ
from optparse import OptionParser
def getEntry(obj_root, entryPath, fieldName):
    """Return a specific field from the entry given by entryPath.

    :param obj_root: root object of the opened KeePass database (must
        provide an ``xpath`` method).
    :param entryPath: path to the entry, groups separated by '/'; the last
        component is the entry title.
    :param fieldName: name of the field whose value should be returned.
    :return: the field's value, or None if the entry/field does not exist.
    """
    parts = entryPath.split('/')
    title = parts[-1]
    groups = parts[:-1]
    query = ".//Root/"
    for group in groups:
        query = query + "Group[Name='%s']/" % group
    query = query + ("Entry[String[Key='Title' and Value='%s']]/String[Key='%s']/Value"
                     % (title, fieldName))
    # BUGFIX: reuse the first query result instead of executing the same
    # XPath query a second time.
    result = obj_root.xpath(query)
    if len(result) > 0:
        return result[0]
    else:
        return None
def listEntriesInGroup(obj_root, entryPath):
    """Return the titles of all entries in the given group."""
    query = ".//Root/"
    for name in entryPath.split('/'):
        query += "Group[Name='%s']/" % name
    return obj_root.xpath(query + "Entry/String[Key='Title']/Value")
def listGroupsInGroup(obj_root, entryPath):
    """Return the names of all sub-groups of the given group."""
    query = ".//Root/"
    for name in entryPath.split('/'):
        query += "Group[Name='%s']/" % name
    return obj_root.xpath(query + "Group/Name")
def entryTreeToObject(obj_root, entryPath, fieldNames):
    """Recursively build a nested dict mirroring the subtree at entryPath.

    Sub-groups map to nested dicts; entries map each requested field name
    to its value.
    """
    out = {}
    for sub in listGroupsInGroup(obj_root, entryPath):
        out[sub] = entryTreeToObject(obj_root, entryPath + "/" + sub, fieldNames)
    for title in listEntriesInGroup(obj_root, entryPath):
        out[title] = {f: getEntry(obj_root, entryPath + "/" + title, f)
                      for f in fieldNames}
    return out
def main():
    """Parse command-line options and dispatch one of the supported commands.

    Commands: show-entry, list-entries, to-json. The database password comes
    from --pass or, when omitted, the KEEPASS_PASSWORD environment variable.
    """
    env_password = environ.get('KEEPASS_PASSWORD')
    usage = "Usage: %prog command options\n\ncommand my be one of show-entry, list-entries and to-json"
    parser = OptionParser(usage=usage)
    parser.add_option("-f", "--file", dest="filename",
                      help="keepass file", metavar="FILE")
    parser.add_option("-p", "--pass",
                      dest="password", default=env_password,
                      help="password to open keepass file. The password can also be given through the KEEPASS_PASSWORD environment variable")
    parser.add_option("-e", "--entry",
                      dest="entryPath",
                      help="field to retrieve (for example Passwordlist/MyGroup/MyEntry")
    parser.add_option("-n", "--names",
                      dest="fieldNames", default="Password",
                      help="Comma seperated list of fields to retrieve (default: Password, for show-entry only the first one is used)")
    (options, args) = parser.parse_args()
    if len(args) != 1:
        parser.error("You must give exactly one command (show-entry, list-entries or to-json)");
    if args[0] == "show-entry":
        with libkeepass.open(options.filename, password=options.password) as kdb:
            # show-entry uses only the first requested field name.
            print(getEntry(kdb.obj_root,options.entryPath,options.fieldNames.split(",")[0]));
    if args[0] == "list-entries":
        with libkeepass.open(options.filename, password=options.password) as kdb:
            print(listEntriesInGroup(kdb.obj_root, options.entryPath));
    if args[0] == "to-json":
        with libkeepass.open(options.filename, password=options.password) as kdb:
            print(entryTreeToObject(kdb.obj_root, options.entryPath, options.fieldNames.split(",")))
|
import time
from contextlib import contextmanager
from typing import Any, Callable, Iterator
from unittest import mock, skipUnless
import DNS
from django.conf import settings
from django.core.exceptions import ValidationError
from django.http import HttpResponse
from django.utils.timezone import now as timezone_now
from zerver.forms import email_is_not_mit_mailing_list
from zerver.lib.rate_limiter import (
RateLimitedIPAddr,
RateLimitedUser,
RateLimiterLockingException,
add_ratelimit_rule,
remove_ratelimit_rule,
)
from zerver.lib.test_classes import ZulipTestCase
from zerver.lib.zephyr import compute_mit_user_fullname
from zerver.models import PushDeviceToken, UserProfile
if settings.ZILENCER_ENABLED:
from zilencer.models import RateLimitedRemoteZulipServer, RemoteZulipServer
class MITNameTest(ZulipTestCase):
    """Tests for resolving MIT (Athena/Hesiod) full names and mailing lists.

    All DNS traffic is mocked via ``DNS.dnslookup``; no network access occurs.
    """
    def test_valid_hesiod(self) -> None:
        # A well-formed passwd-style Hesiod record yields the GECOS full name.
        with mock.patch(
            "DNS.dnslookup",
            return_value=[
                ["starnine:*:84233:101:Athena Consulting Exchange User,,,:/mit/starnine:/bin/bash"]
            ],
        ):
            self.assertEqual(
                compute_mit_user_fullname(self.mit_email("starnine")),
                "Athena Consulting Exchange User",
            )
        with mock.patch(
            "DNS.dnslookup",
            return_value=[["sipbexch:*:87824:101:Exch Sipb,,,:/mit/sipbexch:/bin/athena/bash"]],
        ):
            self.assertEqual(compute_mit_user_fullname("sipbexch@mit.edu"), "Exch Sipb")
    def test_invalid_hesiod(self) -> None:
        # On NXDOMAIN the e-mail address itself is returned unchanged.
        with mock.patch(
            "DNS.dnslookup", side_effect=DNS.Base.ServerError("DNS query status: NXDOMAIN", 3)
        ):
            self.assertEqual(compute_mit_user_fullname("1234567890@mit.edu"), "1234567890@mit.edu")
        with mock.patch(
            "DNS.dnslookup", side_effect=DNS.Base.ServerError("DNS query status: NXDOMAIN", 3)
        ):
            self.assertEqual(compute_mit_user_fullname("ec-discuss@mit.edu"), "ec-discuss@mit.edu")
    def test_mailinglist(self) -> None:
        # Addresses without a Hesiod record are treated as mailing lists and rejected.
        with mock.patch(
            "DNS.dnslookup", side_effect=DNS.Base.ServerError("DNS query status: NXDOMAIN", 3)
        ):
            self.assertRaises(ValidationError, email_is_not_mit_mailing_list, "1234567890@mit.edu")
        with mock.patch(
            "DNS.dnslookup", side_effect=DNS.Base.ServerError("DNS query status: NXDOMAIN", 3)
        ):
            self.assertRaises(ValidationError, email_is_not_mit_mailing_list, "ec-discuss@mit.edu")
    def test_notmailinglist(self) -> None:
        # A POP/IMAP record marks a real user mailbox; no exception expected.
        with mock.patch("DNS.dnslookup", return_value=[["POP IMAP.EXCHANGE.MIT.EDU starnine"]]):
            email_is_not_mit_mailing_list("sipbexch@mit.edu")
@contextmanager
def rate_limit_rule(range_seconds: int, num_requests: int, domain: str) -> Iterator[None]:
    """Temporarily install a rate-limit rule for *domain* while the block runs.

    Also clears the localhost IP's rate-limit history first, so earlier tests
    cannot cause spurious throttling.
    """
    RateLimitedIPAddr("127.0.0.1", domain=domain).clear_history()
    add_ratelimit_rule(range_seconds, num_requests, domain=domain)
    try:
        yield
    finally:
        # We need this in a finally block to ensure the test cleans up after itself
        # even in case of failure, to avoid polluting the rules state.
        remove_ratelimit_rule(range_seconds, num_requests, domain=domain)
class RateLimitTests(ZulipTestCase):
    def setUp(self) -> None:
        """Enable rate limiting with a 5-requests-per-second rule and warm up the API."""
        super().setUp()
        # Some tests here can be somewhat timing-sensitive in a way
        # that can't be eliminated, e.g. due to testing things that rely
        # on Redis' internal timing mechanism which we can't mock.
        # The first API request when running a suite of tests is slow
        # and can take multiple seconds. This is not a problem when running
        # multiple tests, but if an individual, time-sensitive test from this class
        # is run, the first API request it makes taking a lot of time can throw things off
        # and cause the test to fail. Thus we do a dummy API request here to warm up
        # the system and allow the tests to assume their requests won't take multiple seconds.
        user = self.example_user("hamlet")
        self.api_get(user, "/api/v1/messages")
        settings.RATE_LIMITING = True
        add_ratelimit_rule(1, 5)
def tearDown(self) -> None:
settings.RATE_LIMITING = False
remove_ratelimit_rule(1, 5)
super().tearDown()
def send_api_message(self, user: UserProfile, content: str) -> HttpResponse:
return self.api_post(
user,
"/api/v1/messages",
{
"type": "stream",
"to": "Verona",
"client": "test suite",
"content": content,
"topic": "whatever",
},
)
    def send_unauthed_api_request(self, **kwargs: Any) -> HttpResponse:
        """Issue an unauthenticated GET to /json/messages and return the response.

        Extra kwargs (e.g. REMOTE_ADDR) are forwarded to the test client.
        """
        result = self.client_get("/json/messages", **kwargs)
        # We're not making a correct request here, but rate-limiting is supposed
        # to happen before the request fails due to not being correctly made. Thus
        # we expect either an 400 error if the request is allowed by the rate limiter,
        # or 429 if we're above the limit. We don't expect to see other status codes here,
        # so we assert for safety.
        self.assertIn(result.status_code, [400, 429])
        return result
def test_headers(self) -> None:
user = self.example_user("hamlet")
RateLimitedUser(user).clear_history()
result = self.send_api_message(user, "some stuff")
self.assertTrue("X-RateLimit-Remaining" in result)
self.assertTrue("X-RateLimit-Limit" in result)
self.assertTrue("X-RateLimit-Reset" in result)
def test_ratelimit_decrease(self) -> None:
user = self.example_user("hamlet")
RateLimitedUser(user).clear_history()
result = self.send_api_message(user, "some stuff")
limit = int(result["X-RateLimit-Remaining"])
result = self.send_api_message(user, "some stuff 2")
newlimit = int(result["X-RateLimit-Remaining"])
self.assertEqual(limit, newlimit + 1)
    def do_test_hit_ratelimits(
        self,
        request_func: Callable[[], HttpResponse],
        is_json: bool = True,
    ) -> HttpResponse:
        """Drive request_func past the 5-per-second rule and verify throttling.

        Sends 6 requests at mocked 0.1s intervals: the first 5 must not be
        throttled; the 6th must get a 429 whose body matches either the JSON
        API error shape (is_json=True) or the user-facing HTML page
        (is_json=False). Finally checks the limit is forgiven once the window
        passes.

        NOTE(review): annotated ``-> HttpResponse`` but no value is returned;
        confirm callers ignore the return value.
        """
        def api_assert_func(result: HttpResponse) -> None:
            # API clients receive a structured JSON error plus Retry-After info.
            self.assertEqual(result.status_code, 429)
            self.assertEqual(result.headers["Content-Type"], "application/json")
            json = result.json()
            self.assertEqual(json.get("result"), "error")
            self.assertIn("API usage exceeded rate limit", json.get("msg"))
            # 0.5s left in the 1s window after 5 requests spaced 0.1s apart.
            self.assertEqual(json.get("retry-after"), 0.5)
            self.assertTrue("Retry-After" in result)
            self.assertEqual(result["Retry-After"], "0.5")
        def user_facing_assert_func(result: HttpResponse) -> None:
            # Browser-style endpoints render an HTML error page instead.
            self.assertEqual(result.status_code, 429)
            self.assertNotEqual(result.headers["Content-Type"], "application/json")
            self.assert_in_response("Rate limit exceeded.", result)
        if is_json:
            assert_func = api_assert_func
        else:
            assert_func = user_facing_assert_func
        start_time = time.time()
        for i in range(6):
            with mock.patch("time.time", return_value=(start_time + i * 0.1)):
                result = request_func()
                if i < 5:
                    self.assertNotEqual(result.status_code, 429)
        # After the loop, `result` holds the 6th (throttled) response.
        assert_func(result)
        # We simulate waiting a second here, rather than force-clearing our history,
        # to make sure the rate-limiting code automatically forgives a user
        # after some time has passed.
        with mock.patch("time.time", return_value=(start_time + 1.01)):
            result = request_func()
            self.assertNotEqual(result.status_code, 429)
def test_hit_ratelimits_as_user(self) -> None:
user = self.example_user("cordelia")
RateLimitedUser(user).clear_history()
self.do_test_hit_ratelimits(lambda: self.send_api_message(user, "some stuff"))
@rate_limit_rule(1, 5, domain="api_by_ip")
def test_hit_ratelimits_as_ip(self) -> None:
self.do_test_hit_ratelimits(self.send_unauthed_api_request)
# Other IPs should not be rate-limited
resp = self.send_unauthed_api_request(REMOTE_ADDR="127.0.0.2")
self.assertNotEqual(resp.status_code, 429)
@rate_limit_rule(1, 5, domain="sends_email_by_ip")
def test_create_realm_rate_limiting(self) -> None:
with self.settings(OPEN_REALM_CREATION=True):
self.do_test_hit_ratelimits(
lambda: self.client_post("/new/", {"email": "new@zulip.com"}),
is_json=False,
)
@rate_limit_rule(1, 5, domain="sends_email_by_ip")
def test_find_account_rate_limiting(self) -> None:
self.do_test_hit_ratelimits(
lambda: self.client_post("/accounts/find/", {"emails": "new@zulip.com"}),
is_json=False,
)
# Test whether submitting multiple emails is handled correctly.
# The limit is set to 10 per second, so 5 requests with 2 emails
# submitted in each should be allowed.
@rate_limit_rule(1, 10, domain="sends_email_by_ip")
def test_find_account_rate_limiting_multiple(self) -> None:
self.do_test_hit_ratelimits(
lambda: self.client_post("/accounts/find/", {"emails": "new@zulip.com,new2@zulip.com"}),
is_json=False,
)
    @rate_limit_rule(1, 5, domain="sends_email_by_ip")
    def test_combined_ip_limits(self) -> None:
        """/new/ and /accounts/find/ share a single per-IP email rate limit."""
        # Alternate requests to /new/ and /accounts/find/
        request_count = 0
        def alternate_requests() -> HttpResponse:
            nonlocal request_count
            request_count += 1
            if request_count % 2 == 1:
                return self.client_post("/new/", {"email": "new@zulip.com"})
            else:
                return self.client_post("/accounts/find/", {"emails": "new@zulip.com"})
        self.do_test_hit_ratelimits(alternate_requests, is_json=False)
    @skipUnless(settings.ZILENCER_ENABLED, "requires zilencer")
    @rate_limit_rule(1, 5, domain="api_by_remote_server")
    def test_hit_ratelimits_as_remote_server(self) -> None:
        """Push-bouncer requests from a remote Zulip server are rate-limited per server."""
        server_uuid = "1234-abcd"
        server = RemoteZulipServer(
            uuid=server_uuid,
            api_key="magic_secret_api_key",
            hostname="demo.example.com",
            last_updated=timezone_now(),
        )
        server.save()
        endpoint = "/api/v1/remotes/push/register"
        payload = {"user_id": 10, "token": "111222", "token_kind": PushDeviceToken.GCM}
        try:
            # Remote servers can only make requests to the root subdomain.
            original_default_subdomain = self.DEFAULT_SUBDOMAIN
            self.DEFAULT_SUBDOMAIN = ""
            RateLimitedRemoteZulipServer(server).clear_history()
            with self.assertLogs("zerver.lib.rate_limiter", level="WARNING") as m:
                self.do_test_hit_ratelimits(lambda: self.uuid_post(server_uuid, endpoint, payload))
            # Hitting the limit should log exactly one warning identifying the server.
            self.assertEqual(
                m.output,
                [
                    "WARNING:zerver.lib.rate_limiter:Remote server <RemoteZulipServer demo.example.com 1234-abcd> exceeded rate limits on domain api_by_remote_server"
                ],
            )
        finally:
            # Restore the subdomain even if the test body fails.
            self.DEFAULT_SUBDOMAIN = original_default_subdomain
    def test_hit_ratelimiterlockingexception(self) -> None:
        """A backend deadlock (RateLimiterLockingException) rejects the request with 429 and logs a warning."""
        user = self.example_user("cordelia")
        RateLimitedUser(user).clear_history()
        with mock.patch(
            "zerver.lib.rate_limiter.RedisRateLimiterBackend.incr_ratelimit",
            side_effect=RateLimiterLockingException,
        ):
            with self.assertLogs("zerver.lib.rate_limiter", level="WARNING") as m:
                result = self.send_api_message(user, "some stuff")
                self.assertEqual(result.status_code, 429)
            self.assertEqual(
                m.output,
                [
                    "WARNING:zerver.lib.rate_limiter:Deadlock trying to incr_ratelimit for {}".format(
                        f"RateLimitedUser:{user.id}:api_by_user"
                    )
                ],
            )
tests: Hitting a rate-limit on find accounts should not send emails.
import time
from contextlib import contextmanager
from typing import Any, Callable, Iterator
from unittest import mock, skipUnless
import DNS
from django.conf import settings
from django.core.exceptions import ValidationError
from django.http import HttpResponse
from django.utils.timezone import now as timezone_now
from zerver.forms import email_is_not_mit_mailing_list
from zerver.lib.rate_limiter import (
RateLimitedIPAddr,
RateLimitedUser,
RateLimiterLockingException,
add_ratelimit_rule,
remove_ratelimit_rule,
)
from zerver.lib.test_classes import ZulipTestCase
from zerver.lib.zephyr import compute_mit_user_fullname
from zerver.models import PushDeviceToken, UserProfile
if settings.ZILENCER_ENABLED:
from zilencer.models import RateLimitedRemoteZulipServer, RemoteZulipServer
class MITNameTest(ZulipTestCase):
    """Tests for MIT Hesiod-DNS-based full-name resolution and mailing-list checks."""
    def test_valid_hesiod(self) -> None:
        # A successful Hesiod lookup returns a passwd-style record; the
        # expected full name is taken from its GECOS-style field.
        with mock.patch(
            "DNS.dnslookup",
            return_value=[
                ["starnine:*:84233:101:Athena Consulting Exchange User,,,:/mit/starnine:/bin/bash"]
            ],
        ):
            self.assertEqual(
                compute_mit_user_fullname(self.mit_email("starnine")),
                "Athena Consulting Exchange User",
            )
        with mock.patch(
            "DNS.dnslookup",
            return_value=[["sipbexch:*:87824:101:Exch Sipb,,,:/mit/sipbexch:/bin/athena/bash"]],
        ):
            self.assertEqual(compute_mit_user_fullname("sipbexch@mit.edu"), "Exch Sipb")
    def test_invalid_hesiod(self) -> None:
        # On a DNS lookup failure, the email address itself is expected back.
        with mock.patch(
            "DNS.dnslookup", side_effect=DNS.Base.ServerError("DNS query status: NXDOMAIN", 3)
        ):
            self.assertEqual(compute_mit_user_fullname("1234567890@mit.edu"), "1234567890@mit.edu")
        with mock.patch(
            "DNS.dnslookup", side_effect=DNS.Base.ServerError("DNS query status: NXDOMAIN", 3)
        ):
            self.assertEqual(compute_mit_user_fullname("ec-discuss@mit.edu"), "ec-discuss@mit.edu")
    def test_mailinglist(self) -> None:
        # Addresses whose Hesiod lookup fails are rejected as mailing lists.
        with mock.patch(
            "DNS.dnslookup", side_effect=DNS.Base.ServerError("DNS query status: NXDOMAIN", 3)
        ):
            self.assertRaises(ValidationError, email_is_not_mit_mailing_list, "1234567890@mit.edu")
        with mock.patch(
            "DNS.dnslookup", side_effect=DNS.Base.ServerError("DNS query status: NXDOMAIN", 3)
        ):
            self.assertRaises(ValidationError, email_is_not_mit_mailing_list, "ec-discuss@mit.edu")
    def test_notmailinglist(self) -> None:
        # A pobox-style record means a real mailbox: no exception is raised.
        with mock.patch("DNS.dnslookup", return_value=[["POP IMAP.EXCHANGE.MIT.EDU starnine"]]):
            email_is_not_mit_mailing_list("sipbexch@mit.edu")
@contextmanager
def rate_limit_rule(range_seconds: int, num_requests: int, domain: str) -> Iterator[None]:
    """Temporarily install a rule of `num_requests` per `range_seconds`
    for `domain`, removing it on exit.

    Because @contextmanager objects also work as decorators, this is
    applied directly to test methods below.
    """
    # Clear the test client IP's history so earlier tests can't cause
    # spurious rate-limit hits inside the decorated test.
    RateLimitedIPAddr("127.0.0.1", domain=domain).clear_history()
    add_ratelimit_rule(range_seconds, num_requests, domain=domain)
    try:
        yield
    finally:
        # We need this in a finally block to ensure the test cleans up after itself
        # even in case of failure, to avoid polluting the rules state.
        remove_ratelimit_rule(range_seconds, num_requests, domain=domain)
class RateLimitTests(ZulipTestCase):
    """Tests for request rate limiting across the rate-limited domains:
    per-user API, per-IP, per-IP email-sending, and per-remote-server.
    """
    def setUp(self) -> None:
        super().setUp()
        # Some tests here can be somewhat timing-sensitive in a way
        # that can't be eliminated, e.g. due to testing things that rely
        # on Redis' internal timing mechanism which we can't mock.
        # The first API request when running a suite of tests is slow
        # and can take multiple seconds. This is not a problem when running
        # multiple tests, but if an individual, time-sensitive test from this class
        # is run, the first API request it makes taking a lot of time can throw things off
        # and cause the test to fail. Thus we do a dummy API request here to warm up
        # the system and allow the tests to assume their requests won't take multiple seconds.
        user = self.example_user("hamlet")
        self.api_get(user, "/api/v1/messages")
        settings.RATE_LIMITING = True
        add_ratelimit_rule(1, 5)
    def tearDown(self) -> None:
        # Undo the setUp changes so other test classes are unaffected.
        settings.RATE_LIMITING = False
        remove_ratelimit_rule(1, 5)
        super().tearDown()
    def send_api_message(self, user: UserProfile, content: str) -> HttpResponse:
        """Send a stream message as `user`; used to exercise per-user limits."""
        return self.api_post(
            user,
            "/api/v1/messages",
            {
                "type": "stream",
                "to": "Verona",
                "client": "test suite",
                "content": content,
                "topic": "whatever",
            },
        )
    def send_unauthed_api_request(self, **kwargs: Any) -> HttpResponse:
        """Make an unauthenticated request; used to exercise per-IP limits."""
        result = self.client_get("/json/messages", **kwargs)
        # We're not making a correct request here, but rate-limiting is supposed
        # to happen before the request fails due to not being correctly made. Thus
        # we expect either an 400 error if the request is allowed by the rate limiter,
        # or 429 if we're above the limit. We don't expect to see other status codes here,
        # so we assert for safety.
        self.assertIn(result.status_code, [400, 429])
        return result
    def test_headers(self) -> None:
        """Responses carry the standard X-RateLimit-* headers."""
        user = self.example_user("hamlet")
        RateLimitedUser(user).clear_history()
        result = self.send_api_message(user, "some stuff")
        self.assertTrue("X-RateLimit-Remaining" in result)
        self.assertTrue("X-RateLimit-Limit" in result)
        self.assertTrue("X-RateLimit-Reset" in result)
    def test_ratelimit_decrease(self) -> None:
        """Each request decrements X-RateLimit-Remaining by exactly one."""
        user = self.example_user("hamlet")
        RateLimitedUser(user).clear_history()
        result = self.send_api_message(user, "some stuff")
        limit = int(result["X-RateLimit-Remaining"])
        result = self.send_api_message(user, "some stuff 2")
        newlimit = int(result["X-RateLimit-Remaining"])
        self.assertEqual(limit, newlimit + 1)
    def do_test_hit_ratelimits(
        self,
        request_func: Callable[[], HttpResponse],
        is_json: bool = True,
    ) -> None:
        # NOTE: annotation fixed from HttpResponse -> None; this helper
        # asserts in place and has no return statement.
        """Shared driver: issue requests via `request_func` until the
        5-per-second limit is exceeded, assert the shape of the 429
        response, then verify the limit resets after the window passes.
        """
        def api_assert_func(result: HttpResponse) -> None:
            # JSON endpoints return a structured error with retry information.
            self.assertEqual(result.status_code, 429)
            self.assertEqual(result.headers["Content-Type"], "application/json")
            json = result.json()
            self.assertEqual(json.get("result"), "error")
            self.assertIn("API usage exceeded rate limit", json.get("msg"))
            self.assertEqual(json.get("retry-after"), 0.5)
            self.assertTrue("Retry-After" in result)
            self.assertEqual(result["Retry-After"], "0.5")
        def user_facing_assert_func(result: HttpResponse) -> None:
            # Browser-facing endpoints render an HTML error page instead.
            self.assertEqual(result.status_code, 429)
            self.assertNotEqual(result.headers["Content-Type"], "application/json")
            self.assert_in_response("Rate limit exceeded.", result)
        if is_json:
            assert_func = api_assert_func
        else:
            assert_func = user_facing_assert_func
        start_time = time.time()
        for i in range(6):
            # Freeze time so all six requests land inside the 1-second window.
            with mock.patch("time.time", return_value=(start_time + i * 0.1)):
                result = request_func()
                if i < 5:
                    self.assertNotEqual(result.status_code, 429)
        assert_func(result)
        # We simulate waiting a second here, rather than force-clearing our history,
        # to make sure the rate-limiting code automatically forgives a user
        # after some time has passed.
        with mock.patch("time.time", return_value=(start_time + 1.01)):
            result = request_func()
            self.assertNotEqual(result.status_code, 429)
    def test_hit_ratelimits_as_user(self) -> None:
        """A logged-in user sending messages hits the per-user API limit."""
        user = self.example_user("cordelia")
        RateLimitedUser(user).clear_history()
        self.do_test_hit_ratelimits(lambda: self.send_api_message(user, "some stuff"))
    @rate_limit_rule(1, 5, domain="api_by_ip")
    def test_hit_ratelimits_as_ip(self) -> None:
        """Unauthenticated requests are rate-limited per client IP address."""
        self.do_test_hit_ratelimits(self.send_unauthed_api_request)
        # Other IPs should not be rate-limited
        resp = self.send_unauthed_api_request(REMOTE_ADDR="127.0.0.2")
        self.assertNotEqual(resp.status_code, 429)
    @rate_limit_rule(1, 5, domain="sends_email_by_ip")
    def test_create_realm_rate_limiting(self) -> None:
        """Realm-creation requests (which trigger email) are limited per IP."""
        with self.settings(OPEN_REALM_CREATION=True):
            self.do_test_hit_ratelimits(
                lambda: self.client_post("/new/", {"email": "new@zulip.com"}),
                is_json=False,
            )
    @rate_limit_rule(1, 5, domain="sends_email_by_ip")
    def test_find_account_rate_limiting(self) -> None:
        """Find-my-account requests (which trigger email) are limited per IP."""
        self.do_test_hit_ratelimits(
            lambda: self.client_post("/accounts/find/", {"emails": "new@zulip.com"}),
            is_json=False,
        )
    # Test whether submitting multiple emails is handled correctly.
    # The limit is set to 10 per second, so 5 requests with 2 emails
    # submitted in each should be allowed.
    @rate_limit_rule(1, 10, domain="sends_email_by_ip")
    def test_find_account_rate_limiting_multiple(self) -> None:
        self.do_test_hit_ratelimits(
            lambda: self.client_post("/accounts/find/", {"emails": "new@zulip.com,new2@zulip.com"}),
            is_json=False,
        )
    # If I submit with 3 emails and the rate-limit is 2, I should get
    # a 429 and not send any emails.
    @rate_limit_rule(1, 2, domain="sends_email_by_ip")
    def test_find_account_rate_limiting_multiple_one_request(self) -> None:
        emails = [
            "iago@zulip.com",
            "cordelia@zulip.com",
            "hamlet@zulip.com",
        ]
        resp = self.client_post("/accounts/find/", {"emails": ",".join(emails)})
        self.assertEqual(resp.status_code, 429)
        # NOTE(review): imported locally, presumably so `outbox` is bound
        # after the test framework resets it — confirm against Django docs.
        from django.core.mail import outbox
        self.assert_length(outbox, 0)
    @rate_limit_rule(1, 5, domain="sends_email_by_ip")
    def test_combined_ip_limits(self) -> None:
        """/new/ and /accounts/find/ share a single per-IP email rate limit."""
        # Alternate requests to /new/ and /accounts/find/
        request_count = 0
        def alternate_requests() -> HttpResponse:
            nonlocal request_count
            request_count += 1
            if request_count % 2 == 1:
                return self.client_post("/new/", {"email": "new@zulip.com"})
            else:
                return self.client_post("/accounts/find/", {"emails": "new@zulip.com"})
        self.do_test_hit_ratelimits(alternate_requests, is_json=False)
    @skipUnless(settings.ZILENCER_ENABLED, "requires zilencer")
    @rate_limit_rule(1, 5, domain="api_by_remote_server")
    def test_hit_ratelimits_as_remote_server(self) -> None:
        """Push-bouncer requests from a remote server are limited per server."""
        server_uuid = "1234-abcd"
        server = RemoteZulipServer(
            uuid=server_uuid,
            api_key="magic_secret_api_key",
            hostname="demo.example.com",
            last_updated=timezone_now(),
        )
        server.save()
        endpoint = "/api/v1/remotes/push/register"
        payload = {"user_id": 10, "token": "111222", "token_kind": PushDeviceToken.GCM}
        try:
            # Remote servers can only make requests to the root subdomain.
            original_default_subdomain = self.DEFAULT_SUBDOMAIN
            self.DEFAULT_SUBDOMAIN = ""
            RateLimitedRemoteZulipServer(server).clear_history()
            with self.assertLogs("zerver.lib.rate_limiter", level="WARNING") as m:
                self.do_test_hit_ratelimits(lambda: self.uuid_post(server_uuid, endpoint, payload))
            self.assertEqual(
                m.output,
                [
                    "WARNING:zerver.lib.rate_limiter:Remote server <RemoteZulipServer demo.example.com 1234-abcd> exceeded rate limits on domain api_by_remote_server"
                ],
            )
        finally:
            # Restore the subdomain even if the test body fails.
            self.DEFAULT_SUBDOMAIN = original_default_subdomain
    def test_hit_ratelimiterlockingexception(self) -> None:
        """A backend deadlock rejects the request with 429 and logs a warning."""
        user = self.example_user("cordelia")
        RateLimitedUser(user).clear_history()
        with mock.patch(
            "zerver.lib.rate_limiter.RedisRateLimiterBackend.incr_ratelimit",
            side_effect=RateLimiterLockingException,
        ):
            with self.assertLogs("zerver.lib.rate_limiter", level="WARNING") as m:
                result = self.send_api_message(user, "some stuff")
                self.assertEqual(result.status_code, 429)
            self.assertEqual(
                m.output,
                [
                    "WARNING:zerver.lib.rate_limiter:Deadlock trying to incr_ratelimit for {}".format(
                        f"RateLimitedUser:{user.id}:api_by_user"
                    )
                ],
            )
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2005-2013 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/log/.
from __future__ import with_statement
import os
import unittest
from trac.db.api import DatabaseManager, _parse_db_str, get_column_names, \
with_transaction
from trac.db.schema import Column, Table
from trac.test import EnvironmentStub, Mock
from trac.util.concurrency import ThreadLocal
class Connection(object):
    """Fake database connection that merely records commit()/rollback() calls."""
    # Class-level defaults: a fresh connection has seen neither call.  The
    # methods below set instance attributes that shadow these defaults.
    rolledback = False
    committed = False
    def rollback(self):
        """Record that rollback() was invoked on this connection."""
        self.rolledback = True
    def commit(self):
        """Record that commit() was invoked on this connection."""
        self.committed = True
class Error(Exception):
    """Sentinel exception raised by tests to exercise rollback/failure paths."""
def make_env(get_cnx):
    """Build a minimal mock Environment whose DatabaseManager hands out
    connections via `get_cnx` and starts with no active read/write
    transaction (wdb/rdb both None)."""
    from trac.core import ComponentManager
    return Mock(ComponentManager, components={DatabaseManager:
                Mock(get_connection=get_cnx,
                     _transaction_local=ThreadLocal(wdb=None, rdb=None))})
class WithTransactionTest(unittest.TestCase):
    """Tests for the commit/rollback semantics of `with_transaction`,
    covering implicit (env-provided) and explicit (caller-provided)
    connections and their nesting combinations.
    NOTE(review): `with_transaction(...)` appears to execute the decorated
    function immediately, so a `self.fail()` written right after a
    decorated function that raises is reached only if the exception was
    swallowed — confirm against trac.db.api.
    """
    def test_successful_transaction(self):
        """A body that returns normally is committed."""
        db = Connection()
        env = make_env(lambda: db)
        @with_transaction(env)
        def do_transaction(db):
            self.assertTrue(not db.committed and not db.rolledback)
        self.assertTrue(db.committed and not db.rolledback)
    def test_failed_transaction(self):
        """A body that raises is rolled back and the exception propagates."""
        db = Connection()
        env = make_env(lambda: db)
        try:
            @with_transaction(env)
            def do_transaction(db):
                self.assertTrue(not db.committed and not db.rolledback)
                raise Error()
            self.fail()
        except Error:
            pass
        self.assertTrue(not db.committed and db.rolledback)
    def test_implicit_nesting_success(self):
        """Nested implicit transactions share one connection; only the
        outermost level commits."""
        env = make_env(Connection)
        dbs = [None, None]
        @with_transaction(env)
        def level0(db):
            dbs[0] = db
            @with_transaction(env)
            def level1(db):
                dbs[1] = db
                self.assertTrue(not db.committed and not db.rolledback)
            self.assertTrue(not db.committed and not db.rolledback)
        self.assertTrue(dbs[0] is not None)
        self.assertTrue(dbs[0] is dbs[1])
        self.assertTrue(dbs[0].committed and not dbs[0].rolledback)
    def test_implicit_nesting_failure(self):
        """An inner failure rolls back the shared connection only at the
        outermost level."""
        env = make_env(Connection)
        dbs = [None, None]
        try:
            @with_transaction(env)
            def level0(db):
                dbs[0] = db
                try:
                    @with_transaction(env)
                    def level1(db):
                        dbs[1] = db
                        self.assertTrue(not db.committed and not db.rolledback)
                        raise Error()
                    self.fail()
                except Error:
                    self.assertTrue(not db.committed and not db.rolledback)
                    raise
            self.fail()
        except Error:
            pass
        self.assertTrue(dbs[0] is not None)
        self.assertTrue(dbs[0] is dbs[1])
        self.assertTrue(not dbs[0].committed and dbs[0].rolledback)
    def test_explicit_success(self):
        """With an explicitly passed connection, committing is left to the caller."""
        db = Connection()
        env = make_env(lambda: None)
        @with_transaction(env, db)
        def do_transaction(idb):
            self.assertTrue(idb is db)
            self.assertTrue(not db.committed and not db.rolledback)
        self.assertTrue(not db.committed and not db.rolledback)
    def test_explicit_failure(self):
        """With an explicitly passed connection, rollback is also left to the caller."""
        db = Connection()
        env = make_env(lambda: None)
        try:
            @with_transaction(env, db)
            def do_transaction(idb):
                self.assertTrue(idb is db)
                self.assertTrue(not db.committed and not db.rolledback)
                raise Error()
            self.fail()
        except Error:
            pass
        self.assertTrue(not db.committed and not db.rolledback)
    def test_implicit_in_explicit_success(self):
        """An implicit transaction nested in an explicit one reuses the
        explicit connection and does not commit it."""
        db = Connection()
        env = make_env(lambda: db)
        dbs = [None, None]
        @with_transaction(env, db)
        def level0(db):
            dbs[0] = db
            @with_transaction(env)
            def level1(db):
                dbs[1] = db
                self.assertTrue(not db.committed and not db.rolledback)
            self.assertTrue(not db.committed and not db.rolledback)
        self.assertTrue(dbs[0] is not None)
        self.assertTrue(dbs[0] is dbs[1])
        self.assertTrue(not dbs[0].committed and not dbs[0].rolledback)
    def test_implicit_in_explicit_failure(self):
        """A failure inside the nested implicit transaction performs neither
        commit nor rollback on the explicit connection."""
        db = Connection()
        env = make_env(lambda: db)
        dbs = [None, None]
        try:
            @with_transaction(env, db)
            def level0(db):
                dbs[0] = db
                @with_transaction(env)
                def level1(db):
                    dbs[1] = db
                    self.assertTrue(not db.committed and not db.rolledback)
                    raise Error()
                self.fail()
            self.fail()
        except Error:
            pass
        self.assertTrue(dbs[0] is not None)
        self.assertTrue(dbs[0] is dbs[1])
        self.assertTrue(not dbs[0].committed and not dbs[0].rolledback)
    def test_explicit_in_implicit_success(self):
        """An explicit transaction nested in an implicit one; the outer
        implicit level still commits."""
        db = Connection()
        env = make_env(lambda: db)
        dbs = [None, None]
        @with_transaction(env)
        def level0(db):
            dbs[0] = db
            @with_transaction(env, db)
            def level1(db):
                dbs[1] = db
                self.assertTrue(not db.committed and not db.rolledback)
            self.assertTrue(not db.committed and not db.rolledback)
        self.assertTrue(dbs[0] is not None)
        self.assertTrue(dbs[0] is dbs[1])
        self.assertTrue(dbs[0].committed and not dbs[0].rolledback)
    def test_explicit_in_implicit_failure(self):
        """A failure in the nested explicit transaction rolls back at the
        outer implicit level."""
        db = Connection()
        env = make_env(lambda: db)
        dbs = [None, None]
        try:
            @with_transaction(env)
            def level0(db):
                dbs[0] = db
                @with_transaction(env, db)
                def level1(db):
                    dbs[1] = db
                    self.assertTrue(not db.committed and not db.rolledback)
                    raise Error()
                self.fail()
            self.fail()
        except Error:
            pass
        self.assertTrue(dbs[0] is not None)
        self.assertTrue(dbs[0] is dbs[1])
        self.assertTrue(not dbs[0].committed and dbs[0].rolledback)
    def test_invalid_nesting(self):
        """Passing a different explicit connection inside an active
        transaction is rejected with an AssertionError."""
        env = make_env(Connection)
        try:
            @with_transaction(env)
            def level0(db):
                @with_transaction(env, Connection())
                def level1(db):
                    raise Error()
                raise Error()
            raise Error()
        except AssertionError:
            pass
class ParseConnectionStringTestCase(unittest.TestCase):
    """Tests for `_parse_db_str` across sqlite, postgres and mysql URL forms."""
    def test_sqlite_relative(self):
        # Default syntax for specifying DB path relative to the environment
        # directory
        self.assertEqual(('sqlite', {'path': 'db/trac.db'}),
                         _parse_db_str('sqlite:db/trac.db'))
    def test_sqlite_absolute(self):
        # Standard syntax
        self.assertEqual(('sqlite', {'path': '/var/db/trac.db'}),
                         _parse_db_str('sqlite:///var/db/trac.db'))
        # Legacy syntax
        self.assertEqual(('sqlite', {'path': '/var/db/trac.db'}),
                         _parse_db_str('sqlite:/var/db/trac.db'))
    def test_sqlite_with_timeout_param(self):
        # Query-string parameters end up in a nested 'params' dict.
        # (The previous "In-memory database" comment was a copy-paste error.)
        self.assertEqual(('sqlite', {'path': 'db/trac.db',
                                     'params': {'timeout': '10000'}}),
                         _parse_db_str('sqlite:db/trac.db?timeout=10000'))
    def test_sqlite_windows_path(self):
        # Legacy Windows drive-letter syntax ("C|") is normalized to "C:"
        # when os.name == 'nt'; patch os.name so this runs anywhere.
        os_name = os.name
        try:
            os.name = 'nt'
            self.assertEqual(('sqlite', {'path': 'C:/project/db/trac.db'}),
                             _parse_db_str('sqlite:C|/project/db/trac.db'))
        finally:
            os.name = os_name
    def test_postgres_simple(self):
        self.assertEqual(('postgres', {'host': 'localhost', 'path': '/trac'}),
                         _parse_db_str('postgres://localhost/trac'))
    def test_postgres_with_port(self):
        self.assertEqual(('postgres', {'host': 'localhost', 'port': 9431,
                                       'path': '/trac'}),
                         _parse_db_str('postgres://localhost:9431/trac'))
    def test_postgres_with_creds(self):
        self.assertEqual(('postgres', {'user': 'john', 'password': 'letmein',
                                       'host': 'localhost', 'port': 9431,
                                       'path': '/trac'}),
                         _parse_db_str('postgres://john:letmein@localhost:9431/trac'))
    def test_postgres_with_quoted_password(self):
        # URL-encoded password characters (%3a%40%2f -> ":@/") are unquoted.
        self.assertEqual(('postgres', {'user': 'john', 'password': ':@/',
                                       'host': 'localhost', 'path': '/trac'}),
                         _parse_db_str('postgres://john:%3a%40%2f@localhost/trac'))
    def test_mysql_simple(self):
        self.assertEqual(('mysql', {'host': 'localhost', 'path': '/trac'}),
                         _parse_db_str('mysql://localhost/trac'))
    def test_mysql_with_creds(self):
        self.assertEqual(('mysql', {'user': 'john', 'password': 'letmein',
                                    'host': 'localhost', 'port': 3306,
                                    'path': '/trac'}),
                         _parse_db_str('mysql://john:letmein@localhost:3306/trac'))
class StringsTestCase(unittest.TestCase):
    """String handling in the DB layer: unicode round-trips, the `empty`
    marker, Genshi Markup values, and identifier quoting."""
    def setUp(self):
        self.env = EnvironmentStub()
    def test_insert_unicode(self):
        """Non-ASCII text round-trips through insert and select unchanged."""
        self.env.db_transaction(
            "INSERT INTO system (name,value) VALUES (%s,%s)",
            ('test-unicode', u'ünicöde'))
        self.assertEqual([(u'ünicöde',)], self.env.db_query(
            "SELECT value FROM system WHERE name='test-unicode'"))
    def test_insert_empty(self):
        """The trac `empty` marker object is stored as an empty string."""
        from trac.util.text import empty
        self.env.db_transaction(
            "INSERT INTO system (name,value) VALUES (%s,%s)",
            ('test-empty', empty))
        self.assertEqual([(u'',)], self.env.db_query(
            "SELECT value FROM system WHERE name='test-empty'"))
    def test_insert_markup(self):
        """Genshi Markup values are stored as their string content."""
        from genshi.core import Markup
        self.env.db_transaction(
            "INSERT INTO system (name,value) VALUES (%s,%s)",
            ('test-markup', Markup(u'<em>märkup</em>')))
        self.assertEqual([(u'<em>märkup</em>',)], self.env.db_query(
            "SELECT value FROM system WHERE name='test-markup'"))
    def test_quote(self):
        """db.quote makes identifiers with quotes/backslashes usable as column aliases."""
        db = self.env.get_db_cnx()
        cursor = db.cursor()
        cursor.execute('SELECT 1 AS %s' % \
                       db.quote(r'alpha\`\"\'\\beta``gamma""delta'))
        self.assertEqual(r'alpha\`\"\'\\beta``gamma""delta',
                         get_column_names(cursor)[0])
    def test_quoted_id_with_percent(self):
        """Quoted identifiers containing '%' must survive execute, a
        parameterized execute, and executemany — with and without logging."""
        db = self.env.get_read_db()
        name = """%?`%s"%'%%"""
        def test(db, logging=False):
            cursor = db.cursor()
            if logging:
                cursor.log = self.env.log
            cursor.execute('SELECT 1 AS ' + db.quote(name))
            self.assertEqual(name, get_column_names(cursor)[0])
            cursor.execute('SELECT %s AS ' + db.quote(name), (42,))
            self.assertEqual(name, get_column_names(cursor)[0])
            # executemany with an empty, then non-empty, parameter sequence.
            cursor.executemany("UPDATE system SET value=%s WHERE "
                               "1=(SELECT 0 AS " + db.quote(name) + ")",
                               [])
            cursor.executemany("UPDATE system SET value=%s WHERE "
                               "1=(SELECT 0 AS " + db.quote(name) + ")",
                               [('42',), ('43',)])
        test(db)
        test(db, logging=True)
class ConnectionTestCase(unittest.TestCase):
    """Tests for connection helpers `get_last_id` and `update_sequence`,
    including tables whose identifiers require quoting."""
    def setUp(self):
        self.env = EnvironmentStub()
        # Two auto-increment tables: one with uppercase identifiers (which
        # need quoting) and one plain lowercase table.
        self.schema = [
            Table('HOURS', key='ID')[
                Column('ID', auto_increment=True),
                Column('AUTHOR')],
            Table('blog', key='bid')[
                Column('bid', auto_increment=True),
                Column('author')
            ]
        ]
        self.env.global_databasemanager.drop_tables(self.schema)
        self.env.global_databasemanager.create_tables(self.schema)
    def tearDown(self):
        self.env.global_databasemanager.drop_tables(self.schema)
        self.env.reset_db()
    def test_get_last_id(self):
        """get_last_id returns sequential ids, and stays correct across commits."""
        id1 = id2 = None
        q = "INSERT INTO report (author) VALUES ('anonymous')"
        with self.env.db_transaction as db:
            cursor = db.cursor()
            cursor.execute(q)
            # Row ID correct before...
            id1 = db.get_last_id(cursor, 'report')
            self.assertNotEqual(0, id1)
            db.commit()
            cursor.execute(q)
            # ... and after commit()
            db.commit()
            id2 = db.get_last_id(cursor, 'report')
        self.assertEqual(id1 + 1, id2)
    def test_update_sequence_default_column(self):
        """update_sequence resynchronizes the sequence after an explicit-id insert."""
        self.env.db_transaction(
            "INSERT INTO report (id, author) VALUES (42, 'anonymous')")
        with self.env.db_transaction as db:
            cursor = db.cursor()
            db.update_sequence(cursor, 'report', 'id')
        self.env.db_transaction(
            "INSERT INTO report (author) VALUES ('next-id')")
        self.assertEqual(43, self.env.db_query(
            "SELECT id FROM report WHERE author='next-id'")[0][0])
    def test_update_sequence_nondefault_column(self):
        """update_sequence also works for an auto-increment column not named 'id'."""
        with self.env.db_transaction as db:
            cursor = db.cursor()
            cursor.execute(
                "INSERT INTO blog (bid, author) VALUES (42, 'anonymous')")
            db.update_sequence(cursor, 'blog', 'bid')
        self.env.db_transaction(
            "INSERT INTO blog (author) VALUES ('next-id')")
        self.assertEqual(43, self.env.db_query(
            "SELECT bid FROM blog WHERE author='next-id'")[0][0])
    def test_identifiers_need_quoting(self):
        """Test for regression described in comment:4:ticket:11512."""
        with self.env.db_transaction as db:
            db("INSERT INTO %s (%s, %s) VALUES (42, 'anonymous')"
               % (db.quote('HOURS'), db.quote('ID'), db.quote('AUTHOR')))
            cursor = db.cursor()
            db.update_sequence(cursor, 'HOURS', 'ID')
        with self.env.db_transaction as db:
            cursor = db.cursor()
            cursor.execute(
                "INSERT INTO %s (%s) VALUES ('next-id')"
                % (db.quote('HOURS'), db.quote('AUTHOR')))
            last_id = db.get_last_id(cursor, 'HOURS', 'ID')
        self.assertEqual(43, last_id)
def suite():
    """Assemble every test case in this module into a single test suite."""
    # Same registration order as before: parsing, strings, connection,
    # then the with_transaction decorator tests.
    tests = unittest.TestSuite()
    for case in (ParseConnectionStringTestCase, StringsTestCase,
                 ConnectionTestCase, WithTransactionTest):
        tests.addTest(unittest.makeSuite(case))
    return tests
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    unittest.main(defaultTest='suite')
1.0.2dev: Minor refactoring to test cases. Refs #11512.
Separate arrange-act-assert blocks. Move asserts outside of the context manager to make the tested behavior clearer.
git-svn-id: 3aa280b99a9b480b02b1e2eda394e64594ad3297@12704 af82e41b-90c4-0310-8c96-b1721e28e2e2
# -*- coding: utf-8 -*-
#
# Copyright (C) 2005-2013 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/log/.
from __future__ import with_statement
import os
import unittest
from trac.db.api import DatabaseManager, _parse_db_str, get_column_names, \
with_transaction
from trac.db.schema import Column, Table
from trac.test import EnvironmentStub, Mock
from trac.util.concurrency import ThreadLocal
class Connection(object):
    """Fake database connection that merely records commit()/rollback() calls."""
    # Class-level defaults: a fresh connection has seen neither call.  The
    # methods below set instance attributes that shadow these defaults.
    rolledback = False
    committed = False
    def rollback(self):
        """Record that rollback() was invoked on this connection."""
        self.rolledback = True
    def commit(self):
        """Record that commit() was invoked on this connection."""
        self.committed = True
class Error(Exception):
    """Sentinel exception raised by tests to exercise rollback/failure paths."""
def make_env(get_cnx):
    """Build a minimal mock Environment whose DatabaseManager hands out
    connections via `get_cnx` and starts with no active read/write
    transaction (wdb/rdb both None)."""
    from trac.core import ComponentManager
    return Mock(ComponentManager, components={DatabaseManager:
                Mock(get_connection=get_cnx,
                     _transaction_local=ThreadLocal(wdb=None, rdb=None))})
class WithTransactionTest(unittest.TestCase):
    """Tests for the commit/rollback semantics of `with_transaction`,
    covering implicit (env-provided) and explicit (caller-provided)
    connections and their nesting combinations.
    NOTE(review): `with_transaction(...)` appears to execute the decorated
    function immediately, so a `self.fail()` written right after a
    decorated function that raises is reached only if the exception was
    swallowed — confirm against trac.db.api.
    """
    def test_successful_transaction(self):
        """A body that returns normally is committed."""
        db = Connection()
        env = make_env(lambda: db)
        @with_transaction(env)
        def do_transaction(db):
            self.assertTrue(not db.committed and not db.rolledback)
        self.assertTrue(db.committed and not db.rolledback)
    def test_failed_transaction(self):
        """A body that raises is rolled back and the exception propagates."""
        db = Connection()
        env = make_env(lambda: db)
        try:
            @with_transaction(env)
            def do_transaction(db):
                self.assertTrue(not db.committed and not db.rolledback)
                raise Error()
            self.fail()
        except Error:
            pass
        self.assertTrue(not db.committed and db.rolledback)
    def test_implicit_nesting_success(self):
        """Nested implicit transactions share one connection; only the
        outermost level commits."""
        env = make_env(Connection)
        dbs = [None, None]
        @with_transaction(env)
        def level0(db):
            dbs[0] = db
            @with_transaction(env)
            def level1(db):
                dbs[1] = db
                self.assertTrue(not db.committed and not db.rolledback)
            self.assertTrue(not db.committed and not db.rolledback)
        self.assertTrue(dbs[0] is not None)
        self.assertTrue(dbs[0] is dbs[1])
        self.assertTrue(dbs[0].committed and not dbs[0].rolledback)
    def test_implicit_nesting_failure(self):
        """An inner failure rolls back the shared connection only at the
        outermost level."""
        env = make_env(Connection)
        dbs = [None, None]
        try:
            @with_transaction(env)
            def level0(db):
                dbs[0] = db
                try:
                    @with_transaction(env)
                    def level1(db):
                        dbs[1] = db
                        self.assertTrue(not db.committed and not db.rolledback)
                        raise Error()
                    self.fail()
                except Error:
                    self.assertTrue(not db.committed and not db.rolledback)
                    raise
            self.fail()
        except Error:
            pass
        self.assertTrue(dbs[0] is not None)
        self.assertTrue(dbs[0] is dbs[1])
        self.assertTrue(not dbs[0].committed and dbs[0].rolledback)
    def test_explicit_success(self):
        """With an explicitly passed connection, committing is left to the caller."""
        db = Connection()
        env = make_env(lambda: None)
        @with_transaction(env, db)
        def do_transaction(idb):
            self.assertTrue(idb is db)
            self.assertTrue(not db.committed and not db.rolledback)
        self.assertTrue(not db.committed and not db.rolledback)
    def test_explicit_failure(self):
        """With an explicitly passed connection, rollback is also left to the caller."""
        db = Connection()
        env = make_env(lambda: None)
        try:
            @with_transaction(env, db)
            def do_transaction(idb):
                self.assertTrue(idb is db)
                self.assertTrue(not db.committed and not db.rolledback)
                raise Error()
            self.fail()
        except Error:
            pass
        self.assertTrue(not db.committed and not db.rolledback)
    def test_implicit_in_explicit_success(self):
        """An implicit transaction nested in an explicit one reuses the
        explicit connection and does not commit it."""
        db = Connection()
        env = make_env(lambda: db)
        dbs = [None, None]
        @with_transaction(env, db)
        def level0(db):
            dbs[0] = db
            @with_transaction(env)
            def level1(db):
                dbs[1] = db
                self.assertTrue(not db.committed and not db.rolledback)
            self.assertTrue(not db.committed and not db.rolledback)
        self.assertTrue(dbs[0] is not None)
        self.assertTrue(dbs[0] is dbs[1])
        self.assertTrue(not dbs[0].committed and not dbs[0].rolledback)
    def test_implicit_in_explicit_failure(self):
        """A failure inside the nested implicit transaction performs neither
        commit nor rollback on the explicit connection."""
        db = Connection()
        env = make_env(lambda: db)
        dbs = [None, None]
        try:
            @with_transaction(env, db)
            def level0(db):
                dbs[0] = db
                @with_transaction(env)
                def level1(db):
                    dbs[1] = db
                    self.assertTrue(not db.committed and not db.rolledback)
                    raise Error()
                self.fail()
            self.fail()
        except Error:
            pass
        self.assertTrue(dbs[0] is not None)
        self.assertTrue(dbs[0] is dbs[1])
        self.assertTrue(not dbs[0].committed and not dbs[0].rolledback)
    def test_explicit_in_implicit_success(self):
        """An explicit transaction nested in an implicit one; the outer
        implicit level still commits."""
        db = Connection()
        env = make_env(lambda: db)
        dbs = [None, None]
        @with_transaction(env)
        def level0(db):
            dbs[0] = db
            @with_transaction(env, db)
            def level1(db):
                dbs[1] = db
                self.assertTrue(not db.committed and not db.rolledback)
            self.assertTrue(not db.committed and not db.rolledback)
        self.assertTrue(dbs[0] is not None)
        self.assertTrue(dbs[0] is dbs[1])
        self.assertTrue(dbs[0].committed and not dbs[0].rolledback)
    def test_explicit_in_implicit_failure(self):
        """A failure in the nested explicit transaction rolls back at the
        outer implicit level."""
        db = Connection()
        env = make_env(lambda: db)
        dbs = [None, None]
        try:
            @with_transaction(env)
            def level0(db):
                dbs[0] = db
                @with_transaction(env, db)
                def level1(db):
                    dbs[1] = db
                    self.assertTrue(not db.committed and not db.rolledback)
                    raise Error()
                self.fail()
            self.fail()
        except Error:
            pass
        self.assertTrue(dbs[0] is not None)
        self.assertTrue(dbs[0] is dbs[1])
        self.assertTrue(not dbs[0].committed and dbs[0].rolledback)
    def test_invalid_nesting(self):
        """Passing a different explicit connection inside an active
        transaction is rejected with an AssertionError."""
        env = make_env(Connection)
        try:
            @with_transaction(env)
            def level0(db):
                @with_transaction(env, Connection())
                def level1(db):
                    raise Error()
                raise Error()
            raise Error()
        except AssertionError:
            pass
class ParseConnectionStringTestCase(unittest.TestCase):
    """Tests for `_parse_db_str` across sqlite, postgres and mysql URL forms."""
    def test_sqlite_relative(self):
        # Default syntax for specifying DB path relative to the environment
        # directory
        self.assertEqual(('sqlite', {'path': 'db/trac.db'}),
                         _parse_db_str('sqlite:db/trac.db'))
    def test_sqlite_absolute(self):
        # Standard syntax
        self.assertEqual(('sqlite', {'path': '/var/db/trac.db'}),
                         _parse_db_str('sqlite:///var/db/trac.db'))
        # Legacy syntax
        self.assertEqual(('sqlite', {'path': '/var/db/trac.db'}),
                         _parse_db_str('sqlite:/var/db/trac.db'))
    def test_sqlite_with_timeout_param(self):
        # Query-string parameters end up in a nested 'params' dict.
        # (The previous "In-memory database" comment was a copy-paste error.)
        self.assertEqual(('sqlite', {'path': 'db/trac.db',
                                     'params': {'timeout': '10000'}}),
                         _parse_db_str('sqlite:db/trac.db?timeout=10000'))
    def test_sqlite_windows_path(self):
        # Legacy Windows drive-letter syntax ("C|") is normalized to "C:"
        # when os.name == 'nt'; patch os.name so this runs anywhere.
        os_name = os.name
        try:
            os.name = 'nt'
            self.assertEqual(('sqlite', {'path': 'C:/project/db/trac.db'}),
                             _parse_db_str('sqlite:C|/project/db/trac.db'))
        finally:
            os.name = os_name
    def test_postgres_simple(self):
        self.assertEqual(('postgres', {'host': 'localhost', 'path': '/trac'}),
                         _parse_db_str('postgres://localhost/trac'))
    def test_postgres_with_port(self):
        self.assertEqual(('postgres', {'host': 'localhost', 'port': 9431,
                                       'path': '/trac'}),
                         _parse_db_str('postgres://localhost:9431/trac'))
    def test_postgres_with_creds(self):
        self.assertEqual(('postgres', {'user': 'john', 'password': 'letmein',
                                       'host': 'localhost', 'port': 9431,
                                       'path': '/trac'}),
                         _parse_db_str('postgres://john:letmein@localhost:9431/trac'))
    def test_postgres_with_quoted_password(self):
        # URL-encoded password characters (%3a%40%2f -> ":@/") are unquoted.
        self.assertEqual(('postgres', {'user': 'john', 'password': ':@/',
                                       'host': 'localhost', 'path': '/trac'}),
                         _parse_db_str('postgres://john:%3a%40%2f@localhost/trac'))
    def test_mysql_simple(self):
        self.assertEqual(('mysql', {'host': 'localhost', 'path': '/trac'}),
                         _parse_db_str('mysql://localhost/trac'))
    def test_mysql_with_creds(self):
        self.assertEqual(('mysql', {'user': 'john', 'password': 'letmein',
                                    'host': 'localhost', 'port': 3306,
                                    'path': '/trac'}),
                         _parse_db_str('mysql://john:letmein@localhost:3306/trac'))
class StringsTestCase(unittest.TestCase):
    """Round-trip tests for string handling in the database layer."""
    def setUp(self):
        self.env = EnvironmentStub()
    def test_insert_unicode(self):
        # Unicode values must survive an INSERT/SELECT round trip unchanged
        self.env.db_transaction(
            "INSERT INTO system (name,value) VALUES (%s,%s)",
            ('test-unicode', u'ünicöde'))
        self.assertEqual([(u'ünicöde',)], self.env.db_query(
            "SELECT value FROM system WHERE name='test-unicode'"))
    def test_insert_empty(self):
        # trac's `empty` marker object is stored as an empty string
        from trac.util.text import empty
        self.env.db_transaction(
            "INSERT INTO system (name,value) VALUES (%s,%s)",
            ('test-empty', empty))
        self.assertEqual([(u'',)], self.env.db_query(
            "SELECT value FROM system WHERE name='test-empty'"))
    def test_insert_markup(self):
        # Genshi Markup instances are stored by their string content
        from genshi.core import Markup
        self.env.db_transaction(
            "INSERT INTO system (name,value) VALUES (%s,%s)",
            ('test-markup', Markup(u'<em>märkup</em>')))
        self.assertEqual([(u'<em>märkup</em>',)], self.env.db_query(
            "SELECT value FROM system WHERE name='test-markup'"))
    def test_quote(self):
        # db.quote must escape identifiers containing quote characters
        db = self.env.get_db_cnx()
        cursor = db.cursor()
        cursor.execute('SELECT 1 AS %s' % \
                       db.quote(r'alpha\`\"\'\\beta``gamma""delta'))
        self.assertEqual(r'alpha\`\"\'\\beta``gamma""delta',
                         get_column_names(cursor)[0])
    def test_quoted_id_with_percent(self):
        # '%' inside a quoted identifier must not be mistaken for a
        # parameter marker, with and without statement logging enabled
        db = self.env.get_read_db()
        name = """%?`%s"%'%%"""
        def test(db, logging=False):
            cursor = db.cursor()
            if logging:
                cursor.log = self.env.log
            cursor.execute('SELECT 1 AS ' + db.quote(name))
            self.assertEqual(name, get_column_names(cursor)[0])
            cursor.execute('SELECT %s AS ' + db.quote(name), (42,))
            self.assertEqual(name, get_column_names(cursor)[0])
            cursor.executemany("UPDATE system SET value=%s WHERE "
                               "1=(SELECT 0 AS " + db.quote(name) + ")",
                               [])
            cursor.executemany("UPDATE system SET value=%s WHERE "
                               "1=(SELECT 0 AS " + db.quote(name) + ")",
                               [('42',), ('43',)])
        test(db)
        test(db, logging=True)
class ConnectionTestCase(unittest.TestCase):
    """Tests for connection-level helpers: last-insert-id retrieval,
    sequence updates, and identifier quoting."""
    def setUp(self):
        self.env = EnvironmentStub()
        # Upper-case table/column names deliberately exercise quoting
        self.schema = [
            Table('HOURS', key='ID')[
                Column('ID', auto_increment=True),
                Column('AUTHOR')],
            Table('blog', key='bid')[
                Column('bid', auto_increment=True),
                Column('author')
            ]
        ]
        self.env.global_databasemanager.drop_tables(self.schema)
        self.env.global_databasemanager.create_tables(self.schema)
    def tearDown(self):
        self.env.global_databasemanager.drop_tables(self.schema)
        self.env.reset_db()
    def test_get_last_id(self):
        q = "INSERT INTO report (author) VALUES ('anonymous')"
        with self.env.db_transaction as db:
            cursor = db.cursor()
            cursor.execute(q)
            # Row ID correct before...
            id1 = db.get_last_id(cursor, 'report')
            db.commit()
            cursor.execute(q)
            # ... and after commit()
            db.commit()
            id2 = db.get_last_id(cursor, 'report')
        self.assertNotEqual(0, id1)
        self.assertEqual(id1 + 1, id2)
    def test_update_sequence_default_column(self):
        # After inserting an explicit id, update_sequence must advance the
        # auto-increment counter so the next insert gets id 43
        with self.env.db_transaction as db:
            db("INSERT INTO report (id, author) VALUES (42, 'anonymous')")
            cursor = db.cursor()
            db.update_sequence(cursor, 'report', 'id')
        self.env.db_transaction(
            "INSERT INTO report (author) VALUES ('next-id')")
        self.assertEqual(43, self.env.db_query(
            "SELECT id FROM report WHERE author='next-id'")[0][0])
    def test_update_sequence_nondefault_column(self):
        # Same as above, but the key column is not named 'id'
        with self.env.db_transaction as db:
            cursor = db.cursor()
            cursor.execute(
                "INSERT INTO blog (bid, author) VALUES (42, 'anonymous')")
            db.update_sequence(cursor, 'blog', 'bid')
        self.env.db_transaction(
            "INSERT INTO blog (author) VALUES ('next-id')")
        self.assertEqual(43, self.env.db_query(
            "SELECT bid FROM blog WHERE author='next-id'")[0][0])
    def test_identifiers_need_quoting(self):
        """Test for regression described in comment:4:ticket:11512."""
        with self.env.db_transaction as db:
            db("INSERT INTO %s (%s, %s) VALUES (42, 'anonymous')"
               % (db.quote('HOURS'), db.quote('ID'), db.quote('AUTHOR')))
            cursor = db.cursor()
            db.update_sequence(cursor, 'HOURS', 'ID')
        with self.env.db_transaction as db:
            cursor = db.cursor()
            cursor.execute(
                "INSERT INTO %s (%s) VALUES ('next-id')"
                % (db.quote('HOURS'), db.quote('AUTHOR')))
            last_id = db.get_last_id(cursor, 'HOURS', 'ID')
        self.assertEqual(43, last_id)
def suite():
    """Collect this module's test cases into a single suite."""
    suite = unittest.TestSuite()
    suite.addTest(unittest.makeSuite(ParseConnectionStringTestCase))
    suite.addTest(unittest.makeSuite(StringsTestCase))
    suite.addTest(unittest.makeSuite(ConnectionTestCase))
    suite.addTest(unittest.makeSuite(WithTransactionTest))
    return suite
if __name__ == '__main__':
    unittest.main(defaultTest='suite')
|
#!/usr/bin/env python3
class bcolors:
    # ANSI terminal escape codes used by the print helpers below.
    HEADER = '\033[95m'
    OKBLUE = '\033[94m'
    OKGREEN = '\033[92m'
    WARNING = '\033[93m'
    FAIL = '\033[91m'
    ENDC = '\033[0m'  # reset all attributes
    BOLD = '\033[1m'
    UNDERLINE = '\033[4m'
    DATA = '\033[5;30;47m'   # blink; black on white
    TITLE = '\033[7;34;47m'  # reverse video; blue on white
    C_NONE = '\033[0;00m'
    C_RED = '\033[1;31m'
    C_GREEN = '\033[1;32m'
def print_title(content):
    """Print *content* on its own line using the title color scheme."""
    print('\n')
    print(''.join([bcolors.TITLE, content, bcolors.ENDC]))
def print_error(content):
    """Print a red [ERROR]-tagged message."""
    print(''.join([bcolors.C_RED, "[ERROR] ", content, bcolors.ENDC]))
def print_warning(content):
    """Print a yellow [WARNING]-tagged message."""
    print(''.join([bcolors.WARNING, "[WARNING] ", content, bcolors.ENDC]))
def print_info(content):
    """Print a green [INFO]-tagged message."""
    print(''.join([bcolors.OKGREEN, "[INFO] ", content, bcolors.ENDC]))
def print_progress(content):
    """Print a blue [INFO]-tagged progress message."""
    print(''.join([bcolors.OKBLUE, "[INFO] ", content, bcolors.ENDC]))
def print_data(content):
    """Print *content* framed by the data color scheme."""
    for piece in (bcolors.DATA, content, bcolors.ENDC):
        print(piece)
# print_format_table() refers to
# https://stackoverflow.com/posts/21786287/revisions
def print_format_table():
    """
    prints table of formatted text format options
    """
    # style (0-7), foreground (30-37), background (40-47): show every combo
    for style in range(8):
        for fg in range(30, 38):
            cells = []
            for bg in range(40, 48):
                code = ';'.join([str(style), str(fg), str(bg)])
                cells.append('\x1b[%sm %s \x1b[0m' % (code, code))
            print(''.join(cells))
        print('\n')
support highlight print
#!/usr/bin/env python3
class bcolors:
    # ANSI terminal escape codes used by the helpers in this module.
    HEADER = '\033[95m'
    OKBLUE = '\033[94m'
    OKGREEN = '\033[92m'
    WARNING = '\033[93m'
    FAIL = '\033[91m'
    ENDC = '\033[0m'  # reset all attributes
    BOLD = '\033[1m'
    UNDERLINE = '\033[4m'
    DATA = '\033[5;30;47m'
    TITLE = '\033[7;34;47m'
    C_NONE = '\033[0;00m'
    C_RED = '\033[1;31m'
    C_GREEN = '\033[1;32m'
def print_title(content):
    """Print *content* on its own line using the title color scheme."""
    print('\n')
    print(''.join([bcolors.TITLE, content, bcolors.ENDC]))
def print_error(content):
    """Print a red [ERROR]-tagged message."""
    print(''.join([bcolors.C_RED, "[ERROR] ", content, bcolors.ENDC]))
def print_warning(content):
    """Print a yellow [WARNING]-tagged message."""
    print(''.join([bcolors.WARNING, "[WARNING] ", content, bcolors.ENDC]))
def print_info(content):
    """Print a green [INFO]-tagged message."""
    print(''.join([bcolors.OKGREEN, "[INFO] ", content, bcolors.ENDC]))
def print_progress(content):
    """Print a blue [INFO]-tagged progress message."""
    print(''.join([bcolors.OKBLUE, "[INFO] ", content, bcolors.ENDC]))
def highlight(content):
    """Return *content* wrapped in green highlight escape codes."""
    return ''.join([bcolors.OKGREEN, content, bcolors.ENDC])
def print_data(content):
    """Print *content* framed by the data color scheme."""
    for piece in (bcolors.DATA, content, bcolors.ENDC):
        print(piece)
# print_format_table() refers to
# https://stackoverflow.com/posts/21786287/revisions
def print_format_table():
    """
    prints table of formatted text format options
    """
    # style (0-7); foreground (30-37); background (40-47): print every
    # combination so a reader can pick codes for the constants above.
    for style in range(8):
        for fg in range(30, 38):
            s1 = ''
            for bg in range(40, 48):
                format = ';'.join([str(style), str(fg), str(bg)])
                s1 += '\x1b[%sm %s \x1b[0m' % (format, format)
            print(s1)
        print('\n')
|
from __future__ import absolute_import
import json
import logging
import datetime as dt
import calendar
import decimal
import numpy as np
try:
import pandas as pd
is_pandas = True
except ImportError:
is_pandas = False
try:
from dateutil.relativedelta import relativedelta
is_dateutil = True
except ImportError:
is_dateutil = False
from .settings import settings
log = logging.getLogger(__name__)
# Nanoseconds per millisecond: int64 ns timestamps are divided by this
# to get (float) milliseconds.
millifactor = 10**6.0
class BokehJSONEncoder(json.JSONEncoder):
    """JSON encoder that serializes numpy arrays, pandas objects,
    date/time types and Bokeh objects on top of the stdlib encoder."""
    def transform_series(self, obj):
        """Serialize a pandas Series/Index via its underlying ndarray.
        """
        vals = obj.values
        return self.transform_array(vals)
    # Check for astype failures (putative Numpy < 1.7): on such versions
    # casting datetime64 to 'datetime64[ms]' does not rescale the value,
    # so both casts below yield the same int64.
    dt2001 = np.datetime64('2001')
    legacy_datetime64 = (dt2001.astype('int64') ==
                         dt2001.astype('datetime64[ms]').astype('int64'))
    def transform_array(self, obj):
        """Transform arrays into lists of json safe types
        also handles pandas series, and replacing
        nans and infs with strings
        """
        ## not quite correct, truncates to ms..
        if obj.dtype.kind == 'M':  # any datetime64 flavor
            if self.legacy_datetime64:
                if obj.dtype == np.dtype('datetime64[ns]'):
                    # ns -> ms by dividing by 10**6 (see `millifactor`)
                    return (obj.astype('int64') / millifactor).tolist()
                # else punt.
            else:
                return obj.astype('datetime64[ms]').astype('int64').tolist()
        elif obj.dtype.kind in ('u', 'i', 'f'):
            return self.transform_numerical_array(obj)
        return obj.tolist()
    def transform_numerical_array(self, obj):
        """handles nans/inf conversion
        """
        if isinstance(obj, np.ma.MaskedArray):
            obj = obj.filled(np.nan)  # Set masked values to nan
        if not np.isnan(obj).any() and not np.isinf(obj).any():
            # fast path: all entries finite
            return obj.tolist()
        else:
            # replace non-finite entries with strings JSON can carry
            transformed = obj.astype('object')
            transformed[np.isnan(obj)] = 'NaN'
            transformed[np.isposinf(obj)] = 'Infinity'
            transformed[np.isneginf(obj)] = '-Infinity'
            return transformed.tolist()
    def transform_python_types(self, obj):
        """handle special scalars, default to default json encoder
        """
        # Pandas Timestamp (pd.tslib is the pre-0.20 pandas layout)
        if is_pandas and isinstance(obj, pd.tslib.Timestamp):
            return obj.value / millifactor  #nanosecond to millisecond
        # NOTE(review): np.float / np.int are deprecated aliases removed
        # in NumPy >= 1.20; these checks only run on older NumPy.
        elif np.issubdtype(type(obj), np.float):
            return float(obj)
        elif np.issubdtype(type(obj), np.int):
            return int(obj)
        elif np.issubdtype(type(obj), np.bool_):
            return bool(obj)
        # Datetime
        # datetime is a subclass of date.
        elif isinstance(obj, dt.datetime):
            return calendar.timegm(obj.timetuple()) * 1000. + obj.microsecond / 1000.
        # Date
        elif isinstance(obj, dt.date):
            return calendar.timegm(obj.timetuple()) * 1000.
        # Numpy datetime64 scalar -> float ms since the Unix epoch
        elif isinstance(obj, np.datetime64):
            epoch_delta = obj - np.datetime64('1970-01-01T00:00:00Z')
            return (epoch_delta / np.timedelta64(1, 'ms'))
        # Time -> ms since midnight
        elif isinstance(obj, dt.time):
            return (obj.hour * 3600 + obj.minute * 60 + obj.second) * 1000 + obj.microsecond / 1000.
        elif is_dateutil and isinstance(obj, relativedelta):
            return dict(years=obj.years, months=obj.months, days=obj.days, hours=obj.hours,
                        minutes=obj.minutes, seconds=obj.seconds, microseconds=obj.microseconds)
        # Decimal
        elif isinstance(obj, decimal.Decimal):
            return float(obj)
        else:
            return super(BokehJSONEncoder, self).default(obj)
    def default(self, obj):
        """Dispatch on type: containers first, then Bokeh objects, then
        scalar fallbacks via transform_python_types."""
        #argh! local import! (avoids a circular import at module load)
        from .plot_object import PlotObject
        from .properties import HasProps
        from .colors import Color
        ## array types
        if is_pandas and isinstance(obj, (pd.Series, pd.Index)):
            return self.transform_series(obj)
        elif isinstance(obj, np.ndarray):
            return self.transform_array(obj)
        elif isinstance(obj, PlotObject):
            return obj.ref
        elif isinstance(obj, HasProps):
            return obj.changed_properties_with_values()
        elif isinstance(obj, Color):
            return obj.to_css()
        else:
            return self.transform_python_types(obj)
def serialize_json(obj, encoder=BokehJSONEncoder, **kwargs):
    """Serialize *obj* to a JSON string; pretty-print (indent=4) when
    the Bokeh ``settings.pretty`` flag is on."""
    if settings.pretty(False):
        kwargs["indent"] = 4
    return json.dumps(obj, cls=encoder, **kwargs)
# The wire format is plain JSON, so web (de)serialization are aliases.
deserialize_json = json.loads
serialize_web = serialize_json
deserialize_web = deserialize_json
def status_obj(status):
    """Build the protocol envelope for a status message."""
    message = {'msgtype': 'status'}
    message['status'] = status
    return message
def error_obj(error_msg):
    """Build the protocol envelope for an error message."""
    payload = {'msgtype': 'error'}
    payload['error_msg'] = error_msg
    return payload
Allow protocal to preserve microsecond part in np.datetime
from __future__ import absolute_import
import json
import logging
import datetime as dt
import calendar
import decimal
import numpy as np
try:
import pandas as pd
is_pandas = True
except ImportError:
is_pandas = False
try:
from dateutil.relativedelta import relativedelta
is_dateutil = True
except ImportError:
is_dateutil = False
from .settings import settings
log = logging.getLogger(__name__)
# Nanoseconds per millisecond: int64 ns timestamps are divided by this
# to get (float) milliseconds.
millifactor = 10**6.0
class BokehJSONEncoder(json.JSONEncoder):
    """JSON encoder that serializes numpy arrays, pandas objects,
    date/time types and Bokeh objects on top of the stdlib encoder.
    This revision preserves sub-millisecond precision for datetime64
    arrays (fractional-ms floats) instead of truncating to whole ms."""
    def transform_series(self, obj):
        """Serialize a pandas Series/Index via its underlying ndarray.
        """
        vals = obj.values
        return self.transform_array(vals)
    # Check for astype failures (putative Numpy < 1.7): on such versions
    # casting datetime64 to 'datetime64[ms]' does not rescale the value.
    dt2001 = np.datetime64('2001')
    legacy_datetime64 = (dt2001.astype('int64') ==
                         dt2001.astype('datetime64[ms]').astype('int64'))
    def transform_array(self, obj):
        """Transform arrays into lists of json safe types
        also handles pandas series, and replacing
        nans and infs with strings
        """
        ## the legacy (NumPy < 1.7) path below still truncates to ms
        if obj.dtype.kind == 'M':  # any datetime64 flavor
            if self.legacy_datetime64:
                if obj.dtype == np.dtype('datetime64[ns]'):
                    # ns -> ms by dividing by 10**6 (see `millifactor`)
                    return (obj.astype('int64') / millifactor).tolist()
                # else punt.
            else:
                # us -> fractional ms, preserving microsecond precision
                return (obj.astype('datetime64[us]').astype('int64') / 1000.).tolist()
        elif obj.dtype.kind in ('u', 'i', 'f'):
            return self.transform_numerical_array(obj)
        return obj.tolist()
    def transform_numerical_array(self, obj):
        """handles nans/inf conversion
        """
        if isinstance(obj, np.ma.MaskedArray):
            obj = obj.filled(np.nan)  # Set masked values to nan
        if not np.isnan(obj).any() and not np.isinf(obj).any():
            # fast path: all entries finite
            return obj.tolist()
        else:
            # replace non-finite entries with strings JSON can carry
            transformed = obj.astype('object')
            transformed[np.isnan(obj)] = 'NaN'
            transformed[np.isposinf(obj)] = 'Infinity'
            transformed[np.isneginf(obj)] = '-Infinity'
            return transformed.tolist()
    def transform_python_types(self, obj):
        """handle special scalars, default to default json encoder
        """
        # Pandas Timestamp (pd.tslib is the pre-0.20 pandas layout)
        if is_pandas and isinstance(obj, pd.tslib.Timestamp):
            return obj.value / millifactor  #nanosecond to millisecond
        # NOTE(review): np.float / np.int are deprecated aliases removed
        # in NumPy >= 1.20; these checks only run on older NumPy.
        elif np.issubdtype(type(obj), np.float):
            return float(obj)
        elif np.issubdtype(type(obj), np.int):
            return int(obj)
        elif np.issubdtype(type(obj), np.bool_):
            return bool(obj)
        # Datetime
        # datetime is a subclass of date.
        elif isinstance(obj, dt.datetime):
            return calendar.timegm(obj.timetuple()) * 1000. + obj.microsecond / 1000.
        # Date
        elif isinstance(obj, dt.date):
            return calendar.timegm(obj.timetuple()) * 1000.
        # Numpy datetime64 scalar -> float ms since the Unix epoch
        elif isinstance(obj, np.datetime64):
            epoch_delta = obj - np.datetime64('1970-01-01T00:00:00Z')
            return (epoch_delta / np.timedelta64(1, 'ms'))
        # Time -> ms since midnight
        elif isinstance(obj, dt.time):
            return (obj.hour * 3600 + obj.minute * 60 + obj.second) * 1000 + obj.microsecond / 1000.
        elif is_dateutil and isinstance(obj, relativedelta):
            return dict(years=obj.years, months=obj.months, days=obj.days, hours=obj.hours,
                        minutes=obj.minutes, seconds=obj.seconds, microseconds=obj.microseconds)
        # Decimal
        elif isinstance(obj, decimal.Decimal):
            return float(obj)
        else:
            return super(BokehJSONEncoder, self).default(obj)
    def default(self, obj):
        """Dispatch on type: containers first, then Bokeh objects, then
        scalar fallbacks via transform_python_types."""
        #argh! local import! (avoids a circular import at module load)
        from .plot_object import PlotObject
        from .properties import HasProps
        from .colors import Color
        ## array types
        if is_pandas and isinstance(obj, (pd.Series, pd.Index)):
            return self.transform_series(obj)
        elif isinstance(obj, np.ndarray):
            return self.transform_array(obj)
        elif isinstance(obj, PlotObject):
            return obj.ref
        elif isinstance(obj, HasProps):
            return obj.changed_properties_with_values()
        elif isinstance(obj, Color):
            return obj.to_css()
        else:
            return self.transform_python_types(obj)
def serialize_json(obj, encoder=BokehJSONEncoder, **kwargs):
    """Serialize *obj* to a JSON string; pretty-print (indent=4) when
    the Bokeh ``settings.pretty`` flag is on."""
    if settings.pretty(False):
        kwargs["indent"] = 4
    return json.dumps(obj, cls=encoder, **kwargs)
# The wire format is plain JSON, so web (de)serialization are aliases.
deserialize_json = json.loads
serialize_web = serialize_json
deserialize_web = deserialize_json
def status_obj(status):
    """Build the status-message envelope for the wire protocol."""
    return dict(msgtype='status', status=status)
def error_obj(error_msg):
    """Build the error-message envelope for the wire protocol."""
    return dict(msgtype='error', error_msg=error_msg)
|
import argparse
import os
import io
import shutil
import subprocess
from utils import create_manifest
# Command-line interface: where to put the dataset and what rate to
# resample the raw audio to.
parser = argparse.ArgumentParser(description='Processes and downloads an4.')
parser.add_argument('--target_dir', default='an4_dataset/', help='Path to save dataset')
parser.add_argument('--sample_rate', default=16000, type=int, help='Sample rate')
# Parsed at import time: this module is intended to run as a script.
args = parser.parse_args()
def _format_data(root_path, data_tag, name, wav_folder):
    """Lay out one corpus split under args.target_dir: convert the raw
    audio under *wav_folder* and pair wavs with their transcripts.
    root_path: unpacked an4 tree; data_tag: 'train' or 'test'.
    """
    data_path = args.target_dir + data_tag + '/' + name + '/'
    new_transcript_path = data_path + '/txt/'
    new_wav_path = data_path + '/wav/'
    # makedirs raises if the directories exist -- a fresh target_dir is
    # assumed by the script.
    os.makedirs(new_transcript_path)
    os.makedirs(new_wav_path)
    wav_path = root_path + 'wav/'
    file_ids = root_path + 'etc/an4_%s.fileids' % data_tag
    transcripts = root_path + 'etc/an4_%s.transcription' % data_tag
    train_path = wav_path + wav_folder
    _convert_audio_to_wav(train_path)
    _format_files(file_ids, new_transcript_path, new_wav_path, transcripts, wav_path)
def _convert_audio_to_wav(train_path):
    """Convert every .raw file under *train_path* to a .wav next to it.
    Requires the `find` and `sox` binaries on PATH.
    """
    with os.popen('find %s -type f -name "*.raw"' % train_path) as pipe:
        for line in pipe:
            raw_path = line.strip()
            new_path = line.replace('.raw', '.wav').strip()
            # AN4 raw audio is big-endian signed 16-bit mono
            cmd = 'sox -t raw -r %d -b 16 -e signed-integer -B -c 1 \"%s\" \"%s\"' % (
                args.sample_rate, raw_path, new_path)
            os.system(cmd)
def _format_files(file_ids, new_transcript_path, new_wav_path, transcripts, wav_path):
    """Write each cleaned transcript to txt/ and move the matching wav
    into wav/; utterances are matched by line position in the fileids
    and transcription files.
    """
    with open(file_ids, 'r') as f:
        with open(transcripts, 'r') as t:
            paths = f.readlines()
            transcripts = t.readlines()
            for x in range(len(paths)):
                path = wav_path + paths[x].strip() + '.wav'
                filename = path.split('/')[-1]
                extracted_transcript = _process_transcript(transcripts, x)
                current_path = os.path.abspath(path)
                new_path = new_wav_path + filename
                text_path = new_transcript_path + filename.replace('.wav', '.txt')
                with io.FileIO(text_path, "w") as file:
                    file.write(extracted_transcript.encode('utf-8'))
                os.rename(current_path, new_path)
def _process_transcript(transcripts, x):
extracted_transcript = transcripts[x].split('(')[0].strip("<s>").split('<')[0].strip().upper()
return extracted_transcript
def main():
    """Download, unpack, convert and index the AN4 corpus."""
    root_path = 'an4/'
    name = 'an4'
    # Shells out to wget/tar; both must be installed on the host
    subprocess.call(['wget http://www.speech.cs.cmu.edu/databases/an4/an4_raw.bigendian.tar.gz'], shell=True)
    subprocess.call(['tar -xzvf an4_raw.bigendian.tar.gz'], stdout=open(os.devnull, 'wb'), shell=True)
    os.makedirs(args.target_dir)
    _format_data(root_path, 'train', name, 'an4_clstk')
    _format_data(root_path, 'test', name, 'an4test_clstk')
    # Clean up the unpacked tree and the tarball once formatted
    shutil.rmtree(root_path)
    os.remove('an4_raw.bigendian.tar.gz')
    train_path = args.target_dir + '/train/'
    test_path = args.target_dir + '/test/'
    print ('Creating manifests...')
    create_manifest(train_path, 'an4_train')
    create_manifest(test_path, 'an4_val')
Refactor of an4 downloader
import argparse
import os
import io
import shutil
import tarfile
import wget
from utils import create_manifest
# Command-line interface: where to put the dataset and what rate to
# resample the raw audio to.
parser = argparse.ArgumentParser(description='Processes and downloads an4.')
parser.add_argument('--target_dir', default='an4_dataset/', help='Path to save dataset')
parser.add_argument('--sample_rate', default=16000, type=int, help='Sample rate')
# Parsed at import time: this module is intended to run as a script.
args = parser.parse_args()
def _format_data(root_path, data_tag, name, wav_folder):
    """Lay out one corpus split under args.target_dir: convert the raw
    audio under *wav_folder* and pair wavs with their transcripts.
    """
    data_path = args.target_dir + data_tag + '/' + name + '/'
    new_transcript_path = data_path + '/txt/'
    new_wav_path = data_path + '/wav/'
    # makedirs raises if the directories exist -- a fresh target_dir is
    # assumed by the script.
    os.makedirs(new_transcript_path)
    os.makedirs(new_wav_path)
    wav_path = root_path + 'wav/'
    file_ids = root_path + 'etc/an4_%s.fileids' % data_tag
    transcripts = root_path + 'etc/an4_%s.transcription' % data_tag
    train_path = wav_path + wav_folder
    _convert_audio_to_wav(train_path)
    _format_files(file_ids, new_transcript_path, new_wav_path, transcripts, wav_path)
def _convert_audio_to_wav(train_path):
    """Convert every .raw file under *train_path* to a .wav next to it.
    Requires the `find` and `sox` binaries on PATH.
    """
    with os.popen('find %s -type f -name "*.raw"' % train_path) as pipe:
        for line in pipe:
            raw_path = line.strip()
            new_path = line.replace('.raw', '.wav').strip()
            # AN4 raw audio is big-endian signed 16-bit mono
            cmd = 'sox -t raw -r %d -b 16 -e signed-integer -B -c 1 \"%s\" \"%s\"' % (
                args.sample_rate, raw_path, new_path)
            os.system(cmd)
def _format_files(file_ids, new_transcript_path, new_wav_path, transcripts, wav_path):
    """Write each cleaned transcript to txt/ and move the matching wav
    into wav/; utterances are matched by line position in the fileids
    and transcription files.
    """
    with open(file_ids, 'r') as f:
        with open(transcripts, 'r') as t:
            paths = f.readlines()
            transcripts = t.readlines()
            for x in range(len(paths)):
                path = wav_path + paths[x].strip() + '.wav'
                filename = path.split('/')[-1]
                extracted_transcript = _process_transcript(transcripts, x)
                current_path = os.path.abspath(path)
                new_path = new_wav_path + filename
                text_path = new_transcript_path + filename.replace('.wav', '.txt')
                with io.FileIO(text_path, "w") as file:
                    file.write(extracted_transcript.encode('utf-8'))
                os.rename(current_path, new_path)
def _process_transcript(transcripts, x):
extracted_transcript = transcripts[x].split('(')[0].strip("<s>").split('<')[0].strip().upper()
return extracted_transcript
def main():
    """Download, unpack, convert and index the AN4 corpus."""
    root_path = 'an4/'
    name = 'an4'
    # Pure-Python download/extract (no wget/tar binaries required)
    wget.download('http://www.speech.cs.cmu.edu/databases/an4/an4_raw.bigendian.tar.gz')
    tar = tarfile.open('an4_raw.bigendian.tar.gz')
    # NOTE(review): extractall trusts member paths of the downloaded
    # archive -- acceptable for this known corpus, risky in general.
    tar.extractall()
    os.makedirs(args.target_dir)
    _format_data(root_path, 'train', name, 'an4_clstk')
    _format_data(root_path, 'test', name, 'an4test_clstk')
    # Clean up the unpacked tree and the tarball once formatted
    shutil.rmtree(root_path)
    os.remove('an4_raw.bigendian.tar.gz')
    train_path = args.target_dir + '/train/'
    test_path = args.target_dir + '/test/'
    print ('\n', 'Creating manifests...')
    create_manifest(train_path, 'an4_train')
    create_manifest(test_path, 'an4_val')
|
#!/usr/bin/env python
#-----------------------------------------------------------------------------
# Copyright (c) 2013, The BiPy Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
from pyqi.core.interfaces.optparse import (OptparseOption,
OptparseUsageExample,
OptparseResult)
from pyqi.core.interfaces.optparse.output_handler import write_string
from pyqi.commands.make_bash_completion import CommandConstructor
__author__ = "Daniel McDonald"
__copyright__ = "Copyright 2013, The QCLI Project"
__credits__ = ["Daniel McDonald", "Jai Ram Rideout", "Doug Wendel", "Greg Caporaso"]
__license__ = "BSD"
__version__ = "0.1.0-dev"
__maintainer__ = "Daniel McDonald"
__email__ = "mcdonadt@colorado.edu"
# How you can use the command from the command line
### need to make sure comments desc usage_examples goes into make_cli template (make_optparse?)
usage_examples = [
OptparseUsageExample(ShortDesc="Create a bash completion script",
LongDesc="Create a bash completion script for use with a QCLI driver",
Ex="%prog --command-config-module pyqi.interfaces.optparse.config --driver-name pyqi -o ~/.bash_completion.d/pyqi")
]
# Parameter conversions tell the interface how to describe command line
# options
"""
inputs = [
OptparseOption(InputType=Tells the interface what to expect
Parameter=CommandConstructor.Parameters.getParameter('foo'), # nullable
Required=True|False, # can override a parameter (False ->True, but not True -> False)
LongName=Ideally the same as Parameter name...
ShortName=Shortform version, specific to CLI
Help=yup.
InputHandler=foo)
outputs = [
OptparseResult(OutputType=tells the interface something,
Parameter=CommandConstructor.Parameters.getParameter('foo') # nullable
Required=True|False, # can override a parameter (False -> True, but not True -> False)
OutputHandler=foo,
ResultKey=foo)
]
"""
### need to make sure comments desc input goes into make_cli template (make_optparse?)
inputs = [
OptparseOption(InputType=str,
Parameter=CommandConstructor.Parameters['command_config_module'],
# Required=True implied by Parameter
# Name='command_cfg_directory', implied by Parameter
ShortName=None,
# Help is pulled from parameter since Parameter is not None
InputHandler=None), # optparse handles str just fine
OptparseOption(InputType=str,
Parameter=CommandConstructor.Parameters['driver_name'],
# Required=True implied by Parameter
# Name='driver_name', implied by Parameter
ShortName=None,
# Help is pulled from parameter since Paramter is not None
InputHandler=None),
OptparseOption(InputType='new_filepath',
Parameter=None, #
Required=True,
Name='output_fp',
ShortName='o',
Help="Output file path",
InputHandler=None)
]
### need to make sure comments desc output goes into make_cli template (make_optparse?)
outputs = [
OptparseResult(OutputType=None, ### do we need an outputtype?
Parameter=None,
Name='output_fp', # if specified, must exist as an input
OutputHandler=write_string,
ResultKey='result')
]
# param_conversions = {
# #### directory is misnomer, this is a module path
# 'command_cfg_directory':ParameterConversion(ShortName=None,
# LongName='command_cfg_directory',
# CLType=str),
# 'driver_name':ParameterConversion(ShortName=None,
# LongName='driver_name',
# CLType=str),
# }
# The output map associated keys in the results returned from Command.run
# without output handlers
#output_map = {'result':OutputHandler(OptionName='output_fp',
# Function=write_string)
# }
#
## In case there are interface specific bits such as output files
#additional_options = [CLOption(Type='output_file',
# Help='the resulting configuration file',
# Name='output_fp',
# Required=True,
# LongName='output_fp',
# CLType='new_filepath',
# ShortName='o')
# ]
removed comments
#!/usr/bin/env python
#-----------------------------------------------------------------------------
# Copyright (c) 2013, The BiPy Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
from pyqi.core.interfaces.optparse import (OptparseOption,
OptparseUsageExample,
OptparseResult)
from pyqi.core.interfaces.optparse.output_handler import write_string
from pyqi.commands.make_bash_completion import CommandConstructor
__author__ = "Daniel McDonald"
__copyright__ = "Copyright 2013, The QCLI Project"
__credits__ = ["Daniel McDonald", "Jai Ram Rideout", "Doug Wendel", "Greg Caporaso"]
__license__ = "BSD"
__version__ = "0.1.0-dev"
__maintainer__ = "Daniel McDonald"
__email__ = "mcdonadt@colorado.edu"
# How you can use the command from the command line
### need to make sure comments desc usage_examples goes into make_cli template (make_optparse?)
usage_examples = [
    OptparseUsageExample(ShortDesc="Create a bash completion script",
                         LongDesc="Create a bash completion script for use with a QCLI driver",
                         Ex="%prog --command-config-module pyqi.interfaces.optparse.config --driver-name pyqi -o ~/.bash_completion.d/pyqi")
]
### need to make sure comments desc input goes into make_cli template (make_optparse?)
inputs = [
    OptparseOption(InputType=str,
                   Parameter=CommandConstructor.Parameters['command_config_module'],
                   # Required=True implied by Parameter
                   # Name='command_cfg_directory', implied by Parameter
                   ShortName=None,
                   # Help is pulled from parameter since Parameter is not None
                   InputHandler=None),  # optparse handles str just fine
    OptparseOption(InputType=str,
                   Parameter=CommandConstructor.Parameters['driver_name'],
                   # Required=True implied by Parameter
                   # Name='driver_name', implied by Parameter
                   ShortName=None,
                   # Help is pulled from parameter since Parameter is not None
                   InputHandler=None),
    OptparseOption(InputType='new_filepath',
                   Parameter=None,  # interface-only option, no Command parameter
                   Required=True,
                   Name='output_fp',
                   ShortName='o',
                   Help="Output file path",
                   InputHandler=None)
]
### need to make sure comments desc output goes into make_cli template (make_optparse?)
outputs = [
    OptparseResult(OutputType=None,  ### do we need an outputtype?
                   Parameter=None,
                   Name='output_fp',  # if specified, must exist as an input
                   OutputHandler=write_string,
                   ResultKey='result')
]
|
__all__ = ['shuffleMockCatalog', 'generate_upid']
import warnings
from itertools import izip
import numpy as np
from numpy.lib.recfunctions import rename_fields
def _iter_plateau_in_sorted_array(a):
k = np.where(a[1:] != a[:-1])[0]
k += 1
i = 0
for j in k:
yield i, j
i = j
yield i, len(a)
def _iter_indices_in_bins(bins, a):
s = a.argsort()
k = np.searchsorted(a, bins, 'right', sorter=s)
i = 0
for j in k:
yield s[i:j]
i = j
yield s[i:]
def _apply_rotation(pos, box_size):
half_box_size = box_size * 0.5
pos[pos > half_box_size] -= box_size
pos[pos < -half_box_size] += box_size
A = np.linalg.qr(np.random.randn(3,3))[0]
if np.linalg.det(A) < 0:
A *= -1
return np.dot(pos, A)
_axes = list('xyz')
def _get_xyz(a, ax_type=float):
return np.fromiter((a[ax] for ax in _axes), ax_type, 3)
def generate_upid(pid, id, recursive=True):
    """
    To generate (or to fix) the upid of a halo catalog.
    Parameters
    ----------
    pid : array_like
        An ndarray of integer that contains the parent IDs of each halo.
    id : array_like
        An ndarray of integer that contains the halo IDs.
    recursive : bool, optional
        Whether or not to run this function recursively. Default is True.
    Returns
    -------
    upid : array_like
        The ultimate parent IDs.
    Examples
    --------
    >>> halos['upid'] = generate_upid(halos['pid'], halos['id'])
    """
    pid = np.asanyarray(pid)
    id = np.asanyarray(id)
    # Group identical pid values: sort, then locate the run boundaries.
    s = pid.argsort()
    idx = np.fromiter(_iter_plateau_in_sorted_array(pid[s]), \
            np.dtype([('start', int), ('stop', int)]))
    unique_pid = pid[s[idx['start']]]
    # pid == -1 marks host halos (no parent); drop that group.
    if unique_pid[0] == -1:
        unique_pid = unique_pid[1:]
        idx = idx[1:]
    host_flag = (pid == -1)
    # Parents that are NOT themselves hosts: their children must be
    # re-pointed one level up.
    not_found = np.where(np.in1d(unique_pid, id[host_flag], True, True))[0]
    if not len(not_found):
        return pid  # every parent is already a host: nothing to fix
    sub_flag = np.where(~host_flag)[0]
    # Rows of the sub-halos acting as parents; sorting by halo id aligns
    # them one-to-one with `not_found` (asserted below).
    found = sub_flag[np.in1d(id[sub_flag], unique_pid[not_found], True)]
    found = found[id[found].argsort()]
    assert (id[found] == unique_pid[not_found]).all()
    del host_flag, sub_flag, unique_pid
    # Re-point each affected child at its parent's own parent; copy pid
    # first so in-place updates don't corrupt the values being read.
    pid_old = pid.copy()
    for i, j in izip(found, not_found):
        pid[s[slice(*idx[j])]] = pid_old[i]
    del pid_old, idx, s, found, not_found
    # One pass moves every child up one level; recurse until all pids
    # reference host halos.
    return generate_upid(pid, id, True) if recursive else pid
def shuffleMockCatalog(mock_ids, halo_catalog, bin_width=None, bins=None,
        proxy='mvir', box_size=None, apply_rsd=False,
        shuffle_centrals=True, shuffle_satellites=True, rotate_satellites=False,
        return_structured_array=False):
    """
    Shuffle a mock catalog according to Zentner et al. (2014) [arXiv:1311.1818]
    Parameters
    ----------
    mock_ids : array_like
        Should be a 1-d array of int which contains the corresponding halo IDs
        for the galaxies in the mock catalog to be shuffled.
    halo_catalog : array_like
        Should be a 1-d structrued array which has the following fields:
        id, upid, x, y, z, vz (if `apply_rsd` it True), and the proxy.
    bin_width : float or None, optional
        The width of the bin, in dex.
    bins : int, array_like, or None, optional
        If an integer is provided, it is interpreted as the number of bins.
        If an array is provided, it is interpreted as the edges of the bins.
        The parameter _overwrites_ `bin_width`.
    proxy : string, optional
        The proxy to bin on. Must be present in the fields of `halo_catalog`.
    box_size : float or None, optional
        The side length of the box. Should be in the same unit as x, y, z.
    apply_rsd : bool, optional
        Whether or not to apply redshift space distortions on the z-axis.
        (Default is False)
    shuffle_centrals : bool, optional
        Whether or not to shuffle central galaxies (Default is True)
    shuffle_satellites : bool, optional
        Whether or not to shuffle satellite galaxies (Default is True)
    rotate_satellites : bool, optional
        Whether or not to apply a random rotation to satellite galaxies
        (Default is False)
    return_structured_array : bool, optional
        Whether to return a structured array that contains x, y, z
        or just a n-by-3 float array.
    Returns
    -------
    pos : array_like
        A ndarray that contains x, y, z of the shuffled positions.
    """
    # check necessary fields in halo_catalog
    fields = ['id', 'upid', proxy] + _axes
    if apply_rsd:
        fields.append('vz')
    if not all((f in halo_catalog.dtype.names for f in fields)):
        raise ValueError('`halo_catalog` should have the following fields: '+ \
                ', '.join(fields))
    # check dtype
    ax_type = halo_catalog['x'].dtype.type
    if any((halo_catalog[ax].dtype.type != ax_type for ax in 'yz')):
        raise ValueError('The types of fields x, y, z in `halo_catalog` ' \
                'must all be the same.')
    # check all mock_ids are in halo_catalog
    s = halo_catalog['id'].argsort()
    idx = np.searchsorted(halo_catalog['id'], mock_ids, sorter=s)
    try:
        idx = s[idx]
    except IndexError:
        raise ValueError('`mock_ids` must all present in `halo_catalog`')
    if not (halo_catalog['id'][idx] == mock_ids).all():
        raise ValueError('`mock_ids` must all present in `halo_catalog`')
    # mock_idx maps each halo row to its galaxy's row in `mock_ids`
    # (-1 for halos hosting no mock galaxy).
    mock_idx = np.ones(len(halo_catalog), dtype=int)
    mock_idx *= -1
    mock_idx[idx] = np.arange(len(mock_ids))
    del s, idx
    # separate hosts and subs
    # The now-redundant `upid` (hosts) / `id` (subs) fields are recycled to
    # store mock_idx, avoiding an extra array.
    host_flag = (halo_catalog['upid'] == -1)
    hosts = rename_fields(halo_catalog[host_flag], {'upid':'mock_idx'})
    hosts['mock_idx'] = mock_idx[host_flag]
    subs = rename_fields(halo_catalog[~host_flag], {'id':'mock_idx'})
    subs['mock_idx'] = mock_idx[~host_flag]
    subs = subs[subs['mock_idx'] > -1]
    del host_flag, mock_idx
    # group subhalos
    subs.sort(order='upid')
    idx = np.fromiter(_iter_plateau_in_sorted_array(subs['upid']), \
            np.dtype([('start', int), ('stop', int)]))
    host_ids = subs['upid'][idx['start']]
    if not np.in1d(host_ids, hosts['id'], True).all():
        raise ValueError('Some subhalos associdated with the mock galaxies ' \
                'have no parent halos in `halo_catalog`. Consider using ' \
                '`generate_upid` to fix this.')
    # subs_idx[j] is the (start, stop) slice of `subs` belonging to hosts[j];
    # hosts with no subhalos keep the default empty (0, 0) slice.
    # NOTE(review): this alignment assumes `hosts` is sorted by 'id' --
    # confirm against how `halo_catalog` is prepared upstream.
    subs_idx = np.zeros(len(hosts), dtype=idx.dtype)
    subs_idx[np.in1d(hosts['id'], host_ids, True)] = idx
    del idx, host_ids
    # check bins
    try:
        bin_width = float(bin_width)
    except (ValueError, TypeError):
        bin_width = None
    else:
        if bin_width <= 0:
            bin_width = None
    if bin_width is None:
        bin_width = 0.1
    # Log-space range of the proxy; the 0.99999 factor nudges the lower edge
    # below the minimum so the smallest host lands inside the first bin.
    mi = np.log10(hosts[proxy].min()*0.99999)
    ma = np.log10(hosts[proxy].max())
    if bins is None:
        bins = int(np.ceil((ma-mi)/bin_width))
        mi = ma - bin_width*bins
    try:
        bins = int(bins)
    except (ValueError, TypeError):
        # `bins` is an explicit array of ascending bin edges.
        bins = np.asarray(bins)
        if len(bins) < 2 or (bins[1:]<bins[:-1]).any():
            raise ValueError('Please specify a valid `bin` parameter.')
    else:
        bins = np.logspace(mi, ma, bins+1)
    # create the array for storing results
    # NaN marks galaxies that have not yet been assigned a position.
    pos = np.empty((len(mock_ids), 3), ax_type)
    pos.fill(np.nan)
    # loop of bins of proxy (e.g. mvir)
    for i, indices in enumerate(_iter_indices_in_bins(bins, hosts[proxy])):
        if not len(indices):
            continue
        # i == 0 / i == len(bins) are the under/overflow groups outside the
        # edges; they are skipped (with a warning only if they matter).
        if i==0 or i==len(bins):
            if (hosts['mock_idx'][indices] > -1).any() or \
                    any((subs_idx['start'][j] < subs_idx['stop'][j] \
                    for j in indices)):
                warnings.warn('Some halos associdated with the mock catalog ' \
                        'are outside the bin range.', RuntimeWarning)
            continue
        # shuffle satellites
        if shuffle_satellites:
            choices = indices.tolist()
        for j in indices:
            subs_this = subs[slice(*subs_idx[j])]
            if not len(subs_this):
                continue
            mock_idx_this = subs_this['mock_idx']
            # NOTE(review): multi-field selection + .view assumes an older
            # numpy; recent versions need structured_to_unstructured.
            pos[mock_idx_this] = subs_this[_axes].view((ax_type,3))
            if shuffle_satellites:
                # Move this host's satellites rigidly (relative offsets kept)
                # onto a host drawn without replacement from the same bin.
                k = choices.pop(np.random.randint(len(choices)))
                pos[mock_idx_this] -= _get_xyz(hosts[j], ax_type)
                if rotate_satellites:
                    pos[mock_idx_this] = \
                            _apply_rotation(pos[mock_idx_this], box_size)
                pos[mock_idx_this] += _get_xyz(hosts[k], ax_type)
                if apply_rsd:
                    # NOTE(review): /100.0 presumably converts vz [km/s] to a
                    # comoving offset with H0 = 100 h km/s/Mpc -- confirm the
                    # catalog's units.
                    pos[mock_idx_this,2] += (subs_this['vz'] \
                            + hosts['vz'][k] - hosts['vz'][j])/100.0
            else:
                if rotate_satellites:
                    # Rotate in place about the (unchanged) host position.
                    host_pos = _get_xyz(hosts[j], ax_type)
                    pos[mock_idx_this] -= host_pos
                    pos[mock_idx_this] = \
                            _apply_rotation(pos[mock_idx_this], box_size)
                    pos[mock_idx_this] += host_pos
                if apply_rsd:
                    pos[mock_idx_this,2] += subs_this['vz']/100.0
        # shuffle hosts
        has_mock = indices[hosts['mock_idx'][indices] > -1]
        if not len(has_mock):
            continue
        mock_idx_this = hosts['mock_idx'][has_mock]
        if shuffle_centrals:
            # Centrals are reassigned to hosts drawn without replacement from
            # the whole bin, not only from hosts that carry a galaxy.
            has_mock = np.random.choice(indices, len(has_mock), False)
        pos[mock_idx_this] = hosts[_axes][has_mock].view((ax_type,3))
        if apply_rsd:
            pos[mock_idx_this,2] += hosts['vz'][has_mock]/100.0
    # sanity check
    if np.isnan(pos).any():
        warnings.warn('Some galaxies in the mock catalog have not been ' \
                'assigned a new position. Maybe the corresponding halo is ' \
                'outside the bin range.', RuntimeWarning)
    # wrap box
    if box_size is not None:
        # In-place periodic wrap into [0, box_size).
        pos = np.remainder(pos, box_size, pos)
    if return_structured_array:
        # NOTE(review): under Python 3 np.dtype rejects a zip iterator --
        # this needs list(zip(...)) there.
        pos = pos.view(np.dtype(zip(_axes, [ax_type]*3)))
    return pos
# shuffleMockCatalog.py edited online with Bitbucket (small fix to rotation)
__all__ = ['shuffleMockCatalog', 'generate_upid']
import warnings
from itertools import izip
import numpy as np
from numpy.lib.recfunctions import rename_fields
def _iter_plateau_in_sorted_array(a):
    """Yield (start, stop) index pairs for each run of equal values in `a`.

    `a` must already be sorted; the pairs partition range(len(a)) and an
    empty input yields the single pair (0, 0).
    """
    boundaries = np.flatnonzero(a[1:] != a[:-1]) + 1
    start = 0
    for stop in boundaries:
        yield start, stop
        start = stop
    yield start, len(a)
def _iter_indices_in_bins(bins, a):
    """Yield, for each bin, the indices of the elements of `a` in that bin.

    `bins` are ascending edges; len(bins)+1 groups are produced. The first
    group holds values <= bins[0] and the last holds values > bins[-1]
    (edges are treated with 'right' semantics).
    """
    order = a.argsort()
    edges = np.searchsorted(a, bins, 'right', sorter=order)
    start = 0
    for stop in edges:
        yield order[start:stop]
        start = stop
    yield order[start:]
def _apply_rotation(pos, box_size):
    """Apply a random *proper* rotation about the origin to `pos`.

    Coordinates are first wrapped in place into [-box_size/2, box_size/2)
    so offsets straddling the periodic boundary rotate consistently.
    Consumes one np.random.randn(3, 3) draw.
    """
    half_box_size = box_size * 0.5
    pos[pos > half_box_size] -= box_size
    pos[pos < -half_box_size] += box_size
    # QR of a Gaussian random matrix yields a random orthogonal matrix, but
    # it may be a reflection (det == -1), which would mirror the satellite
    # configuration. Flip the sign so the transform is a proper rotation.
    A = np.linalg.qr(np.random.randn(3,3))[0]
    if np.linalg.det(A) < 0:
        A *= -1
    return np.dot(pos, A)
# Names of the coordinate fields used throughout this module.
_axes = ['x', 'y', 'z']
def _get_xyz(a, ax_type=float):
    """Return the x, y, z fields of record `a` as a length-3 array of `ax_type`."""
    return np.fromiter((a[axis] for axis in _axes), ax_type, 3)
def generate_upid(pid, id, recursive=True):
    """
    To generate (or to fix) the upid of a halo catalog.

    Any halo whose pid points at another subhalo is re-pointed at that
    subhalo's own parent; when `recursive` this repeats until every
    non-host points directly at a host (pid == -1 marks hosts). Note that
    `pid` is modified in place when an ndarray is passed in.

    Parameters
    ----------
    pid : array_like
        An ndarray of integer that contains the parent IDs of each halo.
    id : array_like
        An ndarray of integer that contains the halo IDs.
    recursive : bool, optional
        Whether or not to run this function recursively. Default is True.

    Returns
    -------
    upid : array_like
        The ultimate parent IDs.

    Examples
    --------
    >>> halos['upid'] = generate_upid(halos['pid'], halos['id'])
    """
    pid = np.asanyarray(pid)
    id = np.asanyarray(id)
    if not len(pid):
        # Nothing to resolve; also avoids indexing unique_pid[0] below.
        return pid
    s = pid.argsort()
    # (start, stop) runs of equal pid values in sorted order.
    idx = np.fromiter(_iter_plateau_in_sorted_array(pid[s]), \
            np.dtype([('start', int), ('stop', int)]))
    unique_pid = pid[s[idx['start']]]
    if unique_pid[0] == -1:
        # Hosts (pid == -1) need no resolution; drop their run.
        unique_pid = unique_pid[1:]
        idx = idx[1:]
    host_flag = (pid == -1)
    # Parent IDs that are not host IDs, i.e. that point at subhalos.
    not_found = np.where(np.in1d(unique_pid, id[host_flag], True, True))[0]
    if not len(not_found):
        return pid
    sub_flag = np.where(~host_flag)[0]
    # Rows of the subhalos that act as (intermediate) parents.
    found = sub_flag[np.in1d(id[sub_flag], unique_pid[not_found], True)]
    # Sorting by id aligns `found` with the (already sorted) parent IDs.
    found = found[id[found].argsort()]
    assert (id[found] == unique_pid[not_found]).all()
    del host_flag, sub_flag, unique_pid
    pid_old = pid.copy()
    # Built-in zip replaces itertools.izip: equivalent on Python 2 and
    # keeps the function working on Python 3, where izip no longer exists.
    for i, j in zip(found, not_found):
        # Re-point every halo parented by subhalo `i` at `i`'s own parent.
        pid[s[slice(*idx[j])]] = pid_old[i]
    del pid_old, idx, s, found, not_found
    return generate_upid(pid, id, True) if recursive else pid
def shuffleMockCatalog(mock_ids, halo_catalog, bin_width=None, bins=None,
        proxy='mvir', box_size=None, apply_rsd=False,
        shuffle_centrals=True, shuffle_satellites=True, rotate_satellites=False,
        return_structured_array=False):
    """
    Shuffle a mock catalog according to Zentner et al. (2014) [arXiv:1311.1818]
    Parameters
    ----------
    mock_ids : array_like
        Should be a 1-d array of int which contains the corresponding halo IDs
        for the galaxies in the mock catalog to be shuffled.
    halo_catalog : array_like
        Should be a 1-d structrued array which has the following fields:
        id, upid, x, y, z, vz (if `apply_rsd` it True), and the proxy.
    bin_width : float or None, optional
        The width of the bin, in dex.
    bins : int, array_like, or None, optional
        If an integer is provided, it is interpreted as the number of bins.
        If an array is provided, it is interpreted as the edges of the bins.
        The parameter _overwrites_ `bin_width`.
    proxy : string, optional
        The proxy to bin on. Must be present in the fields of `halo_catalog`.
    box_size : float or None, optional
        The side length of the box. Should be in the same unit as x, y, z.
    apply_rsd : bool, optional
        Whether or not to apply redshift space distortions on the z-axis.
        (Default is False)
    shuffle_centrals : bool, optional
        Whether or not to shuffle central galaxies (Default is True)
    shuffle_satellites : bool, optional
        Whether or not to shuffle satellite galaxies (Default is True)
    rotate_satellites : bool, optional
        Whether or not to apply a random rotation to satellite galaxies
        (Default is False)
    return_structured_array : bool, optional
        Whether to return a structured array that contains x, y, z
        or just a n-by-3 float array.
    Returns
    -------
    pos : array_like
        A ndarray that contains x, y, z of the shuffled positions.
    """
    # check necessary fields in halo_catalog
    fields = ['id', 'upid', proxy] + _axes
    if apply_rsd:
        fields.append('vz')
    if not all((f in halo_catalog.dtype.names for f in fields)):
        raise ValueError('`halo_catalog` should have the following fields: '+ \
                ', '.join(fields))
    # check dtype
    ax_type = halo_catalog['x'].dtype.type
    if any((halo_catalog[ax].dtype.type != ax_type for ax in 'yz')):
        raise ValueError('The types of fields x, y, z in `halo_catalog` ' \
                'must all be the same.')
    # check all mock_ids are in halo_catalog
    s = halo_catalog['id'].argsort()
    idx = np.searchsorted(halo_catalog['id'], mock_ids, sorter=s)
    try:
        idx = s[idx]
    except IndexError:
        raise ValueError('`mock_ids` must all present in `halo_catalog`')
    if not (halo_catalog['id'][idx] == mock_ids).all():
        raise ValueError('`mock_ids` must all present in `halo_catalog`')
    # mock_idx maps each halo row to its galaxy's row in `mock_ids`
    # (-1 for halos hosting no mock galaxy).
    mock_idx = np.ones(len(halo_catalog), dtype=int)
    mock_idx *= -1
    mock_idx[idx] = np.arange(len(mock_ids))
    del s, idx
    # separate hosts and subs
    # The now-redundant `upid` (hosts) / `id` (subs) fields are recycled to
    # store mock_idx, avoiding an extra array.
    host_flag = (halo_catalog['upid'] == -1)
    hosts = rename_fields(halo_catalog[host_flag], {'upid':'mock_idx'})
    hosts['mock_idx'] = mock_idx[host_flag]
    subs = rename_fields(halo_catalog[~host_flag], {'id':'mock_idx'})
    subs['mock_idx'] = mock_idx[~host_flag]
    subs = subs[subs['mock_idx'] > -1]
    del host_flag, mock_idx
    # group subhalos
    subs.sort(order='upid')
    idx = np.fromiter(_iter_plateau_in_sorted_array(subs['upid']), \
            np.dtype([('start', int), ('stop', int)]))
    host_ids = subs['upid'][idx['start']]
    if not np.in1d(host_ids, hosts['id'], True).all():
        raise ValueError('Some subhalos associdated with the mock galaxies ' \
                'have no parent halos in `halo_catalog`. Consider using ' \
                '`generate_upid` to fix this.')
    # subs_idx[j] is the (start, stop) slice of `subs` belonging to hosts[j];
    # hosts with no subhalos keep the default empty (0, 0) slice.
    # NOTE(review): this alignment assumes `hosts` is sorted by 'id' --
    # confirm against how `halo_catalog` is prepared upstream.
    subs_idx = np.zeros(len(hosts), dtype=idx.dtype)
    subs_idx[np.in1d(hosts['id'], host_ids, True)] = idx
    del idx, host_ids
    # check bins
    try:
        bin_width = float(bin_width)
    except (ValueError, TypeError):
        bin_width = None
    else:
        if bin_width <= 0:
            bin_width = None
    if bin_width is None:
        bin_width = 0.1
    # Log-space range of the proxy; the 0.99999 factor nudges the lower edge
    # below the minimum so the smallest host lands inside the first bin.
    mi = np.log10(hosts[proxy].min()*0.99999)
    ma = np.log10(hosts[proxy].max())
    if bins is None:
        bins = int(np.ceil((ma-mi)/bin_width))
        mi = ma - bin_width*bins
    try:
        bins = int(bins)
    except (ValueError, TypeError):
        # `bins` is an explicit array of ascending bin edges.
        bins = np.asarray(bins)
        if len(bins) < 2 or (bins[1:]<bins[:-1]).any():
            raise ValueError('Please specify a valid `bin` parameter.')
    else:
        bins = np.logspace(mi, ma, bins+1)
    # create the array for storing results
    # NaN marks galaxies that have not yet been assigned a position.
    pos = np.empty((len(mock_ids), 3), ax_type)
    pos.fill(np.nan)
    # loop of bins of proxy (e.g. mvir)
    for i, indices in enumerate(_iter_indices_in_bins(bins, hosts[proxy])):
        if not len(indices):
            continue
        # i == 0 / i == len(bins) are the under/overflow groups outside the
        # edges; they are skipped (with a warning only if they matter).
        if i==0 or i==len(bins):
            if (hosts['mock_idx'][indices] > -1).any() or \
                    any((subs_idx['start'][j] < subs_idx['stop'][j] \
                    for j in indices)):
                warnings.warn('Some halos associdated with the mock catalog ' \
                        'are outside the bin range.', RuntimeWarning)
            continue
        # shuffle satellites
        if shuffle_satellites:
            choices = indices.tolist()
        for j in indices:
            subs_this = subs[slice(*subs_idx[j])]
            if not len(subs_this):
                continue
            mock_idx_this = subs_this['mock_idx']
            # NOTE(review): multi-field selection + .view assumes an older
            # numpy; recent versions need structured_to_unstructured.
            pos[mock_idx_this] = subs_this[_axes].view((ax_type,3))
            if shuffle_satellites:
                # Move this host's satellites rigidly (relative offsets kept)
                # onto a host drawn without replacement from the same bin.
                k = choices.pop(np.random.randint(len(choices)))
                pos[mock_idx_this] -= _get_xyz(hosts[j], ax_type)
                if rotate_satellites:
                    pos[mock_idx_this] = \
                            _apply_rotation(pos[mock_idx_this], box_size)
                pos[mock_idx_this] += _get_xyz(hosts[k], ax_type)
                if apply_rsd:
                    # NOTE(review): /100.0 presumably converts vz [km/s] to a
                    # comoving offset with H0 = 100 h km/s/Mpc -- confirm the
                    # catalog's units.
                    pos[mock_idx_this,2] += (subs_this['vz'] \
                            + hosts['vz'][k] - hosts['vz'][j])/100.0
            else:
                if rotate_satellites:
                    # Rotate in place about the (unchanged) host position.
                    host_pos = _get_xyz(hosts[j], ax_type)
                    pos[mock_idx_this] -= host_pos
                    pos[mock_idx_this] = \
                            _apply_rotation(pos[mock_idx_this], box_size)
                    pos[mock_idx_this] += host_pos
                if apply_rsd:
                    pos[mock_idx_this,2] += subs_this['vz']/100.0
        # shuffle hosts
        has_mock = indices[hosts['mock_idx'][indices] > -1]
        if not len(has_mock):
            continue
        mock_idx_this = hosts['mock_idx'][has_mock]
        if shuffle_centrals:
            # Centrals are reassigned to hosts drawn without replacement from
            # the whole bin, not only from hosts that carry a galaxy.
            has_mock = np.random.choice(indices, len(has_mock), False)
        pos[mock_idx_this] = hosts[_axes][has_mock].view((ax_type,3))
        if apply_rsd:
            pos[mock_idx_this,2] += hosts['vz'][has_mock]/100.0
    # sanity check
    if np.isnan(pos).any():
        warnings.warn('Some galaxies in the mock catalog have not been ' \
                'assigned a new position. Maybe the corresponding halo is ' \
                'outside the bin range.', RuntimeWarning)
    # wrap box
    if box_size is not None:
        # In-place periodic wrap into [0, box_size).
        pos = np.remainder(pos, box_size, pos)
    if return_structured_array:
        # NOTE(review): under Python 3 np.dtype rejects a zip iterator --
        # this needs list(zip(...)) there.
        pos = pos.view(np.dtype(zip(_axes, [ax_type]*3)))
    return pos
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=protected-access
# pylint: disable=g-classes-have-attributes
# pylint: disable=g-bad-import-order
"""Contains the base Layer class, from which all layers inherit."""
import tensorflow.compat.v2 as tf
import collections
import copy
import functools
import itertools
import textwrap
import threading
import warnings
import weakref
import numpy as np
from google.protobuf import json_format
from keras import backend
from keras import constraints
from keras import initializers
from keras import regularizers
from keras.engine import base_layer_utils
from keras.engine import input_spec
from keras.engine import keras_tensor
from keras.engine import node as node_module
from keras.mixed_precision import autocast_variable
from keras.mixed_precision import loss_scale_optimizer
from keras.mixed_precision import policy
from keras.saving.saved_model import layer_serialization
from keras.utils import generic_utils
from keras.utils import layer_utils
from keras.utils import object_identity
from keras.utils import tf_inspect
from keras.utils import tf_utils
from keras.utils import traceback_utils
from keras.utils import version_utils
# A module that only depends on `keras.layers` import these from here.
from keras.utils.generic_utils import to_snake_case # pylint: disable=unused-import
from keras.utils.tf_utils import is_tensor_or_tensor_list # pylint: disable=unused-import
from tensorflow.python.platform import tf_logging
from tensorflow.python.util.tf_export import get_canonical_name_for_symbol
from tensorflow.python.util.tf_export import keras_export
from tensorflow.tools.docs import doc_controls
# pylint: disable=g-inconsistent-quotes
# Loaded lazily, presumably to break a circular import with `keras.metrics`
# -- confirm before changing to a direct import.
metrics_mod = generic_utils.LazyLoader(
    "metrics_mod", globals(),
    "keras.metrics")
# pylint: enable=g-inconsistent-quotes
# Prefix that is added to the TF op layer names.
_TF_OP_LAYER_NAME_PREFIX = 'tf_op_layer_'
# TODO(mdan): Should we have a single generic type for types that can be passed
# to tf.cast?
_AUTOCAST_TYPES = (tf.Tensor, tf.SparseTensor,
                   tf.RaggedTensor)
# Usage-tracking gauges reported to TensorFlow's monitoring system; set in
# `Layer._instrument_layer_creation` below.
keras_layers_gauge = tf.__internal__.monitoring.BoolGauge(
    '/tensorflow/api/keras/layers', 'keras layers usage', 'method')
keras_models_gauge = tf.__internal__.monitoring.BoolGauge(
    '/tensorflow/api/keras/models', 'keras model usage', 'method')
keras_api_gauge = tf.__internal__.monitoring.BoolGauge(
    '/tensorflow/api/keras', 'keras api usage', 'method')
keras_premade_model_gauge = tf.__internal__.monitoring.BoolGauge(
    '/tensorflow/api/keras/premade_models', 'premade keras model usage', 'type')
# Module-level switch; not read within this file chunk -- presumably toggled
# by an enabler API elsewhere in the package.
_is_name_scope_on_model_declaration_enabled = False
@keras_export('keras.layers.Layer')
class Layer(tf.Module, version_utils.LayerVersionSelector):
"""This is the class from which all layers inherit.
A layer is a callable object that takes as input one or more tensors and
that outputs one or more tensors. It involves *computation*, defined
in the `call()` method, and a *state* (weight variables). State can be
created in various places, at the convenience of the subclass implementer:
* in `__init__()`;
* in the optional `build()` method, which is invoked by the first
`__call__()` to the layer, and supplies the shape(s) of the input(s),
which may not have been known at initialization time;
* in the first invocation of `call()`, with some caveats discussed
below.
Users will just instantiate a layer and then treat it as a callable.
Args:
trainable: Boolean, whether the layer's variables should be trainable.
name: String name of the layer.
dtype: The dtype of the layer's computations and weights. Can also be a
`tf.keras.mixed_precision.Policy`, which allows the computation and weight
dtype to differ. Default of `None` means to use
`tf.keras.mixed_precision.global_policy()`, which is a float32 policy
unless set to different value.
dynamic: Set this to `True` if your layer should only be run eagerly, and
should not be used to generate a static computation graph.
This would be the case for a Tree-RNN or a recursive network,
for example, or generally for any layer that manipulates tensors
using Python control flow. If `False`, we assume that the layer can
safely be used to generate a static computation graph.
Attributes:
name: The name of the layer (string).
dtype: The dtype of the layer's weights.
variable_dtype: Alias of `dtype`.
compute_dtype: The dtype of the layer's computations. Layers automatically
cast inputs to this dtype which causes the computations and output to also
be in this dtype. When mixed precision is used with a
`tf.keras.mixed_precision.Policy`, this will be different than
`variable_dtype`.
dtype_policy: The layer's dtype policy. See the
`tf.keras.mixed_precision.Policy` documentation for details.
trainable_weights: List of variables to be included in backprop.
non_trainable_weights: List of variables that should not be
included in backprop.
weights: The concatenation of the lists trainable_weights and
non_trainable_weights (in this order).
trainable: Whether the layer should be trained (boolean), i.e. whether
its potentially-trainable weights should be returned as part of
`layer.trainable_weights`.
input_spec: Optional (list of) `InputSpec` object(s) specifying the
constraints on inputs that can be accepted by the layer.
We recommend that descendants of `Layer` implement the following methods:
* `__init__()`: Defines custom layer attributes, and creates layer weights
that do not depend on input shapes, using `add_weight()`, or other state.
* `build(self, input_shape)`: This method can be used to create weights that
depend on the shape(s) of the input(s), using `add_weight()`, or other
state. `__call__()` will automatically build the layer (if it has not been
built yet) by calling `build()`.
* `call(self, inputs, *args, **kwargs)`: Called in `__call__` after making
sure `build()` has been called. `call()` performs the logic of applying the
layer to the `inputs`. The first invocation may additionally create state
that could not be conveniently created in `build()`; see its docstring
for details.
Two reserved keyword arguments you can optionally use in `call()` are:
- `training` (boolean, whether the call is in inference mode or training
mode). See more details in [the layer/model subclassing guide](
https://www.tensorflow.org/guide/keras/custom_layers_and_models#privileged_training_argument_in_the_call_method)
- `mask` (boolean tensor encoding masked timesteps in the input, used
in RNN layers). See more details in [the layer/model subclassing guide](
https://www.tensorflow.org/guide/keras/custom_layers_and_models#privileged_mask_argument_in_the_call_method)
A typical signature for this method is `call(self, inputs)`, and user could
optionally add `training` and `mask` if the layer need them. `*args` and
`**kwargs` is only useful for future extension when more input parameters
are planned to be added.
* `get_config(self)`: Returns a dictionary containing the configuration used
to initialize this layer. If the keys differ from the arguments
in `__init__`, then override `from_config(self)` as well.
This method is used when saving
the layer or a model that contains this layer.
Examples:
Here's a basic example: a layer with two variables, `w` and `b`,
that returns `y = w . x + b`.
It shows how to implement `build()` and `call()`.
Variables set as attributes of a layer are tracked as weights
of the layers (in `layer.weights`).
```python
class SimpleDense(Layer):
def __init__(self, units=32):
super(SimpleDense, self).__init__()
self.units = units
def build(self, input_shape): # Create the state of the layer (weights)
w_init = tf.random_normal_initializer()
self.w = tf.Variable(
initial_value=w_init(shape=(input_shape[-1], self.units),
dtype='float32'),
trainable=True)
b_init = tf.zeros_initializer()
self.b = tf.Variable(
initial_value=b_init(shape=(self.units,), dtype='float32'),
trainable=True)
def call(self, inputs): # Defines the computation from inputs to outputs
return tf.matmul(inputs, self.w) + self.b
# Instantiates the layer.
linear_layer = SimpleDense(4)
# This will also call `build(input_shape)` and create the weights.
y = linear_layer(tf.ones((2, 2)))
assert len(linear_layer.weights) == 2
# These weights are trainable, so they're listed in `trainable_weights`:
assert len(linear_layer.trainable_weights) == 2
```
Note that the method `add_weight()` offers a shortcut to create weights:
```python
class SimpleDense(Layer):
def __init__(self, units=32):
super(SimpleDense, self).__init__()
self.units = units
def build(self, input_shape):
self.w = self.add_weight(shape=(input_shape[-1], self.units),
initializer='random_normal',
trainable=True)
self.b = self.add_weight(shape=(self.units,),
initializer='random_normal',
trainable=True)
def call(self, inputs):
return tf.matmul(inputs, self.w) + self.b
```
Besides trainable weights, updated via backpropagation during training,
layers can also have non-trainable weights. These weights are meant to
be updated manually during `call()`. Here's a example layer that computes
the running sum of its inputs:
```python
class ComputeSum(Layer):
def __init__(self, input_dim):
super(ComputeSum, self).__init__()
# Create a non-trainable weight.
self.total = tf.Variable(initial_value=tf.zeros((input_dim,)),
trainable=False)
def call(self, inputs):
self.total.assign_add(tf.reduce_sum(inputs, axis=0))
return self.total
my_sum = ComputeSum(2)
x = tf.ones((2, 2))
y = my_sum(x)
print(y.numpy()) # [2. 2.]
y = my_sum(x)
print(y.numpy()) # [4. 4.]
assert my_sum.weights == [my_sum.total]
assert my_sum.non_trainable_weights == [my_sum.total]
assert my_sum.trainable_weights == []
```
For more information about creating layers, see the guide
[Making new Layers and Models via subclassing](
https://www.tensorflow.org/guide/keras/custom_layers_and_models)
"""
# See tf.Module for the usage of this property.
# The key for _obj_reference_counts_dict is a Trackable, which could be a
# variable or layer etc. tf.Module._flatten will fail to flatten the key
# since it is trying to convert Trackable to a string. This attribute can be
# ignored even after the fix of nest lib, since the trackable object should
# already been available as individual attributes. _obj_reference_counts_dict
# just contains a copy of them.
_TF_MODULE_IGNORED_PROPERTIES = frozenset(itertools.chain(
('_obj_reference_counts_dict',),
tf.Module._TF_MODULE_IGNORED_PROPERTIES
))
# When loading from a SavedModel, Layers typically can be revived into a
# generic Layer wrapper. Sometimes, however, layers may implement methods
# that go beyond this wrapper, as in the case of PreprocessingLayers'
# `adapt` method. When this is the case, layer implementers can override
# must_restore_from_config to return True; layers with this property must
# be restored into their actual objects (and will fail if the object is
# not available to the restoration code).
_must_restore_from_config = False
def _get_cell_name(self):
canonical_name = get_canonical_name_for_symbol(
self.__class__, api_name='keras', add_prefix_to_v1_names=True)
if canonical_name is not None:
return 'tf.{}'.format(canonical_name)
return self.__class__.__module__ + '.' + self.__class__.__name__
def _instrument_layer_creation(self):
self._instrumented_keras_api = False
self._instrumented_keras_layer_class = False
self._instrumented_keras_model_class = False
if not getattr(self, '_disable_keras_instrumentation', False):
keras_api_gauge.get_cell('layer').set(True)
self._instrumented_keras_api = True
if getattr(self, '_is_model_for_instrumentation', False):
keras_models_gauge.get_cell(self._get_cell_name()).set(True)
self._instrumented_keras_model_class = True
else:
keras_layers_gauge.get_cell(self._get_cell_name()).set(True)
self._instrumented_keras_layer_class = True
else:
# This is a legacy layer that has disabled instrumentation
# as a native keras object. We still instrument this as
# legacy usage.
keras_api_gauge.get_cell('legacy_layer').set(True)
  @tf.__internal__.tracking.no_automatic_dependency_tracking
  def __init__(self,
               trainable=True,
               name=None,
               dtype=None,
               dynamic=False,
               **kwargs):
    """Initializes base layer state.

    See the class docstring for `trainable`, `name`, `dtype` and `dynamic`;
    the accepted extra keyword arguments are listed in `allowed_kwargs`
    below (most apply to input layers only).
    """
    self._instrument_layer_creation()
    # These properties should be set by the user via keyword arguments.
    # note that 'dtype', 'input_shape' and 'batch_input_shape'
    # are only applicable to input layers: do not pass these keywords
    # to non-input layers.
    allowed_kwargs = {
        'input_dim',
        'input_shape',
        'batch_input_shape',
        'batch_size',
        'weights',
        'activity_regularizer',
        'autocast',
        'implementation',
    }
    # Validate optional keyword arguments.
    generic_utils.validate_kwargs(kwargs, allowed_kwargs)
    # Mutable properties
    # Indicates whether the layer's weights are updated during training
    # and whether the layer's updates are run during training.
    # `trainable` may also be a boolean tensor/variable, not just a Python
    # bool.
    if not (isinstance(trainable, bool) or
            (isinstance(trainable, (tf.Tensor, tf.Variable)) and
             trainable.dtype is tf.bool)):
      raise TypeError(
          'Expected `trainable` argument to be a boolean, '
          f'but got: {trainable}')
    self._trainable = trainable
    # A stateful layer is a layer whose updates are run during inference too,
    # for instance stateful RNNs.
    self._stateful = False
    # Indicates whether `build` needs to be called upon layer call, to create
    # the layer's weights. (Note that the first call() may also create weights,
    # independent of build().)
    self.built = False
    # Provides information about which inputs are compatible with the layer.
    self._input_spec = None
    # SavedModel-related attributes.
    # Record the build input shape for loading purposes.
    # TODO(kathywu): Move this to Layer._set_save_spec once cl/290121460 is
    # submitted.
    self._build_input_shape = None
    self._saved_model_inputs_spec = None
    self._saved_model_arg_spec = None
    # `Layer.compute_mask` will be called at the end of `Layer.__call__` if
    # `Layer.compute_mask` is overridden, or if the `Layer` subclass sets
    # `self.supports_masking=True`.
    self._supports_masking = not generic_utils.is_default(self.compute_mask)
    self._init_set_name(name)
    self._activity_regularizer = regularizers.get(
        kwargs.pop('activity_regularizer', None))
    self._maybe_create_attribute('_trainable_weights', [])
    self._maybe_create_attribute('_non_trainable_weights', [])
    self._updates = []
    # Object to store all thread local layer properties.
    self._thread_local = threading.local()
    # A list of zero-argument lambdas which return Tensors, used for variable
    # regularizers.
    self._callable_losses = []
    # A list of symbolic Tensors containing activity regularizers and losses
    # manually added through `add_loss` in graph-building mode.
    self._losses = []
    # A list of metric instances corresponding to the symbolic metric tensors
    # added using the `add_metric` API.
    self._metrics = []
    # Ensures the same metric is not added multiple times in `MirroredStrategy`.
    self._metrics_lock = threading.Lock()
    # Both graph and subclassed networks have a dtype policy. For graph
    # networks, the policy's compute and variable dtypes are ignored. Such
    # networks only use the policy if it is a PolicyV1, in which case it uses
    # the PolicyV1's loss_scale (Policy does not have a loss_scale). For
    # subclassed networks, the compute and variable dtypes are used as like any
    # ordinary layer.
    self._set_dtype_policy(dtype)
    # Boolean indicating whether the layer automatically casts its inputs to the
    # layer's compute_dtype.
    self._autocast = kwargs.get('autocast',
                                base_layer_utils.v2_dtype_behavior_enabled())
    # Tracks `TrackableDataStructure`s, `Module`s, and `Layer`s.
    # Ordered by when the object was assigned as an attr.
    # Entries are unique.
    self._maybe_create_attribute('_self_tracked_trackables', [])
    # These lists will be filled via successive calls
    # to self._add_inbound_node().
    # Used in symbolic mode only, only in conjunction with graph-networks
    self._inbound_nodes_value = []
    self._outbound_nodes_value = []
    self._init_call_fn_args()
    # Whether the `call` method can be used to build a TF graph without issues.
    # This attribute has no effect if the model is created using the Functional
    # API. Instead, `model.dynamic` is determined based on the internal layers.
    if not isinstance(dynamic, bool):
      raise TypeError(
          f'Expected `dynamic` argument to be a boolean, but got: {dynamic}')
    self._dynamic = dynamic
    # Manage input shape information if passed.
    if 'input_dim' in kwargs and 'input_shape' not in kwargs:
      # Backwards compatibility: alias 'input_dim' to 'input_shape'.
      kwargs['input_shape'] = (kwargs['input_dim'],)
    if 'input_shape' in kwargs or 'batch_input_shape' in kwargs:
      # In this case we will later create an input layer
      # to insert before the current layer
      if 'batch_input_shape' in kwargs:
        batch_input_shape = tuple(kwargs['batch_input_shape'])
      elif 'input_shape' in kwargs:
        if 'batch_size' in kwargs:
          batch_size = kwargs['batch_size']
        else:
          batch_size = None
        batch_input_shape = (batch_size,) + tuple(kwargs['input_shape'])
      self._batch_input_shape = batch_input_shape
    # Manage initial weight values if passed.
    self._initial_weights = kwargs.get('weights', None)
    # Whether the layer will track any layers that is set as attribute on itself
    # as sub-layers, the weights from the sub-layers will be included in the
    # parent layer's variables() as well.
    # Default to True, which means auto tracking is turned on. Certain subclass
    # might want to turn it off, like Sequential model.
    self._auto_track_sub_layers = True
    # For backwards compat reasons, most built-in layers do not guarantee
    # That they will 100% preserve the structure of input args when saving
    # / loading configs. E.g. they may un-nest an arg that is
    # a list with one element.
    self._preserve_input_structure_in_config = False
    # Save outer name scope at layer declaration so that it is preserved at
    # the actual layer construction.
    self._outer_name_scope = tf.get_current_name_scope()
@tf.__internal__.tracking.no_automatic_dependency_tracking
@generic_utils.default
def build(self, input_shape):
    """Creates the variables of the layer (optional, for subclass implementers).
    This is a method that implementers of subclasses of `Layer` or `Model`
    can override if they need a state-creation step in-between
    layer instantiation and layer call. It is invoked automatically before
    the first execution of `call()`.
    This is typically used to create the weights of `Layer` subclasses
    (at the discretion of the subclass implementer).
    Args:
      input_shape: Instance of `TensorShape`, or list of instances of
        `TensorShape` if the layer expects a list of inputs
        (one instance per input).
    """
    # Only record the build input shapes of overridden build methods.
    # `generic_utils.default` tags this default implementation with an
    # `_is_default` attribute; an overriding subclass `build` lacks it.
    if not hasattr(self.build, '_is_default'):
      self._build_input_shape = input_shape
    self.built = True
@doc_controls.for_subclass_implementers
def call(self, inputs, *args, **kwargs):  # pylint: disable=unused-argument
    """This is where the layer's logic lives.
    The `call()` method may not create state (except in its first invocation,
    wrapping the creation of variables or other resources in `tf.init_scope()`).
    It is recommended to create state in `__init__()`, or the `build()` method
    that is called automatically before `call()` executes the first time.
    Args:
      inputs: Input tensor, or dict/list/tuple of input tensors.
        The first positional `inputs` argument is subject to special rules:
        - `inputs` must be explicitly passed. A layer cannot have zero
          arguments, and `inputs` cannot be provided via the default value
          of a keyword argument.
        - NumPy array or Python scalar values in `inputs` get cast as tensors.
        - Keras mask metadata is only collected from `inputs`.
        - Layers are built (`build(input_shape)` method)
          using shape info from `inputs` only.
        - `input_spec` compatibility is only checked against `inputs`.
        - Mixed precision input casting is only applied to `inputs`.
          If a layer has tensor arguments in `*args` or `**kwargs`, their
          casting behavior in mixed precision should be handled manually.
        - The SavedModel input specification is generated using `inputs` only.
        - Integration with various ecosystem packages like TFMOT, TFLite,
          TF.js, etc is only supported for `inputs` and not for tensors in
          positional and keyword arguments.
      *args: Additional positional arguments. May contain tensors, although
        this is not recommended, for the reasons above.
      **kwargs: Additional keyword arguments. May contain tensors, although
        this is not recommended, for the reasons above.
        The following optional keyword arguments are reserved:
        - `training`: Boolean scalar tensor of Python boolean indicating
          whether the `call` is meant for training or inference.
        - `mask`: Boolean input mask. If the layer's `call()` method takes a
          `mask` argument, its default value will be set to the mask generated
          for `inputs` by the previous layer (if `input` did come from a layer
          that generated a corresponding mask, i.e. if it came from a Keras
          layer with masking support).
    Returns:
      A tensor or list/tuple of tensors.
    """
    # Default implementation is the identity function; subclasses override
    # this with the layer's actual computation.
    return inputs
@doc_controls.for_subclass_implementers
def _add_trackable(self, trackable_object, trainable):
    """Registers a Trackable object as part of this layer's state.

    Args:
      trackable_object: The tf.tracking.Trackable object to add.
      trainable: Boolean. If True, the handler is reported among the
        layer's "trainable_variables" (e.g. variables, biases); otherwise
        among "non_trainable_variables" (e.g. BatchNorm mean and variance).

    Returns:
      The TrackableWeightHandler used to track this object.
    """
    # Wrap the object in a handler unless it already is one.
    already_wrapped = isinstance(
        trackable_object, base_layer_utils.TrackableWeightHandler)
    handler = (trackable_object if already_wrapped else
               base_layer_utils.TrackableWeightHandler(trackable_object))
    # Route the handler into the appropriate weight bucket.
    bucket = (self._trainable_weights if trainable
              else self._non_trainable_weights)
    bucket.append(handler)
    return handler
@doc_controls.for_subclass_implementers
def add_weight(self,
               name=None,
               shape=None,
               dtype=None,
               initializer=None,
               regularizer=None,
               trainable=None,
               constraint=None,
               use_resource=None,
               synchronization=tf.VariableSynchronization.AUTO,
               aggregation=tf.VariableAggregation.NONE,
               **kwargs):
    """Adds a new variable to the layer.
    Args:
      name: Variable name.
      shape: Variable shape. Defaults to scalar if unspecified.
      dtype: The type of the variable. Defaults to `self.dtype`.
      initializer: Initializer instance (callable).
      regularizer: Regularizer instance (callable).
      trainable: Boolean, whether the variable should be part of the layer's
        "trainable_variables" (e.g. variables, biases)
        or "non_trainable_variables" (e.g. BatchNorm mean and variance).
        Note that `trainable` cannot be `True` if `synchronization`
        is set to `ON_READ`.
      constraint: Constraint instance (callable).
      use_resource: Whether to use `ResourceVariable`.
      synchronization: Indicates when a distributed a variable will be
        aggregated. Accepted values are constants defined in the class
        `tf.VariableSynchronization`. By default the synchronization is set to
        `AUTO` and the current `DistributionStrategy` chooses
        when to synchronize. If `synchronization` is set to `ON_READ`,
        `trainable` must not be set to `True`.
      aggregation: Indicates how a distributed variable will be aggregated.
        Accepted values are constants defined in the class
        `tf.VariableAggregation`.
      **kwargs: Additional keyword arguments. Accepted values are `getter`,
        `collections`, `experimental_autocast` and `caching_device`.
    Returns:
      The variable created.
    Raises:
      ValueError: When giving unsupported dtype and no initializer or when
        trainable has been set to True with synchronization set as `ON_READ`.
    """
    if shape is None:
      shape = ()
    kwargs.pop('partitioner', None)  # Ignored.
    # Validate optional keyword arguments.
    for kwarg in kwargs:
      if kwarg not in ['collections', 'experimental_autocast',
                       'caching_device', 'getter']:
        raise TypeError('Unknown keyword argument:', kwarg)
    collections_arg = kwargs.pop('collections', None)
    # 'experimental_autocast' can be set to False by the caller to indicate an
    # AutoCastVariable should never be created.
    autocast = kwargs.pop('experimental_autocast', True)
    # See the docstring for tf.Variable about the details for caching_device.
    caching_device = kwargs.pop('caching_device', None)
    if dtype is None:
      dtype = self.dtype or backend.floatx()
    dtype = tf.as_dtype(dtype)
    if self._dtype_policy.variable_dtype is None:
      # The policy is "_infer", so we infer the policy from the variable dtype.
      self._set_dtype_policy(policy.Policy(dtype.base_dtype.name))
    initializer = initializers.get(initializer)
    regularizer = regularizers.get(regularizer)
    constraint = constraints.get(constraint)
    if synchronization == tf.VariableSynchronization.ON_READ:
      if trainable:
        raise ValueError(
            'Synchronization value can be set to '
            'VariableSynchronization.ON_READ only for non-trainable variables. '
            'You have specified trainable=True and '
            'synchronization=VariableSynchronization.ON_READ.')
      else:
        # Set trainable to be false when variable is to be synced on read.
        trainable = False
    elif trainable is None:
      trainable = True
    # Initialize variable when no initializer provided
    if initializer is None:
      # If dtype is DT_FLOAT, provide a uniform unit scaling initializer
      if dtype.is_floating:
        initializer = initializers.get('glorot_uniform')
      # If dtype is DT_INT/DT_UINT, provide a default value `zero`
      # If dtype is DT_BOOL, provide a default value `FALSE`
      elif dtype.is_integer or dtype.is_unsigned or dtype.is_bool:
        initializer = initializers.get('zeros')
      # NOTES:Do we need to support for handling DT_STRING and DT_COMPLEX here?
      elif 'getter' not in kwargs:
        # When `getter` is specified, it's possibly fine for `initializer` to be
        # None since it's up to the custom `getter` to raise error in case it
        # indeed needs `initializer`.
        raise ValueError(f'An initializer for variable {name} of type '
                         f'{dtype.base_dtype} is required for layer '
                         f'{self.name}. Received: {initializer}.')
    # Default variable factory unless a custom `getter` was supplied.
    getter = kwargs.pop('getter', base_layer_utils.make_variable)
    if (autocast and
        self._dtype_policy.compute_dtype != self._dtype_policy.variable_dtype
        and dtype.is_floating):
      old_getter = getter
      # Wrap variable constructor to return an AutoCastVariable.
      def getter(*args, **kwargs):  # pylint: disable=function-redefined
        variable = old_getter(*args, **kwargs)
        return autocast_variable.create_autocast_variable(variable)
      # Also the caching_device does not work with the mixed precision API,
      # disable it if it is specified.
      # TODO(b/142020079): Re-enable it once the bug is fixed.
      if caching_device is not None:
        tf_logging.warning(
            '`caching_device` does not work with mixed precision API. Ignoring '
            'user specified `caching_device`.')
        caching_device = None
    variable = self._add_variable_with_custom_getter(
        name=name,
        shape=shape,
        # TODO(allenl): a `make_variable` equivalent should be added as a
        # `Trackable` method.
        getter=getter,
        # Manage errors in Layer rather than Trackable.
        overwrite=True,
        initializer=initializer,
        dtype=dtype,
        constraint=constraint,
        trainable=trainable,
        use_resource=use_resource,
        collections=collections_arg,
        synchronization=synchronization,
        aggregation=aggregation,
        caching_device=caching_device)
    if regularizer is not None:
      # TODO(fchollet): in the future, this should be handled at the
      # level of variable creation, and weight regularization losses
      # should be variable attributes.
      # Strip the output suffix (e.g. ':0') to recover the in-scope name.
      name_in_scope = variable.name[:variable.name.find(':')]
      self._handle_weight_regularization(name_in_scope,
                                         variable,
                                         regularizer)
    if base_layer_utils.is_split_variable(variable):
      # A split (partitioned) variable is iterable; track every component.
      for v in variable:
        backend.track_variable(v)
        if trainable:
          self._trainable_weights.append(v)
        else:
          self._non_trainable_weights.append(v)
    else:
      backend.track_variable(variable)
      if trainable:
        self._trainable_weights.append(variable)
      else:
        self._non_trainable_weights.append(variable)
    return variable
@generic_utils.default
def get_config(self):
    """Returns the config of the layer.
    A layer config is a Python dictionary (serializable)
    containing the configuration of a layer.
    The same layer can be reinstantiated later
    (without its trained weights) from this configuration.
    The config of a layer does not include connectivity
    information, nor the layer class name. These are handled
    by `Network` (one layer of abstraction above).
    Note that `get_config()` does not guarantee to return a fresh copy of dict
    every time it is called. The callers should make a copy of the returned dict
    if they want to modify it.
    Returns:
      Python dictionary.
    """
    # Full `__init__` argument list; note this includes 'self'.
    all_args = tf_inspect.getfullargspec(self.__init__).args
    config = {
        'name': self.name,
        'trainable': self.trainable,
    }
    if hasattr(self, '_batch_input_shape'):
      config['batch_input_shape'] = self._batch_input_shape
    config['dtype'] = policy.serialize(self._dtype_policy)
    if hasattr(self, 'dynamic'):
      # Only include `dynamic` in the `config` if it is `True`
      if self.dynamic:
        config['dynamic'] = self.dynamic
      elif 'dynamic' in all_args:
        all_args.remove('dynamic')
    expected_args = config.keys()
    # Finds all arguments in the `__init__` that are not in the config:
    extra_args = [arg for arg in all_args if arg not in expected_args]
    # Check that either the only argument in the `__init__` is `self`,
    # or that `get_config` has been overridden:
    # (the threshold is 1 rather than 0 because 'self' always remains in
    # `extra_args`; `generic_utils.default` marks this base implementation
    # with `_is_default` so overrides are exempt from this check).
    if len(extra_args) > 1 and hasattr(self.get_config, '_is_default'):
      raise NotImplementedError(textwrap.dedent(f"""
      Layer {self.__class__.__name__} has arguments {extra_args}
      in `__init__` and therefore must override `get_config()`.
      Example:
      class CustomLayer(keras.layers.Layer):
          def __init__(self, arg1, arg2):
              super().__init__()
              self.arg1 = arg1
              self.arg2 = arg2
          def get_config(self):
              config = super().get_config()
              config.update({{
                  "arg1": self.arg1,
                  "arg2": self.arg2,
              }})
              return config"""))
    return config
@classmethod
def from_config(cls, config):
    """Instantiates a layer of this class from a `get_config` dictionary.

    This is the inverse operation of `get_config`: the returned layer has
    the same configuration, but neither its connectivity (handled by
    `Network`) nor its weights (handled by `set_weights`) are restored.

    Args:
      config: A Python dictionary, typically the output of `get_config`.

    Returns:
      A freshly constructed layer instance.
    """
    layer = cls(**config)
    return layer
def compute_output_shape(self, input_shape):
    """Computes the output shape of the layer.
    This method will cause the layer's state to be built, if that has not
    happened before. This requires that the layer will later be used with
    inputs that match the input shape provided here.
    Args:
      input_shape: Shape tuple (tuple of integers)
        or list of shape tuples (one per output tensor of the layer).
        Shape tuples can include None for free dimensions,
        instead of an integer.
    Returns:
      An input shape tuple.
    """
    if tf.executing_eagerly():
      # In this case we build the model first in order to do shape inference.
      # This is acceptable because the framework only calls
      # `compute_output_shape` on shape values that the layer would later be
      # built for. It would however cause issues in case a user attempts to
      # use `compute_output_shape` manually with shapes that are incompatible
      # with the shape the Layer will be called on (these users will have to
      # implement `compute_output_shape` themselves).
      self._maybe_build(input_shape)
      # Trace the layer in a throwaway graph so no state leaks into the
      # caller's graph/eager context.
      graph_name = str(self.name) + '_scratch_graph'
      with tf.__internal__.FuncGraph(graph_name).as_default():
        input_shape = tf_utils.convert_shapes(input_shape, to_tuples=False)
        def _make_placeholder_like(shape):
          ph = backend.placeholder(shape=shape, dtype=self.dtype)
          # Explicitly mark the placeholder as unmasked so downstream mask
          # propagation does not trip on a missing attribute.
          ph._keras_mask = None
          return ph
        inputs = tf.nest.map_structure(_make_placeholder_like, input_shape)
        try:
          outputs = self(inputs, training=False)
        except TypeError as e:
          raise NotImplementedError(
              'We could not automatically infer the static shape of the '
              'layer\'s output. Please implement the '
              '`compute_output_shape` method on your layer (%s).' %
              self.__class__.__name__) from e
      return tf.nest.map_structure(lambda t: t.shape, outputs)
    raise NotImplementedError(
        'Please run in eager mode or implement the `compute_output_shape` '
        'method on your layer (%s).' % self.__class__.__name__)
@doc_controls.for_subclass_implementers
def compute_output_signature(self, input_signature):
    """Compute the output tensor signature of the layer based on the inputs.

    Unlike a TensorShape object, a TensorSpec object contains both shape
    and dtype information for a tensor. This method allows layers to provide
    output dtype information if it is different from the input dtype.
    For any layer that doesn't implement this function,
    the framework will fall back to use `compute_output_shape`, and will
    assume that the output dtype matches the input dtype.

    Args:
      input_signature: Single TensorSpec or nested structure of TensorSpec
        objects, describing a candidate input for the layer.

    Returns:
      Single TensorSpec or nested structure of TensorSpec objects, describing
      how the layer would transform the provided input.

    Raises:
      TypeError: If input_signature contains a non-TensorSpec object.
    """
    def _spec_to_shape(s):
      # Reject anything that is not a TensorSpec before reading its shape.
      if not isinstance(s, tf.TensorSpec):
        raise TypeError('Only TensorSpec signature types are supported. '
                        f'Received: {s}.')
      return s.shape
    shapes_in = tf.nest.map_structure(_spec_to_shape, input_signature)
    shapes_out = self.compute_output_shape(shapes_in)
    dtype = self._compute_dtype
    if dtype is None:
      # No compute dtype on the layer: default to the first input's dtype.
      dtype = tf.nest.flatten(input_signature)[0].dtype
    return tf.nest.map_structure(
        lambda s: tf.TensorSpec(dtype=dtype, shape=s),
        shapes_out)
def _keras_tensor_symbolic_call(self, inputs, input_masks, args, kwargs):
    # Produces output KerasTensors for a call made with KerasTensor inputs
    # (functional-API construction), without running the layer eagerly.
    if self.dynamic:
      # We will use static shape inference to return symbolic tensors
      # matching the specifications of the layer outputs.
      # Since `self.dynamic` is True, we will never attempt to
      # run the underlying TF graph (which is disconnected).
      # TODO(fchollet): consider py_func as an alternative, which
      # would enable us to run the underlying graph if needed.
      input_signature = tf.nest.map_structure(
          lambda x: tf.TensorSpec(shape=x.shape, dtype=x.dtype),
          inputs)
      output_signature = self.compute_output_signature(input_signature)
      return tf.nest.map_structure(keras_tensor.KerasTensor, output_signature)
    else:
      # Non-dynamic layers can be traced in a scratch graph instead.
      return self._infer_output_signature(inputs, args, kwargs, input_masks)
def _infer_output_signature(self, inputs, args, kwargs, input_masks):
    """Call the layer on input KerasTensors and returns output KerasTensors."""
    call_fn = self.call
    # Wrapping `call` function in autograph to allow for dynamic control
    # flow and control dependencies in call. We are limiting this to
    # subclassed layers as autograph is strictly needed only for
    # subclassed layers and models.
    # tf_convert will respect the value of autograph setting in the
    # enclosing tf.function, if any.
    if (base_layer_utils.is_subclassed(self) and
        not base_layer_utils.from_saved_model(self)):
      call_fn = tf.__internal__.autograph.tf_convert(
          self.call, tf.__internal__.autograph.control_status_ctx())
    call_fn = traceback_utils.inject_argument_info_in_traceback(
        call_fn,
        object_name=f'layer "{self.name}" (type {self.__class__.__name__})')
    # We enter a scratch graph and build placeholder inputs inside of it that
    # match the input args.
    # We then call the layer inside of the scratch graph to identify the
    # output signatures, then we build KerasTensors corresponding to those
    # outputs.
    scratch_graph = tf.__internal__.FuncGraph(str(self.name) + '_scratch_graph')
    with scratch_graph.as_default():
      # Replace every KerasTensor (in inputs, args, kwargs and masks) with
      # a graph placeholder of the same spec before tracing.
      inputs = tf.nest.map_structure(
          keras_tensor.keras_tensor_to_placeholder, inputs)
      args = tf.nest.map_structure(
          keras_tensor.keras_tensor_to_placeholder, args)
      kwargs = tf.nest.map_structure(
          keras_tensor.keras_tensor_to_placeholder, kwargs)
      input_masks = tf.nest.map_structure(
          keras_tensor.keras_tensor_to_placeholder, input_masks)
      with backend.name_scope(self._name_scope()):  # pylint: disable=not-callable
        with autocast_variable.enable_auto_cast_variables(
            self._compute_dtype_object):
          # Build layer if applicable (if the `build` method has been
          # overridden).
          # TODO(kaftan): do we maybe_build here, or have we already done it?
          self._maybe_build(inputs)
          inputs = self._maybe_cast_inputs(inputs)
          outputs = call_fn(inputs, *args, **kwargs)
        self._handle_activity_regularization(inputs, outputs)
      self._set_mask_metadata(inputs, outputs, input_masks,
                              build_graph=False)
      # Convert traced graph tensors back into symbolic KerasTensors.
      outputs = tf.nest.map_structure(
          keras_tensor.keras_tensor_from_tensor, outputs)
    self._set_save_spec(inputs, args, kwargs)
    if hasattr(self, '_set_inputs') and not self.inputs:
      # TODO(kaftan): figure out if we need to do this at all
      # Subclassed network: explicitly set metadata normally set by
      # a call to self._set_inputs().
      self._set_inputs(inputs, outputs)
    del scratch_graph
    return outputs
@generic_utils.default
def compute_mask(self, inputs, mask=None):  # pylint: disable=unused-argument
    """Computes an output mask tensor.

    Args:
      inputs: Tensor or list of tensors.
      mask: Tensor or list of tensors.

    Returns:
      None or a tensor (or list of tensors,
      one per output tensor of the layer).
    """
    if self._supports_masking:
      # Default behavior when masking is supported: carry the input mask
      # through unchanged.
      return mask
    # Masking is not supported: receiving any non-None mask is an error.
    if any(m is not None for m in tf.nest.flatten(mask)):
      raise TypeError('Layer ' + self.name + ' does not support masking, '
                      'but was passed an input_mask: ' + str(mask))
    return None
@traceback_utils.filter_traceback
def __call__(self, *args, **kwargs):
    """Wraps `call`, applying pre- and post-processing steps.
    Args:
      *args: Positional arguments to be passed to `self.call`.
      **kwargs: Keyword arguments to be passed to `self.call`.
    Returns:
      Output tensor(s).
    Note:
      - The following optional keyword arguments are reserved for specific uses:
        * `training`: Boolean scalar tensor of Python boolean indicating
          whether the `call` is meant for training or inference.
        * `mask`: Boolean input mask.
      - If the layer's `call` method takes a `mask` argument (as some Keras
        layers do), its default value will be set to the mask generated
        for `inputs` by the previous layer (if `input` did come from
        a layer that generated a corresponding mask, i.e. if it came from
        a Keras layer with masking support.
      - If the layer is not built, the method will call `build`.
    Raises:
      ValueError: if the layer's `call` method returns None (an invalid value).
      RuntimeError: if `super().__init__()` was not called in the constructor.
    """
    # `_thread_local` is created in `__init__`; its absence means the
    # subclass never chained up to the base constructor.
    if not hasattr(self, '_thread_local'):
      raise RuntimeError(
          'You must call `super().__init__()` in the layer constructor.')
    # `inputs` (the first arg in the method spec) is special cased in
    # layer call due to historical reasons.
    # This special casing currently takes the form of:
    # - 'inputs' must be explicitly passed. A layer cannot have zero arguments,
    #   and inputs cannot have been provided via the default value of a kwarg.
    # - numpy/scalar values in `inputs` get converted to tensors
    # - implicit masks / mask metadata are only collected from 'inputs`
    # - Layers are built using shape info from 'inputs' only
    # - input_spec compatibility is only checked against `inputs`
    # - mixed precision casting (autocast) is only applied to `inputs`,
    #   not to any other argument.
    inputs, args, kwargs = self._split_out_first_arg(args, kwargs)
    input_list = tf.nest.flatten(inputs)
    # Functional Model construction mode is invoked when `Layer`s are called on
    # symbolic `KerasTensor`s, i.e.:
    # >> inputs = tf.keras.Input(10)
    # >> outputs = MyLayer()(inputs)  # Functional construction mode.
    # >> model = tf.keras.Model(inputs, outputs)
    if _in_functional_construction_mode(self, inputs, args, kwargs, input_list):
      return self._functional_construction_call(inputs, args, kwargs,
                                                input_list)
    # Maintains info about the `Layer.call` stack.
    call_context = base_layer_utils.call_context()
    # Accept NumPy and scalar inputs by converting to Tensors.
    if any(isinstance(x, (
        tf.Tensor, np.ndarray, float, int)) for x in input_list):
      inputs = tf.nest.map_structure(_convert_numpy_or_python_types, inputs)
      input_list = tf.nest.flatten(inputs)
    # Handle `mask` propagation from previous layer to current layer. Masks can
    # be propagated explicitly via the `mask` argument, or implicitly via
    # setting the `_keras_mask` attribute on the inputs to a Layer. Masks passed
    # explicitly take priority.
    input_masks, mask_is_implicit = self._get_input_masks(
        inputs, input_list, args, kwargs)
    if self._expects_mask_arg and mask_is_implicit:
      kwargs['mask'] = input_masks
    # Training mode for `Layer.call` is set via (in order of priority):
    # (1) The `training` argument passed to this `Layer.call`, if it is not None
    # (2) The training mode of an outer `Layer.call`.
    # (3) The default mode set by `tf.keras.backend.set_learning_phase` (if set)
    # (4) Any non-None default value for `training` specified in the call
    #  signature
    # (5) False (treating the layer as if it's in inference)
    args, kwargs, training_mode = self._set_training_mode(
        args, kwargs, call_context)
    # Losses are cleared for all sublayers on the outermost `Layer.call`.
    # Losses are not cleared on inner `Layer.call`s, because sublayers can be
    # called multiple times.
    if not call_context.in_call:
      self._clear_losses()
    eager = tf.executing_eagerly()
    with call_context.enter(
        layer=self,
        inputs=inputs,
        build_graph=not eager,
        training=training_mode):
      input_spec.assert_input_compatibility(self.input_spec, inputs, self.name)
      if eager:
        # Eager execution runs `call` directly; graph mode may autograph it.
        call_fn = self.call
        name_scope = self._name
      else:
        name_scope = self._name_scope()  # Avoid autoincrementing.  # pylint: disable=not-callable
        call_fn = self._autographed_call()
      call_fn = traceback_utils.inject_argument_info_in_traceback(
          call_fn,
          object_name=f'layer "{self.name}" (type {self.__class__.__name__})')
      with tf.name_scope(name_scope):
        if not self.built:
          self._maybe_build(inputs)
        if self._autocast:
          inputs = self._maybe_cast_inputs(inputs, input_list)
        with autocast_variable.enable_auto_cast_variables(
            self._compute_dtype_object):
          outputs = call_fn(inputs, *args, **kwargs)
        if self._activity_regularizer:
          self._handle_activity_regularization(inputs, outputs)
        if self._supports_masking:
          self._set_mask_metadata(inputs, outputs, input_masks, not eager)
        # Record the SavedModel input spec only on the first call.
        if self._saved_model_inputs_spec is None:
          self._set_save_spec(inputs, args, kwargs)
        return outputs
def _functional_construction_call(self, inputs, args, kwargs, input_list):
    # Handles a call made with symbolic KerasTensor inputs, i.e. Functional
    # API model construction: traces the layer, records connectivity
    # metadata, and returns symbolic outputs.
    call_context = base_layer_utils.call_context()
    # Accept NumPy and scalar inputs by converting to Tensors.
    if any(isinstance(x, (
        tf.Tensor, np.ndarray, float, int)) for x in input_list):
      def _convert_non_tensor(x):
        # Don't call `ops.convert_to_tensor` on all `inputs` because
        # `SparseTensors` can't be converted to `Tensor`.
        if isinstance(x, (tf.Tensor, np.ndarray, float, int)):
          return tf.convert_to_tensor(x)
        return x
      inputs = tf.nest.map_structure(_convert_non_tensor, inputs)
      input_list = tf.nest.flatten(inputs)
    # Handle `mask` propagation from previous layer to current layer. Masks can
    # be propagated explicitly via the `mask` argument, or implicitly via
    # setting the `_keras_mask` attribute on the inputs to a Layer. Masks passed
    # explicitly take priority.
    mask_arg_passed_by_framework = False
    input_masks, mask_is_implicit = self._get_input_masks(
        inputs, input_list, args, kwargs)
    if self._expects_mask_arg and mask_is_implicit:
      kwargs['mask'] = input_masks
      mask_arg_passed_by_framework = True
    # If `training` argument is None or not explicitly passed,
    # propagate `training` value from this layer's calling layer.
    training_value = None
    training_arg_passed_by_framework = False
    # Priority 1: `training` was explicitly passed a non-None value.
    if self._call_arg_was_passed('training', args, kwargs):
      training_value = self._get_call_arg_value('training', args, kwargs)
      if not self._expects_training_arg:
        kwargs.pop('training')
    if training_value is None:
      # Priority 2: `training` was passed to a parent layer.
      if call_context.training is not None:
        training_value = call_context.training
      # Priority 3: `learning_phase()` has been set.
      elif backend.global_learning_phase_is_set():
        training_value = backend.learning_phase()
        # Force the training_value to be bool type which matches to the contract
        # for layer/model call args.
        if tf.is_tensor(training_value):
          training_value = tf.cast(training_value, tf.bool)
        else:
          training_value = bool(training_value)
      # Priority 4: trace layer with the default training argument specified
      # in the `call` signature (or in inference mode if the `call` signature
      # specifies no non-None default).
      else:
        training_value = self._default_training_arg
      # In cases (2), (3), (4) the training argument is passed automatically
      # by the framework, and will not be hard-coded into the model.
      if self._expects_training_arg:
        args, kwargs = self._set_call_arg_value('training', training_value,
                                                args, kwargs)
        training_arg_passed_by_framework = True
    with call_context.enter(
        layer=self, inputs=inputs, build_graph=True, training=training_value):
      # Check input assumptions set after layer building, e.g. input shape.
      outputs = self._keras_tensor_symbolic_call(
          inputs, input_masks, args, kwargs)
      if outputs is None:
        raise ValueError('A layer\'s `call` method should return a '
                         'Tensor or a list of Tensors, not None '
                         '(layer: ' + self.name + ').')
      # Strip framework-injected `training`/`mask` args before recording
      # connectivity, so they are not baked into the model's call args.
      if training_arg_passed_by_framework:
        args, kwargs = self._set_call_arg_value(
            'training', None, args, kwargs, pop_kwarg_if_none=True)
      if mask_arg_passed_by_framework:
        kwargs.pop('mask')
      # Node connectivity does not special-case the first argument.
      outputs = self._set_connectivity_metadata((inputs,) + args, kwargs,
                                                outputs)
      return outputs
def _set_training_mode(self, args, kwargs, call_context):
    """Resolves the effective `training` mode for this call.
    See the priority list documented in `__call__`; steps (1)-(4) are
    annotated inline below.
    Returns:
      A `(args, kwargs, training_mode)` tuple, where `args`/`kwargs` may
      have been updated with the resolved `training` value.
    """
    training_mode = None
    if self._expects_training_arg:
      # (1) `training` was passed to this `Layer.call`.
      if self._call_arg_was_passed('training', args, kwargs):
        training_mode = self._get_call_arg_value('training', args, kwargs)
      # If no `training` arg was passed, or `None` was explicitly passed,
      # the framework will make a decision about the training mode is.
      if training_mode is None:
        call_ctx_training = call_context.training
        # (2) `training` mode is inferred from an outer `Layer.call`.
        if call_ctx_training is not None:
          training_mode = call_ctx_training
        # (3) User set `tf.keras.backend.set_learning_phase`.
        elif backend.global_learning_phase_is_set():
          training_mode = backend.learning_phase()
          # Ensure value is a `bool` or `tf.bool`.
          if isinstance(training_mode, bool):
            pass
          elif tf.is_tensor(training_mode):
            training_mode = tf.cast(training_mode, tf.bool)
          else:
            training_mode = bool(training_mode)
        # (4) We default to using `call`'s default value for `training`,
        # or treating the layer as if it is in inference if no non-None default
        # is specified in the `call` signature.
        else:
          training_mode = self._default_training_arg
        # For case (2), (3), (4) `training` arg is passed by framework.
        args, kwargs = self._set_call_arg_value('training', training_mode, args,
                                                kwargs)
    else:
      if 'training' in kwargs:
        # `training` was passed to this `Layer` but is not needed for
        # `Layer.call`. It will set the default mode for inner `Layer.call`s.
        training_mode = kwargs.pop('training')
      else:
        # Grab the current `training` mode from any outer `Layer.call`.
        training_mode = call_context.training
    return args, kwargs, training_mode
def _autographed_call(self):
    """Returns `self.call`, wrapped in autograph when required.

    Autograph allows dynamic control flow and control dependencies inside
    `call`. Wrapping is limited to subclassed layers (and models), which
    are the only ones that strictly need it; `tf_convert` respects the
    autograph setting of any enclosing `tf.function`.
    """
    needs_conversion = (
        base_layer_utils.is_subclassed(self) and
        not base_layer_utils.from_saved_model(self))
    if not needs_conversion:
      return self.call
    return tf.__internal__.autograph.tf_convert(
        self.call, tf.__internal__.autograph.control_status_ctx())
@property
def dtype(self):
"""The dtype of the layer weights.
This is equivalent to `Layer.dtype_policy.variable_dtype`. Unless
mixed precision is used, this is the same as `Layer.compute_dtype`, the
dtype of the layer's computations.
"""
return self._dtype_policy.variable_dtype
@property
def name(self):
"""Name of the layer (string), set in the constructor."""
return self._name
@property
def supports_masking(self):
"""Whether this layer supports computing a mask using `compute_mask`."""
return self._supports_masking
@supports_masking.setter
def supports_masking(self, value):
self._supports_masking = value
@property
def dynamic(self):
"""Whether the layer is dynamic (eager-only); set in the constructor."""
return any(layer._dynamic for layer in self._flatten_layers())
@property
@doc_controls.do_not_doc_inheritable
def stateful(self):
    # Stateful if this layer or any nested sub-layer is stateful.
    for nested in self._flatten_layers():
      if nested._stateful:
        return True
    return False

@stateful.setter
def stateful(self, value):
    # Note: unlike the getter, the setter only affects this layer.
    self._stateful = value
@property
def trainable(self):
return self._trainable
@trainable.setter
def trainable(self, value):
"""Sets trainable attribute for the layer and its sublayers.
When this value is changed during training (e.g. with a
`tf.keras.callbacks.Callback`) you need to call the parent
`tf.keras.Model.make_train_function` with `force=True` in order to recompile
the training graph.
Args:
value: Boolean with the desired state for the layer's trainable attribute.
"""
for layer in self._flatten_layers():
layer._trainable = value
@property
def activity_regularizer(self):
"""Optional regularizer function for the output of this layer."""
return self._activity_regularizer
@activity_regularizer.setter
def activity_regularizer(self, regularizer):
"""Optional regularizer function for the output of this layer."""
self._activity_regularizer = regularizer
@property
def input_spec(self):
  """`InputSpec` instance(s) describing the input format for this layer.
  When you create a layer subclass, you can set `self.input_spec` to enable
  the layer to run input compatibility checks when it is called.
  Consider a `Conv2D` layer: it can only be called on a single input tensor
  of rank 4. As such, you can set, in `__init__()`:
  ```python
  self.input_spec = tf.keras.layers.InputSpec(ndim=4)
  ```
  Now, if you try to call the layer on an input that isn't rank 4
  (for instance, an input of shape `(2,)`, it will raise a nicely-formatted
  error:
  ```
  ValueError: Input 0 of layer conv2d is incompatible with the layer:
  expected ndim=4, found ndim=1. Full shape received: [2]
  ```
  Input checks that can be specified via `input_spec` include:
  - Structure (e.g. a single input, a list of 2 inputs, etc)
  - Shape
  - Rank (ndim)
  - Dtype
  For more information, see `tf.keras.layers.InputSpec`.
  Returns:
    A `tf.keras.layers.InputSpec` instance, or nested structure thereof.
  """
  return self._input_spec
@input_spec.setter
# Must be decorated to prevent tracking, since the input_spec can be nested
# InputSpec objects.
@tf.__internal__.tracking.no_automatic_dependency_tracking
def input_spec(self, value):
  """Validates that every leaf of `value` is an `InputSpec` (or None)."""
  for v in tf.nest.flatten(value):
    if v is not None and not isinstance(v, InputSpec):
      raise TypeError('Layer input_spec must be an instance of InputSpec. '
                      'Got: {}'.format(v))
  self._input_spec = value
@property
def trainable_weights(self):
  """List of all trainable weights tracked by this layer.

  Trainable weights are updated via gradient descent during training.

  Returns:
    A list of trainable variables.
  """
  # A frozen layer (trainable=False) exposes no trainable weights at all.
  if not self.trainable:
    return []
  child_weights = self._gather_children_attribute('trainable_variables')
  return self._dedup_weights(self._trainable_weights + child_weights)
@property
def non_trainable_weights(self):
  """List of all non-trainable weights tracked by this layer.
  Non-trainable weights are *not* updated during training. They are expected
  to be updated manually in `call()`.
  Returns:
    A list of non-trainable variables.
  """
  if self.trainable:
    children_weights = self._gather_children_attribute(
        'non_trainable_variables')
    non_trainable_weights = self._non_trainable_weights + children_weights
  else:
    # When the layer is frozen, *all* of its weights (including those
    # created as trainable) are reported as non-trainable.
    children_weights = self._gather_children_attribute('variables')
    non_trainable_weights = (
        self._trainable_weights + self._non_trainable_weights +
        children_weights)
  return self._dedup_weights(non_trainable_weights)
@property
def weights(self):
  """Returns the list of all layer variables/weights.
  Returns:
    A list of variables.
  """
  # Trainable weights first, then non-trainable, matching get/set_weights.
  return self.trainable_weights + self.non_trainable_weights
@property
@doc_controls.do_not_generate_docs
def updates(self):
  """Deprecated; always returns an empty list in TF 2.x."""
  warnings.warn(
      '`layer.updates` will be removed in a future version. '
      'This property should not be used in TensorFlow 2.0, '
      'as `updates` are applied automatically.',
      stacklevel=2)
  return []
@property
def losses(self):
  """List of losses added using the `add_loss()` API.
  Variable regularization tensors are created when this property is accessed,
  so it is eager safe: accessing `losses` under a `tf.GradientTape` will
  propagate gradients back to the corresponding variables.
  Examples:
  >>> class MyLayer(tf.keras.layers.Layer):
  ...  def call(self, inputs):
  ...    self.add_loss(tf.abs(tf.reduce_mean(inputs)))
  ...    return inputs
  >>> l = MyLayer()
  >>> l(np.ones((10, 1)))
  >>> l.losses
  [1.0]
  >>> inputs = tf.keras.Input(shape=(10,))
  >>> x = tf.keras.layers.Dense(10)(inputs)
  >>> outputs = tf.keras.layers.Dense(1)(x)
  >>> model = tf.keras.Model(inputs, outputs)
  >>> # Activity regularization.
  >>> len(model.losses)
  0
  >>> model.add_loss(tf.abs(tf.reduce_mean(x)))
  >>> len(model.losses)
  1
  >>> inputs = tf.keras.Input(shape=(10,))
  >>> d = tf.keras.layers.Dense(10, kernel_initializer='ones')
  >>> x = d(inputs)
  >>> outputs = tf.keras.layers.Dense(1)(x)
  >>> model = tf.keras.Model(inputs, outputs)
  >>> # Weight regularization.
  >>> model.add_loss(lambda: tf.reduce_mean(d.kernel))
  >>> model.losses
  [<tf.Tensor: shape=(), dtype=float32, numpy=1.0>]
  Returns:
    A list of tensors.
  """
  collected_losses = []
  for layer in self._flatten_layers():
    # If any eager losses are present, we assume the model to be part of an
    # eager training loop (either a custom one or the one used when
    # `run_eagerly=True`) and so we always return just the eager losses.
    if layer._eager_losses:
      # Filter placeholder losses that may have been added by revived layers.
      # (see base_layer_utils for details).
      if (layer._eager_losses[0] is
          not base_layer_utils.REVIVED_LOSS_PLACEHOLDER):
        collected_losses.extend(layer._eager_losses)
    else:
      collected_losses.extend(layer._losses)
    # Callable (deferred) losses, e.g. weight regularizers, are evaluated
    # here so their tensors are created at access time (eager-safe).
    for regularizer in layer._callable_losses:
      loss_tensor = regularizer()
      if loss_tensor is not None:
        collected_losses.append(loss_tensor)
  return collected_losses
def add_loss(self, losses, **kwargs):
  """Add loss tensor(s), potentially dependent on layer inputs.
  Some losses (for instance, activity regularization losses) may be dependent
  on the inputs passed when calling a layer. Hence, when reusing the same
  layer on different inputs `a` and `b`, some entries in `layer.losses` may
  be dependent on `a` and some on `b`. This method automatically keeps track
  of dependencies.
  This method can be used inside a subclassed layer or model's `call`
  function, in which case `losses` should be a Tensor or list of Tensors.
  Example:
  ```python
  class MyLayer(tf.keras.layers.Layer):
    def call(self, inputs):
      self.add_loss(tf.abs(tf.reduce_mean(inputs)))
      return inputs
  ```
  This method can also be called directly on a Functional Model during
  construction. In this case, any loss Tensors passed to this Model must
  be symbolic and be able to be traced back to the model's `Input`s. These
  losses become part of the model's topology and are tracked in `get_config`.
  Example:
  ```python
  inputs = tf.keras.Input(shape=(10,))
  x = tf.keras.layers.Dense(10)(inputs)
  outputs = tf.keras.layers.Dense(1)(x)
  model = tf.keras.Model(inputs, outputs)
  # Activity regularization.
  model.add_loss(tf.abs(tf.reduce_mean(x)))
  ```
  If this is not the case for your loss (if, for example, your loss references
  a `Variable` of one of the model's layers), you can wrap your loss in a
  zero-argument lambda. These losses are not tracked as part of the model's
  topology since they can't be serialized.
  Example:
  ```python
  inputs = tf.keras.Input(shape=(10,))
  d = tf.keras.layers.Dense(10)
  x = d(inputs)
  outputs = tf.keras.layers.Dense(1)(x)
  model = tf.keras.Model(inputs, outputs)
  # Weight regularization.
  model.add_loss(lambda: tf.reduce_mean(d.kernel))
  ```
  Args:
    losses: Loss tensor, or list/tuple of tensors. Rather than tensors, losses
      may also be zero-argument callables which create a loss tensor.
    **kwargs: Additional keyword arguments for backward compatibility.
      Accepted values:
        inputs - Deprecated, will be automatically inferred.
  """
  # `inputs` is accepted (and discarded) for backward compatibility only.
  kwargs.pop('inputs', None)
  if kwargs:
    raise TypeError('Unknown keyword arguments: %s' % (kwargs.keys(),))
  def _tag_callable(loss):
    """Tags callable loss tensor as `_unconditional_loss`."""
    if callable(loss):
      # We run the loss without autocasting, as regularizers are often
      # numerically unstable in float16.
      with autocast_variable.enable_auto_cast_variables(None):
        loss = loss()
    if loss is None:
      return None  # Will be filtered out when computing the .losses property
    if not tf.is_tensor(loss):
      loss = tf.convert_to_tensor(
          loss, dtype=backend.floatx())
    loss._unconditional_loss = True  # pylint: disable=protected-access
    return loss
  # Partition the incoming losses into callables, eager tensors, and
  # symbolic tensors; each kind is stored/handled differently below.
  losses = tf.nest.flatten(losses)
  callable_losses = []
  eager_losses = []
  symbolic_losses = []
  for loss in losses:
    if callable(loss):
      callable_losses.append(functools.partial(_tag_callable, loss))
      continue
    if loss is None:
      continue
    if not tf.is_tensor(loss) and not isinstance(
        loss, keras_tensor.KerasTensor):
      loss = tf.convert_to_tensor(
          loss, dtype=backend.floatx())
    # TF Functions should take the eager path.
    if ((tf_utils.is_symbolic_tensor(loss) or
         isinstance(loss, keras_tensor.KerasTensor)) and
        not base_layer_utils.is_in_tf_function()):
      symbolic_losses.append(loss)
    elif tf.is_tensor(loss):
      eager_losses.append(loss)
  self._callable_losses.extend(callable_losses)
  # Eager loss tensors are only valid when added from inside a layer call.
  in_call_context = base_layer_utils.call_context().in_call
  if eager_losses and not in_call_context:
    raise ValueError(
        'Expected a symbolic Tensors or a callable for the loss value. '
        'Please wrap your loss computation in a zero argument `lambda`.')
  self._eager_losses.extend(eager_losses)
  for symbolic_loss in symbolic_losses:
    if getattr(self, '_is_graph_network', False):
      self._graph_network_add_loss(symbolic_loss)
    else:
      # Possible a loss was added in a Layer's `build`.
      self._losses.append(symbolic_loss)
def _clear_losses(self):
  """Used every step in eager to reset losses."""
  # Set to thread local directly to avoid Layer.__setattr__ overhead.
  if not getattr(self, '_self_tracked_trackables',
                 None):  # Fast path for single Layer.
    self._thread_local._eager_losses = []
  else:
    # Layer has sublayers: clear eager losses on every nested layer too.
    for layer in self._flatten_layers():
      layer._thread_local._eager_losses = []
@property
def metrics(self):
  """List of metrics added using the `add_metric()` API.

  Example:

  >>> input = tf.keras.layers.Input(shape=(3,))
  >>> d = tf.keras.layers.Dense(2)
  >>> output = d(input)
  >>> d.add_metric(tf.reduce_max(output), name='max')
  >>> d.add_metric(tf.reduce_min(output), name='min')
  >>> [m.name for m in d.metrics]
  ['max', 'min']

  Returns:
    A list of `Metric` objects.
  """
  gathered = []
  for sublayer in self._flatten_layers():
    # Layers revived from SavedModel may lack a metrics lock; skip those.
    lock = getattr(sublayer, '_metrics_lock', None)
    if lock is None:
      continue
    with lock:
      gathered.extend(sublayer._metrics)
  return gathered
def add_metric(self, value, name=None, **kwargs):
  """Adds metric tensor to the layer.
  This method can be used inside the `call()` method of a subclassed layer
  or model.
  ```python
  class MyMetricLayer(tf.keras.layers.Layer):
    def __init__(self):
      super(MyMetricLayer, self).__init__(name='my_metric_layer')
      self.mean = tf.keras.metrics.Mean(name='metric_1')
    def call(self, inputs):
      self.add_metric(self.mean(inputs))
      self.add_metric(tf.reduce_sum(inputs), name='metric_2')
      return inputs
  ```
  This method can also be called directly on a Functional Model during
  construction. In this case, any tensor passed to this Model must
  be symbolic and be able to be traced back to the model's `Input`s. These
  metrics become part of the model's topology and are tracked when you
  save the model via `save()`.
  ```python
  inputs = tf.keras.Input(shape=(10,))
  x = tf.keras.layers.Dense(10)(inputs)
  outputs = tf.keras.layers.Dense(1)(x)
  model = tf.keras.Model(inputs, outputs)
  model.add_metric(math_ops.reduce_sum(x), name='metric_1')
  ```
  Note: Calling `add_metric()` with the result of a metric object on a
  Functional Model, as shown in the example below, is not supported. This is
  because we cannot trace the metric result tensor back to the model's inputs.
  ```python
  inputs = tf.keras.Input(shape=(10,))
  x = tf.keras.layers.Dense(10)(inputs)
  outputs = tf.keras.layers.Dense(1)(x)
  model = tf.keras.Model(inputs, outputs)
  model.add_metric(tf.keras.metrics.Mean()(x), name='metric_1')
  ```
  Args:
    value: Metric tensor.
    name: String metric name.
    **kwargs: Additional keyword arguments for backward compatibility.
      Accepted values:
      `aggregation` - When the `value` tensor provided is not the result of
      calling a `keras.Metric` instance, it will be aggregated by default
      using a `keras.Metric.Mean`.
  """
  # Only the legacy `aggregation` kwarg is tolerated; anything else errors.
  kwargs_keys = list(kwargs.keys())
  if (len(kwargs_keys) > 1 or
      (len(kwargs_keys) == 1 and kwargs_keys[0] != 'aggregation')):
    raise TypeError(f'Unknown keyword arguments: {kwargs.keys()}. '
                    'Expected `aggregation`.')
  from_metric_obj = hasattr(value, '_metric_obj')
  is_symbolic = isinstance(value, keras_tensor.KerasTensor)
  in_call_context = base_layer_utils.call_context().in_call
  if name is None and not from_metric_obj:
    # Eg. `self.add_metric(math_ops.reduce_sum(x))`
    # In eager mode, we use metric name to lookup a metric. Without a name,
    # a new Mean metric wrapper will be created on every model/layer call.
    # So, we raise an error when no name is provided.
    # We will do the same for symbolic mode for consistency although a name
    # will be generated if no name is provided.
    # We will not raise this error in the foll use case for the sake of
    # consistency as name in provided in the metric constructor.
    # mean = metrics.Mean(name='my_metric')
    # model.add_metric(mean(outputs))
    raise ValueError('Please provide a name for your metric like '
                     '`self.add_metric(tf.reduce_sum(inputs), '
                     'name=\'mean_activation\')`')
  elif from_metric_obj:
    name = value._metric_obj.name
  if not in_call_context and not is_symbolic:
    raise ValueError('Expected a symbolic Tensor for the metric value, '
                     'received: ' + str(value))
  # If a metric was added in a Layer's `call` or `build`.
  if in_call_context or not getattr(self, '_is_graph_network', False):
    # TF Function path should take the eager path.
    # If the given metric is available in `metrics` list we just update state
    # on it, otherwise we create a new metric instance and
    # add it to the `metrics` list.
    metric_obj = getattr(value, '_metric_obj', None)
    # Tensors that come from a Metric object already updated the Metric state.
    should_update_state = not metric_obj
    name = metric_obj.name if metric_obj else name
    # Lock guards `_metrics` against concurrent layer calls across threads.
    with self._metrics_lock:
      match = self._get_existing_metric(name)
      if match:
        metric_obj = match
      elif metric_obj:
        self._metrics.append(metric_obj)
      else:
        # Build the metric object with the value's dtype if it defines one
        metric_obj = metrics_mod.Mean(
            name=name, dtype=getattr(value, 'dtype', None))
        self._metrics.append(metric_obj)
    if should_update_state:
      metric_obj(value)
  else:
    if from_metric_obj:
      raise ValueError('Using the result of calling a `Metric` object '
                       'when calling `add_metric` on a Functional '
                       'Model is not supported. Please pass the '
                       'Tensor to monitor directly.')
    # Insert layers into the Keras Graph Network.
    aggregation = None if from_metric_obj else 'mean'
    self._graph_network_add_metric(value, aggregation, name)
@doc_controls.do_not_doc_inheritable
def add_update(self, updates, inputs=None):
  """Add update op(s), potentially dependent on layer inputs.
  Weight updates (for instance, the updates of the moving mean and variance
  in a BatchNormalization layer) may be dependent on the inputs passed
  when calling a layer. Hence, when reusing the same layer on
  different inputs `a` and `b`, some entries in `layer.updates` may be
  dependent on `a` and some on `b`. This method automatically keeps track
  of dependencies.
  This call is ignored when eager execution is enabled (in that case, variable
  updates are run on the fly and thus do not need to be tracked for later
  execution).
  Args:
    updates: Update op, or list/tuple of update ops, or zero-arg callable
      that returns an update op. A zero-arg callable should be passed in
      order to disable running the updates by setting `trainable=False`
      on this Layer, when executing in Eager mode.
    inputs: Deprecated, will be automatically inferred.
  """
  if inputs is not None:
    tf_logging.warning(
        '`add_update` `inputs` kwarg has been deprecated. You no longer need '
        'to pass a value to `inputs` as it is being automatically inferred.')
  call_context = base_layer_utils.call_context()
  # No need to run updates during Functional API construction.
  if call_context.in_keras_graph:
    return
  # Callable updates are disabled by setting `trainable=False`.
  if not call_context.frozen:
    # Run each update immediately (TF 2.x eager semantics); non-callable
    # entries are no-ops here.
    for update in tf.nest.flatten(updates):
      if callable(update):
        update()  # pylint: disable=not-callable
def set_weights(self, weights):
  """Sets the weights of the layer, from NumPy arrays.
  The weights of a layer represent the state of the layer. This function
  sets the weight values from numpy arrays. The weight values should be
  passed in the order they are created by the layer. Note that the layer's
  weights must be instantiated before calling this function, by calling
  the layer.
  For example, a `Dense` layer returns a list of two values: the kernel matrix
  and the bias vector. These can be used to set the weights of another
  `Dense` layer:
  >>> layer_a = tf.keras.layers.Dense(1,
  ...  kernel_initializer=tf.constant_initializer(1.))
  >>> a_out = layer_a(tf.convert_to_tensor([[1., 2., 3.]]))
  >>> layer_a.get_weights()
  [array([[1.],
  [1.],
  [1.]], dtype=float32), array([0.], dtype=float32)]
  >>> layer_b = tf.keras.layers.Dense(1,
  ...  kernel_initializer=tf.constant_initializer(2.))
  >>> b_out = layer_b(tf.convert_to_tensor([[10., 20., 30.]]))
  >>> layer_b.get_weights()
  [array([[2.],
  [2.],
  [2.]], dtype=float32), array([0.], dtype=float32)]
  >>> layer_b.set_weights(layer_a.get_weights())
  >>> layer_b.get_weights()
  [array([[1.],
  [1.],
  [1.]], dtype=float32), array([0.], dtype=float32)]
  Args:
    weights: a list of NumPy arrays. The number
      of arrays and their shape must match
      number of the dimensions of the weights
      of the layer (i.e. it should match the
      output of `get_weights`).
  Raises:
    ValueError: If the provided weights list does not match the
      layer's specifications.
  """
  params = self.weights
  # A TrackableWeightHandler may consume several tensors from `weights`,
  # so the expected count is not simply len(params).
  expected_num_weights = 0
  for param in params:
    if isinstance(param, base_layer_utils.TrackableWeightHandler):
      expected_num_weights += param.num_tensors
    else:
      expected_num_weights += 1
  if expected_num_weights != len(weights):
    raise ValueError(
        'You called `set_weights(weights)` on layer "%s" '
        'with a weight list of length %s, but the layer was '
        'expecting %s weights. Provided weights: %s...' %
        (self.name, len(weights), expected_num_weights, str(weights)[:50]))
  weight_index = 0
  weight_value_tuples = []
  for param in params:
    if isinstance(param, base_layer_utils.TrackableWeightHandler):
      # Handlers set their own tensors directly; advance past them.
      num_tensors = param.num_tensors
      tensors = weights[weight_index:weight_index + num_tensors]
      param.set_weights(tensors)
      weight_index += num_tensors
    else:
      weight = weights[weight_index]
      weight_shape = weight.shape if hasattr(weight, 'shape') else ()
      ref_shape = param.shape
      if not ref_shape.is_compatible_with(weight_shape):
        raise ValueError(
            f'Layer {self.name} weight shape {ref_shape} '
            'is not compatible with provided weight '
            f'shape {weight_shape}.')
      weight_value_tuples.append((param, weight))
      weight_index += 1
  # Assign plain variables in a single batched backend call.
  backend.batch_set_value(weight_value_tuples)
  # Perform any layer defined finalization of the layer state.
  for layer in self._flatten_layers():
    layer.finalize_state()
def get_weights(self):
  """Returns the current weights of the layer, as NumPy arrays.
  The weights of a layer represent the state of the layer. This function
  returns both trainable and non-trainable weight values associated with this
  layer as a list of NumPy arrays, which can in turn be used to load state
  into similarly parameterized layers.
  For example, a `Dense` layer returns a list of two values: the kernel matrix
  and the bias vector. These can be used to set the weights of another
  `Dense` layer:
  >>> layer_a = tf.keras.layers.Dense(1,
  ...  kernel_initializer=tf.constant_initializer(1.))
  >>> a_out = layer_a(tf.convert_to_tensor([[1., 2., 3.]]))
  >>> layer_a.get_weights()
  [array([[1.],
  [1.],
  [1.]], dtype=float32), array([0.], dtype=float32)]
  >>> layer_b = tf.keras.layers.Dense(1,
  ...  kernel_initializer=tf.constant_initializer(2.))
  >>> b_out = layer_b(tf.convert_to_tensor([[10., 20., 30.]]))
  >>> layer_b.get_weights()
  [array([[2.],
  [2.],
  [2.]], dtype=float32), array([0.], dtype=float32)]
  >>> layer_b.set_weights(layer_a.get_weights())
  >>> layer_b.get_weights()
  [array([[1.],
  [1.],
  [1.]], dtype=float32), array([0.], dtype=float32)]
  Returns:
    Weights values as a list of NumPy arrays.
  """
  weights = self.weights
  output_weights = []
  for weight in weights:
    if isinstance(weight, base_layer_utils.TrackableWeightHandler):
      # Handlers may expand into several tensors; flatten them in order.
      output_weights.extend(weight.get_tensors())
    else:
      output_weights.append(weight)
  # Fetch all values in one batched backend call.
  return backend.batch_get_value(output_weights)
@doc_controls.do_not_generate_docs
def finalize_state(self):
  """Finalizes the layers state after updating layer weights.
  This function can be subclassed in a layer and will be called after updating
  a layer weights. It can be overridden to finalize any additional layer state
  after a weight update.
  This function will be called after weights of a layer have been restored
  from a loaded model.
  """
  # Intentionally a no-op in the base class; subclasses may override.
  pass
@doc_controls.do_not_generate_docs
def get_updates_for(self, inputs):
  """Deprecated, do NOT use!
  Retrieves updates relevant to a specific set of inputs.
  Args:
    inputs: Input tensor or list/tuple of input tensors.
  Returns:
    List of update ops of the layer that depend on `inputs`.
  """
  warnings.warn(
      '`layer.get_updates_for` is deprecated and '
      'will be removed in a future version. '
      'Please use `layer.updates` method instead.',
      stacklevel=2)
  # `inputs` is ignored in TF 2.x; delegates to the (empty) updates list.
  return self.updates
@doc_controls.do_not_generate_docs
def get_losses_for(self, inputs):
  """Deprecated, do NOT use!
  Retrieves losses relevant to a specific set of inputs.
  Args:
    inputs: Input tensor or list/tuple of input tensors.
  Returns:
    List of loss tensors of the layer that depend on `inputs`.
  """
  warnings.warn(
      '`layer.get_losses_for` is deprecated and '
      'will be removed in a future version. '
      'Please use `layer.losses` instead.',
      stacklevel=2)
  # `inputs` is ignored in TF 2.x; all collected losses are returned.
  return self.losses
@doc_controls.do_not_doc_inheritable
def get_input_mask_at(self, node_index):
  """Retrieves the input mask tensor(s) of a layer at a given node.

  Args:
    node_index: Integer, index of the node from which to retrieve the
      attribute. E.g. `node_index=0` will correspond to the first time
      the layer was called.

  Returns:
    A mask tensor (or list of tensors if the layer has multiple inputs).
  """
  node_inputs = self.get_input_at(node_index)
  # Masks ride along on tensors via the `_keras_mask` attribute.
  if not isinstance(node_inputs, list):
    return getattr(node_inputs, '_keras_mask', None)
  return [getattr(tensor, '_keras_mask', None) for tensor in node_inputs]
@doc_controls.do_not_doc_inheritable
def get_output_mask_at(self, node_index):
  """Retrieves the output mask tensor(s) of a layer at a given node.

  Args:
    node_index: Integer, index of the node from which to retrieve the
      attribute. E.g. `node_index=0` will correspond to the first time
      the layer was called.

  Returns:
    A mask tensor (or list of tensors if the layer has multiple outputs).
  """
  node_output = self.get_output_at(node_index)
  # Masks ride along on tensors via the `_keras_mask` attribute.
  if not isinstance(node_output, list):
    return getattr(node_output, '_keras_mask', None)
  return [getattr(tensor, '_keras_mask', None) for tensor in node_output]
@property
@doc_controls.do_not_doc_inheritable
def input_mask(self):
  """Retrieves the input mask tensor(s) of a layer.

  Only applicable if the layer has exactly one inbound node,
  i.e. if it is connected to one incoming layer.

  Returns:
    Input mask tensor (potentially None) or list of input mask tensors.

  Raises:
    AttributeError: if the layer is connected to more than one incoming
      layers.
  """
  layer_inputs = self.input
  # Masks ride along on tensors via the `_keras_mask` attribute.
  if not isinstance(layer_inputs, list):
    return getattr(layer_inputs, '_keras_mask', None)
  return [getattr(tensor, '_keras_mask', None) for tensor in layer_inputs]
@property
@doc_controls.do_not_doc_inheritable
def output_mask(self):
  """Retrieves the output mask tensor(s) of a layer.

  Only applicable if the layer has exactly one inbound node,
  i.e. if it is connected to one incoming layer.

  Returns:
    Output mask tensor (potentially None) or list of output mask tensors.

  Raises:
    AttributeError: if the layer is connected to more than one incoming
      layers.
  """
  layer_output = self.output
  # Masks ride along on tensors via the `_keras_mask` attribute.
  if not isinstance(layer_output, list):
    return getattr(layer_output, '_keras_mask', None)
  return [getattr(tensor, '_keras_mask', None) for tensor in layer_output]
@doc_controls.do_not_doc_inheritable
def get_input_shape_at(self, node_index):
  """Retrieves the input shape(s) of a layer at a given node.
  Args:
    node_index: Integer, index of the node
      from which to retrieve the attribute.
      E.g. `node_index=0` will correspond to the
      first time the layer was called.
  Returns:
    A shape tuple
    (or list of shape tuples if the layer has multiple inputs).
  Raises:
    RuntimeError: If called in Eager mode.
  """
  # Delegates to the shared node-attribute lookup helper.
  return self._get_node_attribute_at_index(node_index, 'input_shapes',
                                           'input shape')
@doc_controls.do_not_doc_inheritable
def get_output_shape_at(self, node_index):
  """Retrieves the output shape(s) of a layer at a given node.
  Args:
    node_index: Integer, index of the node
      from which to retrieve the attribute.
      E.g. `node_index=0` will correspond to the
      first time the layer was called.
  Returns:
    A shape tuple
    (or list of shape tuples if the layer has multiple outputs).
  Raises:
    RuntimeError: If called in Eager mode.
  """
  # Delegates to the shared node-attribute lookup helper.
  return self._get_node_attribute_at_index(node_index, 'output_shapes',
                                           'output shape')
@doc_controls.do_not_doc_inheritable
def get_input_at(self, node_index):
  """Retrieves the input tensor(s) of a layer at a given node.
  Args:
    node_index: Integer, index of the node
      from which to retrieve the attribute.
      E.g. `node_index=0` will correspond to the
      first input node of the layer.
  Returns:
    A tensor (or list of tensors if the layer has multiple inputs).
  Raises:
    RuntimeError: If called in Eager mode.
  """
  # Delegates to the shared node-attribute lookup helper.
  return self._get_node_attribute_at_index(node_index, 'input_tensors',
                                           'input')
@doc_controls.do_not_doc_inheritable
def get_output_at(self, node_index):
  """Retrieves the output tensor(s) of a layer at a given node.
  Args:
    node_index: Integer, index of the node
      from which to retrieve the attribute.
      E.g. `node_index=0` will correspond to the
      first output node of the layer.
  Returns:
    A tensor (or list of tensors if the layer has multiple outputs).
  Raises:
    RuntimeError: If called in Eager mode.
  """
  # Delegates to the shared node-attribute lookup helper.
  return self._get_node_attribute_at_index(node_index, 'output_tensors',
                                           'output')
@property
def input(self):
  """Retrieves the input tensor(s) of a layer.
  Only applicable if the layer has exactly one input,
  i.e. if it is connected to one incoming layer.
  Returns:
    Input tensor or list of input tensors.
  Raises:
    RuntimeError: If called in Eager mode.
    AttributeError: If no inbound nodes are found.
  """
  if not self._inbound_nodes:
    raise AttributeError('Layer ' + self.name +
                         ' is not connected, no input to return.')
  # Uses the first inbound node; layers with multiple nodes should use
  # `get_input_at(node_index)` instead.
  return self._get_node_attribute_at_index(0, 'input_tensors', 'input')
@property
def output(self):
  """Retrieves the output tensor(s) of a layer.
  Only applicable if the layer has exactly one output,
  i.e. if it is connected to one incoming layer.
  Returns:
    Output tensor or list of output tensors.
  Raises:
    AttributeError: if the layer is connected to more than one incoming
      layers.
    RuntimeError: if called in Eager mode.
  """
  if not self._inbound_nodes:
    raise AttributeError('Layer ' + self.name + ' has no inbound nodes.')
  # Uses the first inbound node; layers with multiple nodes should use
  # `get_output_at(node_index)` instead.
  return self._get_node_attribute_at_index(0, 'output_tensors', 'output')
@property
@doc_controls.do_not_doc_inheritable
def input_shape(self):
  """Retrieves the input shape(s) of a layer.

  Only applicable if the layer has exactly one input,
  i.e. if it is connected to one incoming layer, or if all inputs
  have the same shape.

  Returns:
    Input shape, as an integer shape tuple
    (or list of shape tuples, one tuple per input tensor).

  Raises:
    AttributeError: if the layer has no defined input_shape.
    RuntimeError: if called in Eager mode.
  """
  if not self._inbound_nodes:
    raise AttributeError(f'The layer "{self.name}" has never been called '
                         'and thus has no defined input shape. Note that the '
                         '`input_shape` property is only available for '
                         'Functional and Sequential models.')
  # Compare shapes via their string form so unhashable shape structures
  # (lists of TensorShapes) can be deduplicated with a set.
  all_input_shapes = set(
      [str(node.input_shapes) for node in self._inbound_nodes])
  if len(all_input_shapes) == 1:
    return self._inbound_nodes[0].input_shapes
  else:
    # Fixed: the original message had a mismatched quote (missing the
    # closing `"` after the layer name); now uses an f-string consistent
    # with the error above.
    raise AttributeError(f'The layer "{self.name}" '
                         'has multiple inbound nodes, '
                         'with different input shapes. Hence '
                         'the notion of "input shape" is '
                         'ill-defined for the layer. '
                         'Use `get_input_shape_at(node_index)` '
                         'instead.')
def count_params(self):
  """Count the total number of scalars composing the weights.
  Returns:
    An integer count.
  Raises:
    ValueError: if the layer isn't yet built
      (in which case its weights aren't yet defined).
  """
  if not self.built:
    if getattr(self, '_is_graph_network', False):
      # Functional/Sequential models know their inputs, so they can be
      # built on demand before counting.
      with tf_utils.maybe_init_scope(self):
        self._maybe_build(self.inputs)
    else:
      raise ValueError('You tried to call `count_params` '
                       f'on layer {self.name}'
                       ', but the layer isn\'t built. '
                       'You can build it manually via: '
                       f'`{self.name}.build(batch_input_shape)`.')
  return layer_utils.count_params(self.weights)
@property
@doc_controls.do_not_doc_inheritable
def output_shape(self):
  """Retrieves the output shape(s) of a layer.
  Only applicable if the layer has one output,
  or if all outputs have the same shape.
  Returns:
    Output shape, as an integer shape tuple
    (or list of shape tuples, one tuple per output tensor).
  Raises:
    AttributeError: if the layer has no defined output shape.
    RuntimeError: if called in Eager mode.
  """
  if not self._inbound_nodes:
    raise AttributeError(f'The layer "{self.name}" has never been called '
                         'and thus has no defined output shape.')
  # Compare shapes via their string form so unhashable shape structures
  # can be deduplicated with a set.
  all_output_shapes = set(
      [str(node.output_shapes) for node in self._inbound_nodes])
  if len(all_output_shapes) == 1:
    return self._inbound_nodes[0].output_shapes
  else:
    raise AttributeError('The layer "%s"'
                         ' has multiple inbound nodes, '
                         'with different output shapes. Hence '
                         'the notion of "output shape" is '
                         'ill-defined for the layer. '
                         'Use `get_output_shape_at(node_index)` '
                         'instead.' % self.name)
@property
@doc_controls.do_not_doc_inheritable
def inbound_nodes(self):
  """Deprecated, do NOT use! Only for compatibility with external Keras."""
  return self._inbound_nodes
@property
@doc_controls.do_not_doc_inheritable
def outbound_nodes(self):
  """Deprecated, do NOT use! Only for compatibility with external Keras."""
  return self._outbound_nodes
##############################################################################
# Methods & attributes below are public aliases of other methods. #
##############################################################################
@doc_controls.do_not_doc_inheritable
def apply(self, inputs, *args, **kwargs):
  """Deprecated, do NOT use!
  This is an alias of `self.__call__`.
  Args:
    inputs: Input tensor(s).
    *args: additional positional arguments to be passed to `self.call`.
    **kwargs: additional keyword arguments to be passed to `self.call`.
  Returns:
    Output tensor(s).
  """
  warnings.warn(
      '`layer.apply` is deprecated and '
      'will be removed in a future version. '
      'Please use `layer.__call__` method instead.',
      stacklevel=2)
  # Warn, then forward unchanged to `__call__`.
  return self.__call__(inputs, *args, **kwargs)
@doc_controls.do_not_doc_inheritable
def add_variable(self, *args, **kwargs):
  """Deprecated, do NOT use! Alias for `add_weight`."""
  warnings.warn(
      '`layer.add_variable` is deprecated and '
      'will be removed in a future version. '
      'Please use `layer.add_weight` method instead.',
      stacklevel=2)
  # Warn, then forward unchanged to `add_weight`.
  return self.add_weight(*args, **kwargs)
@property
@doc_controls.do_not_generate_docs
def variables(self):
  """Returns the list of all layer variables/weights.
  Alias of `self.weights`.
  Note: This will not track the weights of nested `tf.Modules` that are not
  themselves Keras layers.
  Returns:
    A list of variables.
  """
  return self.weights
@property
@doc_controls.do_not_generate_docs
def trainable_variables(self):
  """Alias of `self.trainable_weights` (tf.Module-compatible name)."""
  return self.trainable_weights
@property
@doc_controls.do_not_generate_docs
def non_trainable_variables(self):
  """Alias of `self.non_trainable_weights` (tf.Module-compatible name)."""
  return self.non_trainable_weights
##############################################################################
# Methods & attributes below are all private and only used by the framework. #
##############################################################################
@property
def _inbound_nodes(self):
  """Internal accessor for the inbound node list (untracked storage)."""
  return self._inbound_nodes_value
@_inbound_nodes.setter
@tf.__internal__.tracking.no_automatic_dependency_tracking
def _inbound_nodes(self, value):
  # No-tracking decorator prevents the node list from being picked up by
  # object-based checkpointing.
  self._inbound_nodes_value = value
@property
def _outbound_nodes(self):
  """Internal accessor for the outbound node list (untracked storage)."""
  return self._outbound_nodes_value
@_outbound_nodes.setter
@tf.__internal__.tracking.no_automatic_dependency_tracking
def _outbound_nodes(self, value):
  # No-tracking decorator prevents the node list from being picked up by
  # object-based checkpointing.
  self._outbound_nodes_value = value
def _set_dtype_policy(self, dtype):
  """Sets self._dtype_policy.

  Accepts a `Policy` instance, a serialized policy dict, a policy name
  string, a dtype (anything `tf.as_dtype` accepts), or None (falls back
  to the global policy). Also caches the compute dtype as a DType object.

  Raises:
    ValueError: if 'mixed_float16' is requested under a
      tf.distribute.Strategy that does not support loss scaling.
  """
  if isinstance(dtype, policy.Policy):
    self._dtype_policy = dtype
  elif isinstance(dtype, dict):
    self._dtype_policy = policy.deserialize(dtype)
  elif isinstance(dtype, str) and dtype in ('mixed_float16',
                                            'mixed_bfloat16'):
    # The isinstance check is required since np.dtype raises an error if
    # compared to a non-dtype string.
    self._dtype_policy = policy.Policy(dtype)
  elif dtype:
    self._dtype_policy = policy.Policy(tf.as_dtype(dtype).name)
  else:
    self._dtype_policy = policy.global_policy()
  if (self._dtype_policy.name == 'mixed_float16' and
      not loss_scale_optimizer.strategy_supports_loss_scaling()):
    # Although only loss scaling doesn't support certain strategies, to avoid
    # confusion, we disallow the 'mixed_float16' policy with unsupported
    # strategies. This is because 'mixed_float16' requires loss scaling for
    # numeric stability.
    strategy = tf.distribute.get_strategy()
    raise ValueError('Mixed precision is not supported with the '
                     'tf.distribute.Strategy: %s. Either stop using mixed '
                     'precision by removing the use of the "%s" policy or '
                     'use a different Strategy, e.g. a MirroredStrategy.' %
                     (strategy.__class__.__name__, self._dtype_policy.name))
  # Performance optimization: cache the compute dtype as a Dtype object or
  # None, so that str to Dtype conversion doesn't happen in Layer.__call__.
  # TODO(b/157486353): Investigate returning DTypes in Policy.
  if self._dtype_policy.compute_dtype:
    self._compute_dtype_object = tf.as_dtype(
        self._dtype_policy.compute_dtype)
  else:
    self._compute_dtype_object = None
@property
def dtype_policy(self):
  """The dtype policy associated with this layer.
  This is an instance of a `tf.keras.mixed_precision.Policy`.
  """
  # Set by `_set_dtype_policy`, normally during `__init__`.
  return self._dtype_policy
@property
def compute_dtype(self):
  """The dtype of the layer's computations.
  This is equivalent to `Layer.dtype_policy.compute_dtype`. Unless
  mixed precision is used, this is the same as `Layer.dtype`, the dtype of
  the weights.
  Layers automatically cast their inputs to the compute dtype, which causes
  computations and the output to be in the compute dtype as well. This is done
  by the base Layer class in `Layer.__call__`, so you do not have to insert
  these casts if implementing your own layer.
  Layers often perform certain internal computations in higher precision when
  `compute_dtype` is float16 or bfloat16 for numeric stability. The output
  will still typically be float16 or bfloat16 in such cases.
  Returns:
    The layer's compute dtype.
  """
  # A cached DType version of this lives in `self._compute_dtype_object`.
  return self._dtype_policy.compute_dtype
@property
def _compute_dtype(self):
  """Deprecated alias of `compute_dtype`."""
  # Kept for backward compatibility with code that reads the private name.
  return self._dtype_policy.compute_dtype
@property
def variable_dtype(self):
  """Alias of `Layer.dtype`, the dtype of the weights."""
  return self.dtype
def _maybe_cast_inputs(self, inputs, input_list=None):
"""Maybe casts the inputs to the compute dtype.
If self._compute_dtype is floating-point, and self_autocast is True,
floating-point inputs are casted to self._compute_dtype.
Args:
inputs: Input tensor, or structure of input tensors.
input_list: Flat list of input tensors.
Returns:
`inputs`, but tensors may have been casted to self._compute_dtype
"""
if not input_list:
input_list = tf.nest.flatten(inputs)
compute_dtype_object = self._compute_dtype_object
should_autocast = (
self._autocast and compute_dtype_object and
compute_dtype_object.is_floating)
if (should_autocast and
any(map(self._should_cast_single_input, input_list))):
# Only perform expensive `nest` operation when needed.
return tf.nest.map_structure(self._cast_single_input, inputs)
else:
return inputs
def _should_cast_single_input(self, x):
  # Cast only autocast-able tensor types whose floating-point dtype differs
  # from the layer's cached compute dtype.
  if isinstance(x, _AUTOCAST_TYPES):
    return (self._compute_dtype_object and
            x.dtype != self._compute_dtype_object and x.dtype.is_floating)
  return False
def _cast_single_input(self, x):
  """Cast a single Tensor or TensorSpec to the compute dtype."""
  # Non-castable values (see `_should_cast_single_input`) pass through.
  if self._should_cast_single_input(x):
    return tf.cast(x, self._compute_dtype_object)
  else:
    return x
# _dtype used to be an attribute set in the constructor. We still expose it
# because some clients still use it.
# TODO(reedwm): Deprecate, then remove the _dtype property.
@property
def _dtype(self):
  # This is equivalent to returning self.dtype . We do not return self.dtype
  # as it would cause infinite recursion in a few subclasses, which override
  # "dtype" to return self._dtype.
  return self._dtype_policy.variable_dtype
@_dtype.setter
def _dtype(self, value):
  # Normalize to a canonical dtype name, then wrap in a fresh Policy.
  value = tf.as_dtype(value).name
  self._set_dtype_policy(policy.Policy(value))
def _name_scope(self):  # pylint: disable=method-hidden
  """Returns the TF name scope under which this layer's ops are created."""
  if not tf.__internal__.tf2.enabled():
    # TF1 behavior: just the layer name, no nesting logic.
    return self.name
  name_scope = self.name
  # `_outer_name_scope` is captured at model declaration time when the
  # module-level `_is_name_scope_on_model_declaration_enabled` flag is set.
  if _is_name_scope_on_model_declaration_enabled and self._outer_name_scope:
    name_scope = self._outer_name_scope + '/' + name_scope
  current_name_scope = tf.__internal__.get_name_scope()
  if current_name_scope:
    name_scope = current_name_scope + '/' + name_scope
  if name_scope:
    # Note that the trailing `/` prevents autogenerated
    # numerical suffixes to get appended. It will also fully reset
    # nested name scope (i.e. the outer name scope has no effect).
    name_scope += '/'
  return name_scope
def _init_set_name(self, name, zero_based=True):
if name is None:
self._name = backend.unique_object_name(
generic_utils.to_snake_case(self.__class__.__name__),
zero_based=zero_based)
elif isinstance(name, str):
backend.observe_object_name(name)
self._name = name
else:
raise TypeError(
f'Expected `name` argument to be a string, but got: {name}')
def _get_existing_metric(self, name=None):
match = [m for m in self._metrics if m.name == name]
if not match:
return
if len(match) > 1:
raise ValueError(
'Please provide different names for the metrics you have added. '
'We found {} metrics with the name: "{}"'.format(len(match), name))
return match[0]
def _handle_weight_regularization(self, name, variable, regularizer):
  """Create lambdas which compute regularization losses."""
  def _loss_for_variable(v):
    """Creates a regularization loss `Tensor` for variable `v`."""
    with backend.name_scope(name + '/Regularizer'):
      regularization = regularizer(v)
    return regularization
  # Split (partitioned) variables contribute one loss per shard.
  if base_layer_utils.is_split_variable(variable):
    for v in variable:
      self.add_loss(functools.partial(_loss_for_variable, v))
  else:
    # Deferred via functools.partial so the loss is recomputed lazily by
    # `add_loss` rather than evaluated here.
    self.add_loss(functools.partial(_loss_for_variable, variable))
def _handle_activity_regularization(self, inputs, outputs):
  """Adds the activity-regularization loss for each output tensor."""
  # Apply activity regularization.
  # Note that it should be applied every time the layer creates a new
  # output, since it is output-specific.
  if self._activity_regularizer:
    output_list = tf.nest.flatten(outputs)
    with backend.name_scope('ActivityRegularizer'):
      for output in output_list:
        activity_loss = self._activity_regularizer(output)
        # Assumes axis 0 of the output is the batch dimension.
        batch_size = tf.cast(
            tf.shape(output)[0], activity_loss.dtype)
        # Make activity regularization strength batch-agnostic.
        mean_activity_loss = activity_loss / batch_size
        self.add_loss(mean_activity_loss)
def _set_mask_metadata(self, inputs, outputs, previous_mask, build_graph):
  """Attaches `_keras_mask` metadata to the layer's output tensors."""
  # Many `Layer`s don't need to call `compute_mask`.
  # This method is optimized to do as little work as needed for the common
  # case.
  if not self._supports_masking:
    return
  flat_outputs = tf.nest.flatten(outputs)
  # Either the layer computed its own masks in `call`, or every output
  # already carries one.
  mask_already_computed = (
      getattr(self, '_compute_output_and_mask_jointly', False) or
      all(getattr(x, '_keras_mask', None) is not None for x in flat_outputs))
  if mask_already_computed:
    if build_graph:
      self._set_mask_keras_history_checked(flat_outputs)
    return
  output_masks = self.compute_mask(inputs, previous_mask)
  if output_masks is None:
    return
  flat_masks = tf.nest.flatten(output_masks)
  for tensor, mask in zip(flat_outputs, flat_masks):
    try:
      tensor._keras_mask = mask
    except AttributeError:
      # C Type such as np.ndarray.
      pass
  if build_graph:
    self._set_mask_keras_history_checked(flat_outputs)
def _set_mask_keras_history_checked(self, flat_outputs):
for output in flat_outputs:
if getattr(output, '_keras_mask', None) is not None:
# Do not track masks for `TensorFlowOpLayer` construction.
output._keras_mask._keras_history_checked = True
def _get_input_masks(self, inputs, input_list, args, kwargs):
  """Collects masks for `call`.

  Returns:
    Tuple of (input_masks, implicit_mask) where `implicit_mask` is True
    only when masks were picked up from `_keras_mask` attributes rather
    than passed explicitly.
  """
  if not self._supports_masking and not self._expects_mask_arg:
    # Input masks only need to be retrieved if they are needed for `call`
    # or `compute_mask`.
    input_masks = None
    implicit_mask = False
  elif self._call_arg_was_passed('mask', args, kwargs):
    # An explicit `mask=` argument wins over implicit tensor metadata.
    input_masks = self._get_call_arg_value('mask', args, kwargs)
    implicit_mask = False
  else:
    input_masks = [getattr(t, '_keras_mask', None) for t in input_list]
    if all(mask is None for mask in input_masks):
      input_masks = None
      implicit_mask = False
    else:
      # Only do expensive `nest` op when masking is actually being used.
      input_masks = tf.nest.pack_sequence_as(inputs, input_masks)
      implicit_mask = True
  return input_masks, implicit_mask
def _call_arg_was_passed(self, arg_name, args, kwargs, inputs_in_args=False):
# Performance optimization: do no work in most common case.
if not args and not kwargs:
return False
if arg_name in kwargs:
return True
call_fn_args = self._call_fn_args
if not inputs_in_args:
# Ignore `inputs` arg.
call_fn_args = call_fn_args[1:]
return arg_name in dict(zip(call_fn_args, args))
def _get_call_arg_value(self, arg_name, args, kwargs, inputs_in_args=False):
if arg_name in kwargs:
return kwargs[arg_name]
call_fn_args = self._call_fn_args
if not inputs_in_args:
# Ignore `inputs` arg.
call_fn_args = call_fn_args[1:]
args_dict = dict(zip(call_fn_args, args))
return args_dict[arg_name]
def _set_call_arg_value(
self, arg_name, new_value, args,
kwargs, inputs_in_args=False, pop_kwarg_if_none=False):
arg_pos = self._call_fn_arg_positions.get(arg_name, None)
if arg_pos is not None:
if not inputs_in_args:
# Ignore `inputs` arg.
arg_pos = arg_pos - 1
if len(args) > arg_pos:
args = list(args)
args[arg_pos] = new_value
return tuple(args), kwargs
if new_value is None and pop_kwarg_if_none:
kwargs.pop(arg_name, None)
else:
kwargs[arg_name] = new_value
return args, kwargs
def _set_connectivity_metadata(self, args, kwargs, outputs):
  """Records Functional-API connectivity for one layer call.

  Returns:
    `outputs`, with any tensors passed through unmodified replaced by
    identity copies so KerasHistory metadata is not lost.
  """
  # If the layer returns tensors from its inputs unmodified,
  # we copy them to avoid loss of KerasHistory metadata.
  flat_outputs = tf.nest.flatten(outputs)
  flat_inputs = tf.nest.flatten((args, kwargs))
  input_ids_set = {id(i) for i in flat_inputs}
  outputs_copy = []
  for x in flat_outputs:
    if id(x) in input_ids_set:
      with backend.name_scope(self.name):
        x = tf.identity(x)
    outputs_copy.append(x)
  outputs = tf.nest.pack_sequence_as(outputs, outputs_copy)
  # Create node, Node wires itself to inbound and outbound layers.
  # The Node constructor actually updates this layer's self._inbound_nodes,
  # sets _keras_history on the outputs, and adds itself to the
  # `_outbound_nodes` of the layers that produced the inputs to this
  # layer call.
  node_module.Node(self, call_args=args, call_kwargs=kwargs, outputs=outputs)
  return outputs
def _get_node_attribute_at_index(self, node_index, attr, attr_name):
"""Private utility to retrieves an attribute (e.g. inputs) from a node.
This is used to implement the methods:
- get_input_shape_at
- get_output_shape_at
- get_input_at
etc...
Args:
node_index: Integer index of the node from which
to retrieve the attribute.
attr: Exact node attribute name.
attr_name: Human-readable attribute name, for error messages.
Returns:
The layer's attribute `attr` at the node of index `node_index`.
Raises:
RuntimeError: If the layer has no inbound nodes, or if called in Eager
mode.
ValueError: If the index provided does not match any node.
"""
if not self._inbound_nodes:
raise RuntimeError(f'The layer {self.name} has never been called '
'and thus has no defined {attr_name}.')
if not len(self._inbound_nodes) > node_index:
raise ValueError(f'Asked to get {attr_name} at node '
f'{node_index}, but the layer has only '
f'{len(self._inbound_nodes)} inbound nodes.')
values = getattr(self._inbound_nodes[node_index], attr)
if isinstance(values, list) and len(values) == 1:
return values[0]
else:
return values
def _maybe_build(self, inputs):
  """Builds the layer (at most once) and loads any deferred initial weights."""
  # Check input assumptions set before layer building, e.g. input rank.
  if not self.built:
    input_spec.assert_input_compatibility(
        self.input_spec, inputs, self.name)
    input_list = tf.nest.flatten(inputs)
    # Infer the dtype policy from the first input when none was set yet.
    if input_list and self._dtype_policy.compute_dtype is None:
      try:
        dtype = input_list[0].dtype.base_dtype.name
      except AttributeError:
        pass
      else:
        self._set_dtype_policy(policy.Policy(dtype))
    input_shapes = None
    # Converts Tensors / CompositeTensors to TensorShapes.
    if any(hasattr(x, 'shape') for x in input_list):
      input_shapes = tf_utils.get_shapes(inputs)
    else:
      # Converts input shape to TensorShapes.
      try:
        input_shapes = tf_utils.convert_shapes(inputs, to_tuples=False)
      except ValueError:
        pass
    # Only call `build` if the user has manually overridden the build method.
    if not hasattr(self.build, '_is_default'):
      # Any setup work performed only once should happen in an `init_scope`
      # to avoid creating symbolic Tensors that will later pollute any eager
      # operations.
      with tf_utils.maybe_init_scope(self):
        self.build(input_shapes)  # pylint:disable=not-callable
    # We must also ensure that the layer is marked as built, and the build
    # shape is stored since user defined build functions may not be calling
    # `super.build()`
    Layer.build(self, input_shapes)
  # Optionally load weight values specified at layer instantiation.
  if self._initial_weights is not None:
    with tf.init_scope():
      # Using `init_scope` since we want variable assignment in
      # `set_weights` to be treated like variable initialization.
      self.set_weights(self._initial_weights)
    self._initial_weights = None
def _symbolic_call(self, inputs):
  """Builds placeholder outputs from `compute_output_shape` without running `call`."""
  input_shapes = tf.nest.map_structure(lambda x: x.shape, inputs)
  output_shapes = self.compute_output_shape(input_shapes)
  # Convert to TensorShape so that nest.map_structure will not map into
  # individual dim of the shape.
  output_shapes = tf_utils.convert_shapes(output_shapes, to_tuples=False)
  def _make_placeholder_like(shape):
    ph = backend.placeholder(shape=shape, dtype=self.dtype)
    # Placeholders carry no mask metadata.
    ph._keras_mask = None
    return ph
  return tf.nest.map_structure(_make_placeholder_like, output_shapes)
def _get_trainable_state(self):
"""Get the `trainable` state of each sublayer.
Returns:
A dict mapping all sublayers to their `trainable` value.
"""
trainable_state = weakref.WeakKeyDictionary()
for layer in self._flatten_layers():
trainable_state[layer] = layer.trainable
return trainable_state
def _set_trainable_state(self, trainable_state):
"""Set `trainable` state for each sublayer."""
for layer in self._flatten_layers():
if layer in trainable_state:
layer.trainable = trainable_state[layer]
@property
def _obj_reference_counts(self):
  """A dictionary counting the number of attributes referencing an object."""
  # Lazily created because __setattr__ may run before Layer.__init__.
  # Keyed by object identity, since Variables override __eq__.
  self._maybe_create_attribute('_obj_reference_counts_dict',
                               object_identity.ObjectIdentityDictionary())
  return self._obj_reference_counts_dict
@tf.__internal__.tracking.no_automatic_dependency_tracking
def _maybe_create_attribute(self, name, default_value):
  """Create the attribute with the default value if it hasn't been created.
  This is useful for fields that is used for tracking purpose,
  _trainable_weights, or _layers. Note that user could create a layer subclass
  and assign an internal field before invoking the Layer.__init__(), the
  __setattr__() need to create the tracking fields and __init__() need to not
  override them.
  Args:
    name: String, the name of the attribute.
    default_value: Object, the default value of the attribute.
  """
  # Goes through our overridden __setattr__, but the decorator prevents
  # the default value itself from being dependency-tracked.
  if not hasattr(self, name):
    self.__setattr__(name, default_value)
def __delattr__(self, name):
  """Deletes attribute `name`, untracking sublayers/variables it referenced."""
  # For any super.__delattr__() call, we will directly use the implementation
  # in Trackable and skip the behavior in AutoTrackable. The Layer was
  # originally use Trackable as base class, the change of using Module as base
  # class forced us to have AutoTrackable in the class hierarchy.
  #
  # TODO(b/180760306) Keeping the status quo of skipping _delattr__ and
  # __setattr__ in AutoTrackable may be unsustainable.
  existing_value = getattr(self, name, None)
  # If this value is replacing an existing object assigned to an attribute, we
  # should clean it out to avoid leaking memory. First we check if there are
  # other attributes referencing it.
  reference_counts = self._obj_reference_counts
  if existing_value not in reference_counts:
    # Not a tracked object: plain delete.
    super(tf.__internal__.tracking.AutoTrackable, self).__delattr__(name)  # pylint: disable=bad-super-call
    return
  reference_count = reference_counts[existing_value]
  if reference_count > 1:
    # There are other remaining references. We can't remove this object from
    # _layers etc.
    reference_counts[existing_value] = reference_count - 1
    super(tf.__internal__.tracking.AutoTrackable, self).__delattr__(name)  # pylint: disable=bad-super-call
    return
  else:
    # This is the last remaining reference.
    del reference_counts[existing_value]
  super(tf.__internal__.tracking.AutoTrackable, self).__delattr__(name)  # pylint: disable=bad-super-call
  # Scrub the object out of the sublayer / weight tracking lists, comparing
  # by identity (Variables override __eq__).
  if (isinstance(existing_value, Layer)
      or base_layer_utils.has_weights(existing_value)):
    super(tf.__internal__.tracking.AutoTrackable, self).__setattr__(  # pylint: disable=bad-super-call
        '_self_tracked_trackables',
        [l for l in self._self_tracked_trackables if l is not existing_value])
  if isinstance(existing_value, tf.Variable):
    super(tf.__internal__.tracking.AutoTrackable, self).__setattr__(  # pylint: disable=bad-super-call
        '_trainable_weights',
        [w for w in self._trainable_weights if w is not existing_value])
    super(tf.__internal__.tracking.AutoTrackable, self).__setattr__(  # pylint: disable=bad-super-call
        '_non_trainable_weights',
        [w for w in self._non_trainable_weights if w is not existing_value])
def __setattr__(self, name, value):
  """Sets attribute `name`, auto-tracking any sublayers/metrics/variables in `value`."""
  if (name == '_self_setattr_tracking' or
      not getattr(self, '_self_setattr_tracking', True) or
      # Exclude @property.setters from tracking
      hasattr(self.__class__, name)):
    try:
      super(tf.__internal__.tracking.AutoTrackable, self).__setattr__(name, value)  # pylint: disable=bad-super-call
    except AttributeError:
      raise AttributeError(
          ('Can\'t set the attribute "{}", likely because it conflicts with '
           'an existing read-only @property of the object. Please choose a '
           'different name.').format(name))
    return
  # Wraps data structures in `Trackable`, unwraps `NoDependency` objects.
  value = tf.__internal__.tracking.sticky_attribute_assignment(
      trackable=self, value=value, name=name)
  reference_counts = self._obj_reference_counts
  reference_counts[value] = reference_counts.get(value, 0) + 1
  # Clean out the old attribute, which clears _layers and _trainable_weights
  # if necessary.
  try:
    self.__delattr__(name)
  except AttributeError:
    pass
  # Keep track of metric instance created in subclassed layer.
  for val in tf.nest.flatten(value):
    if isinstance(val, metrics_mod.Metric) and hasattr(self, '_metrics'):
      self._metrics.append(val)
  # Append value to self._self_tracked_trackables if relevant
  if (getattr(self, '_auto_track_sub_layers', True) and
      (isinstance(value, tf.Module) or
       base_layer_utils.has_weights(value))):
    self._maybe_create_attribute('_self_tracked_trackables', [])
    # We need to check object identity to avoid de-duplicating empty
    # container types which compare equal.
    if not any((layer is value for layer in self._self_tracked_trackables)):
      self._self_tracked_trackables.append(value)
      if hasattr(value, '_use_resource_variables'):
        # Legacy layers (V1 tf.layers) must always use
        # resource variables.
        value._use_resource_variables = True
  # Append value to list of trainable / non-trainable weights if relevant
  # TODO(b/125122625): This won't pick up on any variables added to a
  # list/dict after creation.
  for val in tf.nest.flatten(value, expand_composites=True):
    if not isinstance(val, tf.Variable):
      continue
    # Users may add extra weights/variables
    # simply by assigning them to attributes (invalid for graph networks)
    self._maybe_create_attribute('_trainable_weights', [])
    self._maybe_create_attribute('_non_trainable_weights', [])
    if val.trainable:
      # Identity comparison: Variables override __eq__.
      if any(val is w for w in self._trainable_weights):
        continue
      self._trainable_weights.append(val)
    else:
      if any(val is w for w in self._non_trainable_weights):
        continue
      self._non_trainable_weights.append(val)
    backend.track_variable(val)
  # TODO(b/180760306) Skip the auto trackable from tf.Module to keep status
  # quo. See the comment at __delattr__.
  super(tf.__internal__.tracking.AutoTrackable, self).__setattr__(name, value)  # pylint: disable=bad-super-call
def _gather_children_attribute(self, attribute):
assert attribute in {
'variables', 'trainable_variables', 'non_trainable_variables'
}
if hasattr(self, '_self_tracked_trackables'):
nested_layers = self._flatten_modules(include_self=False, recursive=False)
return list(
itertools.chain.from_iterable(
getattr(layer, attribute) for layer in nested_layers))
return []
def _flatten_layers(self, recursive=True, include_self=True):
  """Yields tracked modules that are Keras `Layer`s (see `_flatten_modules`)."""
  for m in self._flatten_modules(
      recursive=recursive, include_self=include_self):
    if isinstance(m, Layer):
      yield m
def _flatten_modules(self, recursive=True, include_self=True):
  """Flattens `tf.Module` instances (excluding `Metrics`).
  Args:
    recursive: Whether to recursively flatten through submodules.
    include_self: Whether to include this `Layer` instance.
  Yields:
    `tf.Module` instance tracked by this `Layer`.
  """
  if include_self:
    yield self
  # Only instantiate set and deque if needed.
  trackables = getattr(self, '_self_tracked_trackables', None)
  if trackables:
    # Iterative DFS; `seen_object_ids` guards against cycles and duplicates
    # (identity-based, since tracked objects may override __eq__).
    seen_object_ids = set()
    deque = collections.deque(trackables)
    while deque:
      trackable_obj = deque.popleft()
      trackable_id = id(trackable_obj)
      if trackable_id in seen_object_ids:
        continue
      seen_object_ids.add(trackable_id)
      # Metrics are not considered part of the Layer's topology.
      if (isinstance(trackable_obj, tf.Module) and
          not isinstance(trackable_obj, metrics_mod.Metric)):
        yield trackable_obj
        # Introspect recursively through sublayers.
        if recursive:
          subtrackables = getattr(trackable_obj, '_self_tracked_trackables',
                                  None)
          if subtrackables:
            # extendleft(reversed(...)) preserves declaration order (DFS).
            deque.extendleft(reversed(subtrackables))
      elif isinstance(trackable_obj, tf.__internal__.tracking.TrackableDataStructure):
        # Data structures are introspected even with `recursive=False`.
        tracked_values = trackable_obj._values
        if tracked_values:
          deque.extendleft(reversed(tracked_values))
# This is a hack so that the is_layer (within
# training/trackable/layer_utils.py) check doesn't get the weights attr.
# TODO(b/110718070): Remove when fixed.
def _is_layer(self):
  # Constant marker; cheaper than an isinstance check in hot tracking paths.
  return True
def _init_call_fn_args(self, expects_training_arg=None):
  """(Re)inspects `self.call`'s signature and caches derived flags.

  Args:
    expects_training_arg: Optional bool override (used when restoring from
      a SavedModel); when None, inferred from the signature.
  """
  # Clear cached call function arguments.
  self.__class__._call_full_argspec.fget.cache.pop(self, None)
  self.__class__._call_fn_args.fget.cache.pop(self, None)
  self.__class__._call_accepts_kwargs.fget.cache.pop(self, None)
  call_fn_args = self._call_fn_args
  call_fn_args += self._call_full_argspec.kwonlyargs or []
  if expects_training_arg is None:
    # A **kwargs catch-all also counts as accepting `training`.
    self._expects_training_arg = ('training' in call_fn_args or
                                  self._call_accepts_kwargs)
  else:
    # Use value encoded into the metadata when loading from the SavedModel.
    self._expects_training_arg = expects_training_arg
  # The default training arg will be any (non-None) default specified in the
  # method signature, or None if no value is specified.
  call_fn_arg_defaults = self._call_fn_arg_defaults.copy()
  call_fn_arg_defaults.update(self._call_full_argspec.kwonlydefaults or {})
  self._default_training_arg = call_fn_arg_defaults.get('training')
  self._expects_mask_arg = ('mask' in call_fn_args or
                            self._call_accepts_kwargs)
@property
@layer_utils.cached_per_instance
def _call_full_argspec(self):
  """Cached `FullArgSpec` of `self.call`."""
  # Argspec inspection is expensive and the call spec is used often, so it
  # makes sense to cache the result.
  return tf_inspect.getfullargspec(self.call)
@property
@layer_utils.cached_per_instance
def _call_fn_args(self):
  """Positional argument names of `self.call`, without `self`."""
  all_args = self._call_full_argspec.args
  # Scrub `self` that appears if a decorator was applied.
  if all_args and all_args[0] == 'self':
    return all_args[1:]
  return all_args
@property
@layer_utils.cached_per_instance
def _call_fn_arg_defaults(self):
  """Dict mapping `call` argument names to their declared default values."""
  call_fn_args = self._call_fn_args
  call_fn_defaults = self._call_full_argspec.defaults or []
  defaults = dict()
  # The call arg defaults are an n-tuple of the last n elements of the args
  # list. (n = # of elements that have a default argument)
  for i in range(-1 * len(call_fn_defaults), 0):
    defaults[call_fn_args[i]] = call_fn_defaults[i]
  return defaults
@property
@layer_utils.cached_per_instance
def _call_fn_arg_positions(self):
  """Dict mapping each `call` argument name to its positional index."""
  return {arg: pos for pos, arg in enumerate(self._call_fn_args)}
@property
@layer_utils.cached_per_instance
def _call_accepts_kwargs(self):
  """Whether `self.call` declares a `**kwargs` catch-all."""
  return self._call_full_argspec.varkw is not None
@property
def _eager_losses(self):
  # A list of loss values containing activity regularizers and losses
  # manually added through `add_loss` during eager execution. It is cleared
  # after every batch.
  # Because we plan on eventually allowing a same model instance to be trained
  # in eager mode or graph mode alternatively, we need to keep track of
  # eager losses and symbolic losses via separate attributes.
  # Stored on `self._thread_local` so each thread sees its own loss list.
  if not hasattr(self._thread_local, '_eager_losses'):
    self._thread_local._eager_losses = []
  return self._thread_local._eager_losses
@_eager_losses.setter
def _eager_losses(self, losses):
  self._thread_local._eager_losses = losses
def _dedup_weights(self, weights):
"""Dedupe weights while maintaining order as much as possible."""
output, seen_ids = [], set()
for w in weights:
if id(w) not in seen_ids:
output.append(w)
# Track the Variable's identity to avoid __eq__ issues.
seen_ids.add(id(w))
return output
def _split_out_first_arg(self, args, kwargs):
# Grab the argument corresponding to the first argument in the
# layer's `call` method spec. This will either be the first positional
# argument, or it will be provided as a keyword argument.
if args:
inputs = args[0]
args = args[1:]
elif self._call_fn_args[0] in kwargs:
kwargs = copy.copy(kwargs)
inputs = kwargs.pop(self._call_fn_args[0])
else:
raise ValueError(
'The first argument to `Layer.call` must always be passed.')
return inputs, args, kwargs
# SavedModel properties. Please see keras/saving/saved_model for details.
@tf.__internal__.tracking.no_automatic_dependency_tracking
def _set_save_spec(self, inputs, args=None, kwargs=None):
  """Defines the save spec so that serialization is able to trace layer call.

  The TensorSpecs of the call function `inputs`, `args`, and `kwargs` are
  saved into a tuple of `([inputs] + args, kwargs)`.

  Args:
    inputs: possibly nested inputs passed into the call function.
    args: a list of positional arguments passed into call.
    kwargs: a dictionary of keyword arguments passed into call.
  """
  if self._saved_model_inputs_spec is not None:
    return  # Already set.
  args = args or []
  kwargs = kwargs or {}
  inputs_spec = tf.nest.map_structure(tf_utils.get_tensor_spec, inputs)
  # Filter out non-tensor arguments from args and kwargs.
  args_spec = []
  for arg in args:
    flat_arg = tf.nest.flatten(arg)
    flat_specs = [tf_utils.get_tensor_spec(x) for x in flat_arg]
    if any(s is None for s in flat_specs):
      break  # Stop recording positional args once a non-tensor has been found
    args_spec.append(tf.nest.pack_sequence_as(arg, flat_specs))
  kwargs_spec = {}
  for key, kwarg in kwargs.items():
    if key == 'training':
      # `training` is traced separately and never part of the save spec.
      continue
    flat_kwarg = tf.nest.flatten(kwarg)
    flat_specs = [tf_utils.get_tensor_spec(x) for x in flat_kwarg]
    if any(s is None for s in flat_specs):
      continue
    # Bug fix: this previously read `kwargs[key] = args_spec.append(...)`,
    # which stored None into the caller's kwargs and misfiled the keyword
    # spec into the positional list, leaving `kwargs_spec` always empty.
    kwargs_spec[key] = tf.nest.pack_sequence_as(kwarg, flat_specs)
  self._saved_model_inputs_spec = inputs_spec
  self._saved_model_arg_spec = ([inputs_spec] + args_spec, kwargs_spec)
def _get_save_spec(self, dynamic_batch=True, inputs_only=True):
  """Returns the recorded call spec (or just the inputs spec), or None if unset.

  Args:
    dynamic_batch: Whether to relax the batch dimension of each spec.
    inputs_only: When True, return only the inputs spec instead of the
      full `([inputs] + args, kwargs)` structure.
  """
  if self._saved_model_inputs_spec is None:
    return None
  spec = tf.nest.map_structure(
      lambda t: tf_utils.get_tensor_spec(t, dynamic_batch=dynamic_batch),
      self._saved_model_arg_spec)
  # spec[0][0] is the inputs spec recorded by `_set_save_spec`.
  return spec[0][0] if inputs_only else spec
@property
def _trackable_saved_model_saver(self):
  """SavedModel saver object handling this layer's serialization."""
  return layer_serialization.LayerSavedModelSaver(self)
@property
def _object_identifier(self):
  """String identifier stored in the SavedModel for this object type."""
  return self._trackable_saved_model_saver.object_identifier
@property
def _tracking_metadata(self):
  """Info about this layer to be saved into the SavedModel."""
  return self._trackable_saved_model_saver.tracking_metadata
def _list_extra_dependencies_for_serialization(self, serialization_cache):
  # Delegates to the SavedModel saver; `serialization_cache` is shared
  # across the whole object graph being saved.
  return (self._trackable_saved_model_saver
          .list_extra_dependencies_for_serialization(serialization_cache))
def _list_functions_for_serialization(self, serialization_cache):
  # Delegates to the SavedModel saver; returns the functions to trace.
  return (self._trackable_saved_model_saver
          .list_functions_for_serialization(serialization_cache))
@property
def _use_input_spec_as_call_signature(self):
  # Whether input spec can be used as the call signature when tracing the
  # Layer for SavedModel. By default, this is set to `True` for layers
  # exported from the Keras library, because the layers more rigidly define
  # the `input_specs` property (many custom layers only set the `ndims`)
  # True exactly when this class is a registered `keras` API symbol.
  return get_canonical_name_for_symbol(type(self),
                                       api_name='keras') is not None
def __getstate__(self):
# Override to support `copy.deepcopy` and pickling.
# Thread-local objects cannot be copied in Python 3, so pop these.
# Thread-local objects are used to cache losses in MirroredStrategy, and
# so shouldn't be copied.
state = self.__dict__.copy()
state.pop('_thread_local', None)
state.pop('_metrics_lock', None)
return state
def __setstate__(self, state):
state['_thread_local'] = threading.local()
state['_metrics_lock'] = threading.Lock()
# Bypass Trackable logic as `__dict__` already contains this info.
object.__setattr__(self, '__dict__', state)
class TensorFlowOpLayer(Layer):
"""Wraps a TensorFlow Operation in a Layer.
This class is used internally by the Functional API. When a user
uses a raw TensorFlow Operation on symbolic tensors originating
from an `Input` Layer, the resultant operation will be wrapped
with this Layer object in order to make the operation compatible
with the Keras API.
This Layer will create a new, identical operation (except for inputs
and outputs) every time it is called. If `run_eagerly` is `True`,
the op creation and calculation will happen inside an Eager function.
Instances of this Layer are created when `autolambda` is called, which
is whenever a Layer's `__call__` encounters symbolic inputs that do
not have Keras metadata, or when a Network's `__init__` encounters
outputs that do not have Keras metadata.
Attributes:
node_def: String, the serialized NodeDef of the Op this layer will wrap.
name: String, the name of the Layer.
constants: Dict of NumPy arrays, the values of any Tensors needed for this
Operation that do not originate from a Keras `Input` Layer. Since all
placeholders must come from Keras `Input` Layers, these Tensors must be
treated as constant in the Functional API.
trainable: Bool, whether this Layer is trainable. Currently Variables are
not supported, and so this parameter has no effect.
dtype: The default dtype of this Layer. Inherited from `Layer` and has no
effect on this class, however is used in `get_config`.
"""
@tf.__internal__.tracking.no_automatic_dependency_tracking
def __init__(self,
             node_def,
             name,
             constants=None,
             trainable=True,
             dtype=None):
  """Creates the wrapper layer from a serialized NodeDef.

  Args:
    node_def: dict, bytes or str — a serialized NodeDef of the op to wrap.
    name: String layer name (gets the TF-op-layer prefix prepended).
    constants: Optional dict mapping input index -> constant value.
    trainable: Bool, kept for `Layer` compatibility (no variables here).
    dtype: Default dtype, only used in `get_config`.
  """
  # Pass autocast=False, as if inputs are cast, input types might not match
  # Operation type.
  super(TensorFlowOpLayer, self).__init__(
      name=_TF_OP_LAYER_NAME_PREFIX + name, trainable=trainable, dtype=dtype,
      autocast=False)
  if isinstance(node_def, dict):
    self.node_def = json_format.ParseDict(node_def, tf.compat.v1.NodeDef())
  else:
    if not isinstance(node_def, bytes):
      node_def = node_def.encode('utf-8')
    self.node_def = tf.compat.v1.NodeDef.FromString(node_def)
  # JSON serialization stringifies keys which are integer input indices.
  self.constants = ({
      int(index): constant for index, constant in constants.items()
  } if constants is not None else {})
  # Layer uses original op unless it is called on new inputs.
  # This means `built` is not set in `__call__`.
  self.built = True
  # Do not individually trace TensorflowOpLayers in the SavedModel.
  self._must_restore_from_config = True
def call(self, inputs):
if tf.executing_eagerly():
return self._defun_call(inputs)
return self._make_op(inputs)
def _make_node_def(self, graph):
node_def = tf.compat.v1.NodeDef()
node_def.CopyFrom(self.node_def)
# Used in TPUReplicateContext to indicate whether this node has been cloned
# and to not add TPU attributes.
node_def.attr['_cloned'].b = True
node_def.name = graph.unique_name(node_def.name)
return node_def
  def _make_op(self, inputs):
    """Adds the serialized op to `inputs[0].graph` and returns its output(s).

    Re-inserts captured constants at their original input indices, creates
    the op through TF-internal graph APIs, and records its gradient.
    """
    inputs = tf.nest.flatten(inputs)
    graph = inputs[0].graph
    node_def = self._make_node_def(graph)
    with graph.as_default():
      for index, constant in self.constants.items():
        # Recreate constant in graph to add distribution context.
        value = tf.get_static_value(constant)
        if value is not None:
          constant = tf.constant(value, name=node_def.input[index])
        inputs.insert(index, constant)
      # TODO(b/183990973): We should drop or consolidate these private api calls
      # for adding an op to the graph and recording its gradient.
      c_op = tf.__internal__.create_c_op(graph, node_def, inputs, control_inputs=[])
      op = graph._create_op_from_tf_operation(c_op)
      op._control_flow_post_processing()
      # Record the gradient because custom-made ops don't go through the
      # code-gen'd eager call path
      op_type = tf.compat.as_str(op.op_def.name)
      attr_names = [tf.compat.as_str(attr.name) for attr in op.op_def.attr]
      # Flatten (name, value) pairs into the alternating-tuple layout
      # expected by `record_gradient`.
      attrs = []
      for attr_name in attr_names:
        attrs.append(attr_name)
        attrs.append(op.get_attr(attr_name))
      attrs = tuple(attrs)
      tf.__internal__.record_gradient(op_type, op.inputs, attrs, op.outputs)
      # Single-output ops return the bare tensor rather than a list.
      if len(op.outputs) == 1:
        return op.outputs[0]
      return op.outputs
  @tf.function
  def _defun_call(self, inputs):
    """Wraps the op creation method in an Eager function for `run_eagerly`.

    The `tf.function` wrapper lets `_make_op`'s graph-mode op construction
    execute from an eager context.
    """
    return self._make_op(inputs)
def get_config(self):
config = super(TensorFlowOpLayer, self).get_config()
config.update({
# `__init__` prefixes the name. Revert to the constructor argument.
'name': config['name'][len(_TF_OP_LAYER_NAME_PREFIX):],
'node_def': json_format.MessageToDict(self.node_def),
'constants': {
i: backend.get_value(c) for i, c in self.constants.items()
}
})
return config
class AddLoss(Layer):
  """Layer that registers its input tensor as a loss via `add_loss`.

  Attributes:
    unconditional: Whether or not the loss should be conditioned on the inputs.
  """

  def __init__(self, unconditional, **kwargs):
    # Pass autocast=False, as there is no reason to cast loss to a different
    # dtype.
    kwargs['autocast'] = False
    super(AddLoss, self).__init__(**kwargs)
    self.unconditional = unconditional

  def call(self, inputs):
    # An unconditional loss is not tied to the inputs that produced it.
    conditioned_on_inputs = not self.unconditional
    self.add_loss(inputs, inputs=conditioned_on_inputs)
    return inputs

  def get_config(self):
    config = super(AddLoss, self).get_config()
    config['unconditional'] = self.unconditional
    return config
class AddMetric(Layer):
  """Layer that registers its input tensor as a metric via `add_metric`.

  Attributes:
    aggregation: 'mean' or None. How the inputs should be aggregated.
    metric_name: The name to use for this metric.
  """

  def __init__(self, aggregation=None, metric_name=None, **kwargs):
    super(AddMetric, self).__init__(**kwargs)
    self.aggregation = aggregation
    self.metric_name = metric_name

  def call(self, inputs):
    self.add_metric(
        inputs, aggregation=self.aggregation, name=self.metric_name)
    return inputs

  def get_config(self):
    config = super(AddMetric, self).get_config()
    config['aggregation'] = self.aggregation
    config['metric_name'] = self.metric_name
    return config
def _in_functional_construction_mode(layer, inputs, args, kwargs, input_list):  # pylint: disable=unused-argument
  """Check the arguments to see if we are constructing a functional model."""
  # Functional construction is triggered by the presence of any KerasTensor
  # anywhere in the call arguments.
  flat_call_args = tf.nest.flatten([inputs, args, kwargs])
  return any(
      isinstance(arg, keras_tensor.KerasTensor) for arg in flat_call_args)
def _convert_numpy_or_python_types(x):
  """Converts tensors, ndarrays and Python numbers to `tf.Tensor`; else no-op."""
  if not isinstance(x, (tf.Tensor, np.ndarray, float, int)):
    return x
  return tf.convert_to_tensor(x)
@keras_export(
    'keras.__internal__.apply_name_scope_on_model_declaration', v1=[])
def _apply_name_scope_on_model_declaration(enable):
  """Apply `with tf.name_scope(...)` on model declaration.

  When enabled, the name scope active at *declaration* time is baked into
  layer names:

  ```python
  tf.keras.__internal__.apply_name_scope_on_model_declaration(True)
  inputs = input_layer.Input((3,))
  with tf.name_scope('MyScope'):
    outputs = layers.Dense(10, name='MyDense')(inputs)
  model = tf.keras.Model(inputs, outputs)
  # The dense layer's name becomes "model/MyScope/MyDense/*";
  # without enabling it would be "model/MyDense/*".
  ```

  Args:
    enable: Enables if `True`, disables if `False`.

  Raises:
    TypeError: If `enable` is not a bool.
  """
  if isinstance(enable, bool):
    global _is_name_scope_on_model_declaration_enabled
    _is_name_scope_on_model_declaration_enabled = enable
    return
  raise TypeError(
      '`enable` argument must be `True` or `False`, got {}'.format(enable))
class BaseRandomLayer(Layer):
  """A layer handle the random number creation and savemodel behavior."""

  @tf.__internal__.tracking.no_automatic_dependency_tracking
  def __init__(self, seed=None, force_generator=False, **kwargs):
    """Initialize the BaseRandomLayer.

    The constructor is decorated with `@no_automatic_dependency_tracking` so
    that `self._random_generator` (an AutoTrackable that may wrap a
    `tf.random.Generator` holding `tf.Variable` state) is NOT auto-tracked.
    That keeps the generator state out of `model.weights` and checkpoints for
    backward compatibility, while
    `_list_extra_dependencies_for_serialization` below still exposes it to
    SavedModel when tracing the tf.function for `call()`.

    Args:
      seed: optional integer, used to create RandomGenerator.
      force_generator: boolean, default to False, whether to force the
        RandomGenerator to use the code branch of tf.random.Generator.
      **kwargs: other keyword arguments that will be passed to the parent
        class.
    """
    super().__init__(**kwargs)
    generator = backend.RandomGenerator(seed, force_generator=force_generator)
    self._random_generator = generator

  def _list_extra_dependencies_for_serialization(self, serialization_cache):
    """Exposes `_random_generator` to SavedModel only (not weights)."""
    dependencies = super()._list_extra_dependencies_for_serialization(
        serialization_cache)
    dependencies['_random_generator'] = self._random_generator
    return dependencies
# Avoid breaking users who directly import this symbol from this file.
# Legacy re-export: the canonical definition lives in `keras.engine.input_spec`.
# TODO(fchollet): remove this.
InputSpec = input_spec.InputSpec  # pylint:disable=invalid-name
# modify .input_shape()
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=protected-access
# pylint: disable=g-classes-have-attributes
# pylint: disable=g-bad-import-order
"""Contains the base Layer class, from which all layers inherit."""
import tensorflow.compat.v2 as tf
import collections
import copy
import functools
import itertools
import textwrap
import threading
import warnings
import weakref
import numpy as np
from google.protobuf import json_format
from keras import backend
from keras import constraints
from keras import initializers
from keras import regularizers
from keras.engine import base_layer_utils
from keras.engine import input_spec
from keras.engine import keras_tensor
from keras.engine import node as node_module
from keras.mixed_precision import autocast_variable
from keras.mixed_precision import loss_scale_optimizer
from keras.mixed_precision import policy
from keras.saving.saved_model import layer_serialization
from keras.utils import generic_utils
from keras.utils import layer_utils
from keras.utils import object_identity
from keras.utils import tf_inspect
from keras.utils import tf_utils
from keras.utils import traceback_utils
from keras.utils import version_utils
# A module that only depends on `keras.layers` import these from here.
from keras.utils.generic_utils import to_snake_case # pylint: disable=unused-import
from keras.utils.tf_utils import is_tensor_or_tensor_list # pylint: disable=unused-import
from tensorflow.python.platform import tf_logging
from tensorflow.python.util.tf_export import get_canonical_name_for_symbol
from tensorflow.python.util.tf_export import keras_export
from tensorflow.tools.docs import doc_controls
# pylint: disable=g-inconsistent-quotes
# Lazily load `keras.metrics` to avoid a circular import at module load time.
metrics_mod = generic_utils.LazyLoader(
    "metrics_mod", globals(),
    "keras.metrics")
# pylint: enable=g-inconsistent-quotes
# Prefix that is added to the TF op layer names.
_TF_OP_LAYER_NAME_PREFIX = 'tf_op_layer_'
# TODO(mdan): Should we have a single generic type for types that can be passed
# to tf.cast?
_AUTOCAST_TYPES = (tf.Tensor, tf.SparseTensor,
                   tf.RaggedTensor)
# Usage gauges reported to TensorFlow's internal API monitoring; set by
# `Layer._instrument_layer_creation` when layers/models are created.
keras_layers_gauge = tf.__internal__.monitoring.BoolGauge(
    '/tensorflow/api/keras/layers', 'keras layers usage', 'method')
keras_models_gauge = tf.__internal__.monitoring.BoolGauge(
    '/tensorflow/api/keras/models', 'keras model usage', 'method')
keras_api_gauge = tf.__internal__.monitoring.BoolGauge(
    '/tensorflow/api/keras', 'keras api usage', 'method')
keras_premade_model_gauge = tf.__internal__.monitoring.BoolGauge(
    '/tensorflow/api/keras/premade_models', 'premade keras model usage', 'type')
# Module-level switch toggled by `_apply_name_scope_on_model_declaration`.
_is_name_scope_on_model_declaration_enabled = False
@keras_export('keras.layers.Layer')
class Layer(tf.Module, version_utils.LayerVersionSelector):
"""This is the class from which all layers inherit.
A layer is a callable object that takes as input one or more tensors and
that outputs one or more tensors. It involves *computation*, defined
in the `call()` method, and a *state* (weight variables). State can be
created in various places, at the convenience of the subclass implementer:
* in `__init__()`;
* in the optional `build()` method, which is invoked by the first
`__call__()` to the layer, and supplies the shape(s) of the input(s),
which may not have been known at initialization time;
* in the first invocation of `call()`, with some caveats discussed
below.
Users will just instantiate a layer and then treat it as a callable.
Args:
trainable: Boolean, whether the layer's variables should be trainable.
name: String name of the layer.
dtype: The dtype of the layer's computations and weights. Can also be a
`tf.keras.mixed_precision.Policy`, which allows the computation and weight
dtype to differ. Default of `None` means to use
`tf.keras.mixed_precision.global_policy()`, which is a float32 policy
unless set to different value.
dynamic: Set this to `True` if your layer should only be run eagerly, and
should not be used to generate a static computation graph.
This would be the case for a Tree-RNN or a recursive network,
for example, or generally for any layer that manipulates tensors
using Python control flow. If `False`, we assume that the layer can
safely be used to generate a static computation graph.
Attributes:
name: The name of the layer (string).
dtype: The dtype of the layer's weights.
variable_dtype: Alias of `dtype`.
compute_dtype: The dtype of the layer's computations. Layers automatically
cast inputs to this dtype which causes the computations and output to also
be in this dtype. When mixed precision is used with a
`tf.keras.mixed_precision.Policy`, this will be different than
`variable_dtype`.
dtype_policy: The layer's dtype policy. See the
`tf.keras.mixed_precision.Policy` documentation for details.
trainable_weights: List of variables to be included in backprop.
non_trainable_weights: List of variables that should not be
included in backprop.
weights: The concatenation of the lists trainable_weights and
non_trainable_weights (in this order).
trainable: Whether the layer should be trained (boolean), i.e. whether
its potentially-trainable weights should be returned as part of
`layer.trainable_weights`.
input_spec: Optional (list of) `InputSpec` object(s) specifying the
constraints on inputs that can be accepted by the layer.
We recommend that descendants of `Layer` implement the following methods:
* `__init__()`: Defines custom layer attributes, and creates layer weights
that do not depend on input shapes, using `add_weight()`, or other state.
* `build(self, input_shape)`: This method can be used to create weights that
depend on the shape(s) of the input(s), using `add_weight()`, or other
state. `__call__()` will automatically build the layer (if it has not been
built yet) by calling `build()`.
* `call(self, inputs, *args, **kwargs)`: Called in `__call__` after making
sure `build()` has been called. `call()` performs the logic of applying the
layer to the `inputs`. The first invocation may additionally create state
that could not be conveniently created in `build()`; see its docstring
for details.
Two reserved keyword arguments you can optionally use in `call()` are:
- `training` (boolean, whether the call is in inference mode or training
mode). See more details in [the layer/model subclassing guide](
https://www.tensorflow.org/guide/keras/custom_layers_and_models#privileged_training_argument_in_the_call_method)
- `mask` (boolean tensor encoding masked timesteps in the input, used
in RNN layers). See more details in [the layer/model subclassing guide](
https://www.tensorflow.org/guide/keras/custom_layers_and_models#privileged_mask_argument_in_the_call_method)
A typical signature for this method is `call(self, inputs)`, and user could
optionally add `training` and `mask` if the layer need them. `*args` and
`**kwargs` is only useful for future extension when more input parameters
are planned to be added.
* `get_config(self)`: Returns a dictionary containing the configuration used
to initialize this layer. If the keys differ from the arguments
in `__init__`, then override `from_config(self)` as well.
This method is used when saving
the layer or a model that contains this layer.
Examples:
Here's a basic example: a layer with two variables, `w` and `b`,
that returns `y = w . x + b`.
It shows how to implement `build()` and `call()`.
Variables set as attributes of a layer are tracked as weights
of the layers (in `layer.weights`).
```python
class SimpleDense(Layer):
def __init__(self, units=32):
super(SimpleDense, self).__init__()
self.units = units
def build(self, input_shape): # Create the state of the layer (weights)
w_init = tf.random_normal_initializer()
self.w = tf.Variable(
initial_value=w_init(shape=(input_shape[-1], self.units),
dtype='float32'),
trainable=True)
b_init = tf.zeros_initializer()
self.b = tf.Variable(
initial_value=b_init(shape=(self.units,), dtype='float32'),
trainable=True)
def call(self, inputs): # Defines the computation from inputs to outputs
return tf.matmul(inputs, self.w) + self.b
# Instantiates the layer.
linear_layer = SimpleDense(4)
# This will also call `build(input_shape)` and create the weights.
y = linear_layer(tf.ones((2, 2)))
assert len(linear_layer.weights) == 2
# These weights are trainable, so they're listed in `trainable_weights`:
assert len(linear_layer.trainable_weights) == 2
```
Note that the method `add_weight()` offers a shortcut to create weights:
```python
class SimpleDense(Layer):
def __init__(self, units=32):
super(SimpleDense, self).__init__()
self.units = units
def build(self, input_shape):
self.w = self.add_weight(shape=(input_shape[-1], self.units),
initializer='random_normal',
trainable=True)
self.b = self.add_weight(shape=(self.units,),
initializer='random_normal',
trainable=True)
def call(self, inputs):
return tf.matmul(inputs, self.w) + self.b
```
Besides trainable weights, updated via backpropagation during training,
layers can also have non-trainable weights. These weights are meant to
be updated manually during `call()`. Here's a example layer that computes
the running sum of its inputs:
```python
class ComputeSum(Layer):
def __init__(self, input_dim):
super(ComputeSum, self).__init__()
# Create a non-trainable weight.
self.total = tf.Variable(initial_value=tf.zeros((input_dim,)),
trainable=False)
def call(self, inputs):
self.total.assign_add(tf.reduce_sum(inputs, axis=0))
return self.total
my_sum = ComputeSum(2)
x = tf.ones((2, 2))
y = my_sum(x)
print(y.numpy()) # [2. 2.]
y = my_sum(x)
print(y.numpy()) # [4. 4.]
assert my_sum.weights == [my_sum.total]
assert my_sum.non_trainable_weights == [my_sum.total]
assert my_sum.trainable_weights == []
```
For more information about creating layers, see the guide
[Making new Layers and Models via subclassing](
https://www.tensorflow.org/guide/keras/custom_layers_and_models)
"""
# See tf.Module for the usage of this property.
# The key for _obj_reference_counts_dict is a Trackable, which could be a
# variable or layer etc. tf.Module._flatten will fail to flatten the key
# since it is trying to convert Trackable to a string. This attribute can be
# ignored even after the fix of nest lib, since the trackable object should
# already been available as individual attributes. _obj_reference_counts_dict
# just contains a copy of them.
_TF_MODULE_IGNORED_PROPERTIES = frozenset(itertools.chain(
('_obj_reference_counts_dict',),
tf.Module._TF_MODULE_IGNORED_PROPERTIES
))
# When loading from a SavedModel, Layers typically can be revived into a
# generic Layer wrapper. Sometimes, however, layers may implement methods
# that go beyond this wrapper, as in the case of PreprocessingLayers'
# `adapt` method. When this is the case, layer implementers can override
# must_restore_from_config to return True; layers with this property must
# be restored into their actual objects (and will fail if the object is
# not available to the restoration code).
_must_restore_from_config = False
def _get_cell_name(self):
canonical_name = get_canonical_name_for_symbol(
self.__class__, api_name='keras', add_prefix_to_v1_names=True)
if canonical_name is not None:
return 'tf.{}'.format(canonical_name)
return self.__class__.__module__ + '.' + self.__class__.__name__
def _instrument_layer_creation(self):
self._instrumented_keras_api = False
self._instrumented_keras_layer_class = False
self._instrumented_keras_model_class = False
if not getattr(self, '_disable_keras_instrumentation', False):
keras_api_gauge.get_cell('layer').set(True)
self._instrumented_keras_api = True
if getattr(self, '_is_model_for_instrumentation', False):
keras_models_gauge.get_cell(self._get_cell_name()).set(True)
self._instrumented_keras_model_class = True
else:
keras_layers_gauge.get_cell(self._get_cell_name()).set(True)
self._instrumented_keras_layer_class = True
else:
# This is a legacy layer that has disabled instrumentation
# as a native keras object. We still instrument this as
# legacy usage.
keras_api_gauge.get_cell('legacy_layer').set(True)
  @tf.__internal__.tracking.no_automatic_dependency_tracking
  def __init__(self,
               trainable=True,
               name=None,
               dtype=None,
               dynamic=False,
               **kwargs):
    """Initializes internal layer state; see the class docstring for args.

    Raises:
      TypeError: If `trainable` or `dynamic` is not boolean-like.
    """
    self._instrument_layer_creation()
    # These properties should be set by the user via keyword arguments.
    # note that 'dtype', 'input_shape' and 'batch_input_shape'
    # are only applicable to input layers: do not pass these keywords
    # to non-input layers.
    allowed_kwargs = {
        'input_dim',
        'input_shape',
        'batch_input_shape',
        'batch_size',
        'weights',
        'activity_regularizer',
        'autocast',
        'implementation',
    }
    # Validate optional keyword arguments.
    generic_utils.validate_kwargs(kwargs, allowed_kwargs)
    # Mutable properties
    # Indicates whether the layer's weights are updated during training
    # and whether the layer's updates are run during training.
    # `trainable` may also be a boolean tensor/variable (e.g. under tf.cond).
    if not (isinstance(trainable, bool) or
            (isinstance(trainable, (tf.Tensor, tf.Variable)) and
             trainable.dtype is tf.bool)):
      raise TypeError(
          'Expected `trainable` argument to be a boolean, '
          f'but got: {trainable}')
    self._trainable = trainable
    # A stateful layer is a layer whose updates are run during inference too,
    # for instance stateful RNNs.
    self._stateful = False
    # Indicates whether `build` needs to be called upon layer call, to create
    # the layer's weights. (Note that the first call() may also create weights,
    # independent of build().)
    self.built = False
    # Provides information about which inputs are compatible with the layer.
    self._input_spec = None
    # SavedModel-related attributes.
    # Record the build input shape for loading purposes.
    # TODO(kathywu): Move this to Layer._set_save_spec once cl/290121460 is
    # submitted.
    self._build_input_shape = None
    self._saved_model_inputs_spec = None
    self._saved_model_arg_spec = None
    # `Layer.compute_mask` will be called at the end of `Layer.__call__` if
    # `Layer.compute_mask` is overridden, or if the `Layer` subclass sets
    # `self.supports_masking=True`.
    self._supports_masking = not generic_utils.is_default(self.compute_mask)
    self._init_set_name(name)
    self._activity_regularizer = regularizers.get(
        kwargs.pop('activity_regularizer', None))
    self._maybe_create_attribute('_trainable_weights', [])
    self._maybe_create_attribute('_non_trainable_weights', [])
    self._updates = []
    # Object to store all thread local layer properties.
    self._thread_local = threading.local()
    # A list of zero-argument lambdas which return Tensors, used for variable
    # regularizers.
    self._callable_losses = []
    # A list of symbolic Tensors containing activity regularizers and losses
    # manually added through `add_loss` in graph-building mode.
    self._losses = []
    # A list of metric instances corresponding to the symbolic metric tensors
    # added using the `add_metric` API.
    self._metrics = []
    # Ensures the same metric is not added multiple times in `MirroredStrategy`.
    self._metrics_lock = threading.Lock()
    # Both graph and subclassed networks have a dtype policy. For graph
    # networks, the policy's compute and variable dtypes are ignored. Such
    # networks only use the policy if it is a PolicyV1, in which case it uses
    # the PolicyV1's loss_scale (Policy does not have a loss_scale). For
    # subclassed networks, the compute and variable dtypes are used as like any
    # ordinary layer.
    self._set_dtype_policy(dtype)
    # Boolean indicating whether the layer automatically casts its inputs to the
    # layer's compute_dtype.
    self._autocast = kwargs.get('autocast',
                                base_layer_utils.v2_dtype_behavior_enabled())
    # Tracks `TrackableDataStructure`s, `Module`s, and `Layer`s.
    # Ordered by when the object was assigned as an attr.
    # Entries are unique.
    self._maybe_create_attribute('_self_tracked_trackables', [])
    # These lists will be filled via successive calls
    # to self._add_inbound_node().
    # Used in symbolic mode only, only in conjunction with graph-networks
    self._inbound_nodes_value = []
    self._outbound_nodes_value = []
    self._init_call_fn_args()
    # Whether the `call` method can be used to build a TF graph without issues.
    # This attribute has no effect if the model is created using the Functional
    # API. Instead, `model.dynamic` is determined based on the internal layers.
    if not isinstance(dynamic, bool):
      raise TypeError(
          f'Expected `dynamic` argument to be a boolean, but got: {dynamic}')
    self._dynamic = dynamic
    # Manage input shape information if passed.
    if 'input_dim' in kwargs and 'input_shape' not in kwargs:
      # Backwards compatibility: alias 'input_dim' to 'input_shape'.
      kwargs['input_shape'] = (kwargs['input_dim'],)
    if 'input_shape' in kwargs or 'batch_input_shape' in kwargs:
      # In this case we will later create an input layer
      # to insert before the current layer
      if 'batch_input_shape' in kwargs:
        batch_input_shape = tuple(kwargs['batch_input_shape'])
      elif 'input_shape' in kwargs:
        if 'batch_size' in kwargs:
          batch_size = kwargs['batch_size']
        else:
          batch_size = None
        batch_input_shape = (batch_size,) + tuple(kwargs['input_shape'])
      self._batch_input_shape = batch_input_shape
    # Manage initial weight values if passed.
    self._initial_weights = kwargs.get('weights', None)
    # Whether the layer will track any layers that is set as attribute on itself
    # as sub-layers, the weights from the sub-layers will be included in the
    # parent layer's variables() as well.
    # Default to True, which means auto tracking is turned on. Certain subclass
    # might want to turn it off, like Sequential model.
    self._auto_track_sub_layers = True
    # For backwards compat reasons, most built-in layers do not guarantee
    # That they will 100% preserve the structure of input args when saving
    # / loading configs. E.g. they may un-nest an arg that is
    # a list with one element.
    self._preserve_input_structure_in_config = False
    # Save outer name scope at layer declaration so that it is preserved at
    # the actual layer construction.
    self._outer_name_scope = tf.get_current_name_scope()
@tf.__internal__.tracking.no_automatic_dependency_tracking
@generic_utils.default
def build(self, input_shape):
"""Creates the variables of the layer (optional, for subclass implementers).
This is a method that implementers of subclasses of `Layer` or `Model`
can override if they need a state-creation step in-between
layer instantiation and layer call. It is invoked automatically before
the first execution of `call()`.
This is typically used to create the weights of `Layer` subclasses
(at the discretion of the subclass implementer).
Args:
input_shape: Instance of `TensorShape`, or list of instances of
`TensorShape` if the layer expects a list of inputs
(one instance per input).
"""
# Only record the build input shapes of overridden build methods.
if not hasattr(self.build, '_is_default'):
self._build_input_shape = input_shape
self.built = True
  @doc_controls.for_subclass_implementers
  def call(self, inputs, *args, **kwargs):  # pylint: disable=unused-argument
    """This is where the layer's logic lives.

    The `call()` method may not create state (except in its first invocation,
    wrapping the creation of variables or other resources in `tf.init_scope()`).
    It is recommended to create state in `__init__()`, or the `build()` method
    that is called automatically before `call()` executes the first time.

    Args:
      inputs: Input tensor, or dict/list/tuple of input tensors.
        The first positional `inputs` argument is subject to special rules:
        - `inputs` must be explicitly passed. A layer cannot have zero
          arguments, and `inputs` cannot be provided via the default value
          of a keyword argument.
        - NumPy array or Python scalar values in `inputs` get cast as tensors.
        - Keras mask metadata is only collected from `inputs`.
        - Layers are built (`build(input_shape)` method)
          using shape info from `inputs` only.
        - `input_spec` compatibility is only checked against `inputs`.
        - Mixed precision input casting is only applied to `inputs`.
          If a layer has tensor arguments in `*args` or `**kwargs`, their
          casting behavior in mixed precision should be handled manually.
        - The SavedModel input specification is generated using `inputs` only.
        - Integration with various ecosystem packages like TFMOT, TFLite,
          TF.js, etc is only supported for `inputs` and not for tensors in
          positional and keyword arguments.
      *args: Additional positional arguments. May contain tensors, although
        this is not recommended, for the reasons above.
      **kwargs: Additional keyword arguments. May contain tensors, although
        this is not recommended, for the reasons above.
        The following optional keyword arguments are reserved:
        - `training`: Boolean scalar tensor of Python boolean indicating
          whether the `call` is meant for training or inference.
        - `mask`: Boolean input mask. If the layer's `call()` method takes a
          `mask` argument, its default value will be set to the mask generated
          for `inputs` by the previous layer (if `input` did come from a layer
          that generated a corresponding mask, i.e. if it came from a Keras
          layer with masking support).

    Returns:
      A tensor or list/tuple of tensors.
    """
    # Default implementation is the identity; subclasses override this with
    # the layer's actual computation.
    return inputs
@doc_controls.for_subclass_implementers
def _add_trackable(self, trackable_object, trainable):
"""Adds a Trackable object to this layer's state.
Args:
trackable_object: The tf.tracking.Trackable object to add.
trainable: Boolean, whether the variable should be part of the layer's
"trainable_variables" (e.g. variables, biases) or
"non_trainable_variables" (e.g. BatchNorm mean and variance).
Returns:
The TrackableWeightHandler used to track this object.
"""
if isinstance(trackable_object, base_layer_utils.TrackableWeightHandler):
handler = trackable_object
else:
handler = base_layer_utils.TrackableWeightHandler(trackable_object)
if trainable:
self._trainable_weights.append(handler)
else:
self._non_trainable_weights.append(handler)
return handler
  @doc_controls.for_subclass_implementers
  def add_weight(self,
                 name=None,
                 shape=None,
                 dtype=None,
                 initializer=None,
                 regularizer=None,
                 trainable=None,
                 constraint=None,
                 use_resource=None,
                 synchronization=tf.VariableSynchronization.AUTO,
                 aggregation=tf.VariableAggregation.NONE,
                 **kwargs):
    """Adds a new variable to the layer.

    Args:
      name: Variable name.
      shape: Variable shape. Defaults to scalar if unspecified.
      dtype: The type of the variable. Defaults to `self.dtype`.
      initializer: Initializer instance (callable).
      regularizer: Regularizer instance (callable).
      trainable: Boolean, whether the variable should be part of the layer's
        "trainable_variables" (e.g. variables, biases)
        or "non_trainable_variables" (e.g. BatchNorm mean and variance).
        Note that `trainable` cannot be `True` if `synchronization`
        is set to `ON_READ`.
      constraint: Constraint instance (callable).
      use_resource: Whether to use `ResourceVariable`.
      synchronization: Indicates when a distributed a variable will be
        aggregated. Accepted values are constants defined in the class
        `tf.VariableSynchronization`. By default the synchronization is set to
        `AUTO` and the current `DistributionStrategy` chooses
        when to synchronize. If `synchronization` is set to `ON_READ`,
        `trainable` must not be set to `True`.
      aggregation: Indicates how a distributed variable will be aggregated.
        Accepted values are constants defined in the class
        `tf.VariableAggregation`.
      **kwargs: Additional keyword arguments. Accepted values are `getter`,
        `collections`, `experimental_autocast` and `caching_device`.

    Returns:
      The variable created.

    Raises:
      ValueError: When giving unsupported dtype and no initializer or when
        trainable has been set to True with synchronization set as `ON_READ`.
    """
    if shape is None:
      shape = ()
    kwargs.pop('partitioner', None)  # Ignored.
    # Validate optional keyword arguments.
    for kwarg in kwargs:
      if kwarg not in ['collections', 'experimental_autocast',
                       'caching_device', 'getter']:
        raise TypeError('Unknown keyword argument:', kwarg)
    collections_arg = kwargs.pop('collections', None)
    # 'experimental_autocast' can be set to False by the caller to indicate an
    # AutoCastVariable should never be created.
    autocast = kwargs.pop('experimental_autocast', True)
    # See the docstring for tf.Variable about the details for caching_device.
    caching_device = kwargs.pop('caching_device', None)
    if dtype is None:
      dtype = self.dtype or backend.floatx()
    dtype = tf.as_dtype(dtype)
    if self._dtype_policy.variable_dtype is None:
      # The policy is "_infer", so we infer the policy from the variable dtype.
      self._set_dtype_policy(policy.Policy(dtype.base_dtype.name))
    initializer = initializers.get(initializer)
    regularizer = regularizers.get(regularizer)
    constraint = constraints.get(constraint)
    if synchronization == tf.VariableSynchronization.ON_READ:
      if trainable:
        raise ValueError(
            'Synchronization value can be set to '
            'VariableSynchronization.ON_READ only for non-trainable variables. '
            'You have specified trainable=True and '
            'synchronization=VariableSynchronization.ON_READ.')
      else:
        # Set trainable to be false when variable is to be synced on read.
        trainable = False
    elif trainable is None:
      trainable = True
    # Initialize variable when no initializer provided
    if initializer is None:
      # If dtype is DT_FLOAT, provide a uniform unit scaling initializer
      if dtype.is_floating:
        initializer = initializers.get('glorot_uniform')
      # If dtype is DT_INT/DT_UINT, provide a default value `zero`
      # If dtype is DT_BOOL, provide a default value `FALSE`
      elif dtype.is_integer or dtype.is_unsigned or dtype.is_bool:
        initializer = initializers.get('zeros')
      # NOTES:Do we need to support for handling DT_STRING and DT_COMPLEX here?
      elif 'getter' not in kwargs:
        # When `getter` is specified, it's possibly fine for `initializer` to be
        # None since it's up to the custom `getter` to raise error in case it
        # indeed needs `initializer`.
        raise ValueError(f'An initializer for variable {name} of type '
                         f'{dtype.base_dtype} is required for layer '
                         f'{self.name}. Received: {initializer}.')
    getter = kwargs.pop('getter', base_layer_utils.make_variable)
    # Under mixed precision (compute dtype != variable dtype), wrap the getter
    # so float variables come back as AutoCastVariables.
    if (autocast and
        self._dtype_policy.compute_dtype != self._dtype_policy.variable_dtype
        and dtype.is_floating):
      old_getter = getter

      # Wrap variable constructor to return an AutoCastVariable.
      def getter(*args, **kwargs):  # pylint: disable=function-redefined
        variable = old_getter(*args, **kwargs)
        return autocast_variable.create_autocast_variable(variable)

      # Also the caching_device does not work with the mixed precision API,
      # disable it if it is specified.
      # TODO(b/142020079): Re-enable it once the bug is fixed.
      if caching_device is not None:
        tf_logging.warning(
            '`caching_device` does not work with mixed precision API. Ignoring '
            'user specified `caching_device`.')
        caching_device = None
    variable = self._add_variable_with_custom_getter(
        name=name,
        shape=shape,
        # TODO(allenl): a `make_variable` equivalent should be added as a
        # `Trackable` method.
        getter=getter,
        # Manage errors in Layer rather than Trackable.
        overwrite=True,
        initializer=initializer,
        dtype=dtype,
        constraint=constraint,
        trainable=trainable,
        use_resource=use_resource,
        collections=collections_arg,
        synchronization=synchronization,
        aggregation=aggregation,
        caching_device=caching_device)
    if regularizer is not None:
      # TODO(fchollet): in the future, this should be handled at the
      # level of variable creation, and weight regularization losses
      # should be variable attributes.
      name_in_scope = variable.name[:variable.name.find(':')]
      self._handle_weight_regularization(name_in_scope,
                                         variable,
                                         regularizer)
    # Partitioned variables yield multiple components; track each one in the
    # appropriate trainable/non-trainable bucket.
    if base_layer_utils.is_split_variable(variable):
      for v in variable:
        backend.track_variable(v)
        if trainable:
          self._trainable_weights.append(v)
        else:
          self._non_trainable_weights.append(v)
    else:
      backend.track_variable(variable)
      if trainable:
        self._trainable_weights.append(variable)
      else:
        self._non_trainable_weights.append(variable)
    return variable
@generic_utils.default
def get_config(self):
  """Returns the config of the layer.

  A layer config is a Python dictionary (serializable)
  containing the configuration of a layer.
  The same layer can be reinstantiated later
  (without its trained weights) from this configuration.

  The config of a layer does not include connectivity
  information, nor the layer class name. These are handled
  by `Network` (one layer of abstraction above).

  Note that `get_config()` does not guarantee to return a fresh copy of dict
  every time it is called. The callers should make a copy of the returned dict
  if they want to modify it.

  Returns:
    Python dictionary.

  Raises:
    NotImplementedError: if the layer's `__init__` takes extra arguments and
      `get_config` has not been overridden to serialize them.
  """
  # Arguments accepted by this layer's `__init__`; used below to detect
  # constructor arguments that this default config cannot capture.
  all_args = tf_inspect.getfullargspec(self.__init__).args
  config = {
      'name': self.name,
      'trainable': self.trainable,
  }
  if hasattr(self, '_batch_input_shape'):
    config['batch_input_shape'] = self._batch_input_shape
  config['dtype'] = policy.serialize(self._dtype_policy)
  if hasattr(self, 'dynamic'):
    # Only include `dynamic` in the `config` if it is `True`
    if self.dynamic:
      config['dynamic'] = self.dynamic
    elif 'dynamic' in all_args:
      all_args.remove('dynamic')
  expected_args = config.keys()
  # Finds all arguments in the `__init__` that are not in the config:
  extra_args = [arg for arg in all_args if arg not in expected_args]
  # Check that either the only argument in the `__init__` is `self`,
  # or that `get_config` has been overridden:
  # (`_is_default` is set by the `@generic_utils.default` decorator above,
  # so its presence means the user has NOT overridden `get_config`.)
  if len(extra_args) > 1 and hasattr(self.get_config, '_is_default'):
    raise NotImplementedError(textwrap.dedent(f"""
        Layer {self.__class__.__name__} has arguments {extra_args}
        in `__init__` and therefore must override `get_config()`.

        Example:

        class CustomLayer(keras.layers.Layer):
          def __init__(self, arg1, arg2):
            super().__init__()
            self.arg1 = arg1
            self.arg2 = arg2

          def get_config(self):
            config = super().get_config()
            config.update({{
                "arg1": self.arg1,
                "arg2": self.arg2,
            }})
            return config"""))
  return config
@classmethod
def from_config(cls, config):
  """Instantiates a layer from its config dictionary.

  Inverse of `get_config`: rebuilds an equivalent layer (without its
  trained weights) from the serialized form. Layer connectivity is
  handled by `Network` and weights by `set_weights`, not here.

  Args:
    config: A Python dictionary, typically the
        output of get_config.

  Returns:
    A layer instance.
  """
  layer = cls(**config)
  return layer
def compute_output_shape(self, input_shape):
  """Computes the output shape of the layer.

  This method will cause the layer's state to be built, if that has not
  happened before. This requires that the layer will later be used with
  inputs that match the input shape provided here.

  Args:
    input_shape: Shape tuple (tuple of integers)
        or list of shape tuples (one per output tensor of the layer).
        Shape tuples can include None for free dimensions,
        instead of an integer.

  Returns:
    An input shape tuple.

  Raises:
    NotImplementedError: outside eager mode (subclasses must override), or
      in eager mode when calling the layer on placeholders fails.
  """
  if tf.executing_eagerly():
    # In this case we build the model first in order to do shape inference.
    # This is acceptable because the framework only calls
    # `compute_output_shape` on shape values that the layer would later be
    # built for. It would however cause issues in case a user attempts to
    # use `compute_output_shape` manually with shapes that are incompatible
    # with the shape the Layer will be called on (these users will have to
    # implement `compute_output_shape` themselves).
    self._maybe_build(input_shape)
    graph_name = str(self.name) + '_scratch_graph'
    # Trace the layer in a throwaway FuncGraph so the call below leaves no
    # ops behind in any live graph.
    with tf.__internal__.FuncGraph(graph_name).as_default():
      input_shape = tf_utils.convert_shapes(input_shape, to_tuples=False)

      def _make_placeholder_like(shape):
        ph = backend.placeholder(shape=shape, dtype=self.dtype)
        ph._keras_mask = None
        return ph

      inputs = tf.nest.map_structure(_make_placeholder_like, input_shape)
      try:
        outputs = self(inputs, training=False)
      except TypeError as e:
        raise NotImplementedError(
            'We could not automatically infer the static shape of the '
            'layer\'s output. Please implement the '
            '`compute_output_shape` method on your layer (%s).' %
            self.__class__.__name__) from e
    return tf.nest.map_structure(lambda t: t.shape, outputs)
  raise NotImplementedError(
      'Please run in eager mode or implement the `compute_output_shape` '
      'method on your layer (%s).' % self.__class__.__name__)
@doc_controls.for_subclass_implementers
def compute_output_signature(self, input_signature):
  """Computes the output `TensorSpec`(s) of the layer from input specs.

  Unlike a `TensorShape`, a `TensorSpec` carries both shape and dtype.
  Layers whose output dtype differs from their input dtype can override
  this; the default implementation defers to `compute_output_shape` and
  uses the layer's compute dtype (falling back to the first input's
  dtype when the layer has none).

  Args:
    input_signature: Single TensorSpec or nested structure of TensorSpec
      objects, describing a candidate input for the layer.

  Returns:
    Single TensorSpec or nested structure of TensorSpec objects, describing
      how the layer would transform the provided input.

  Raises:
    TypeError: If input_signature contains a non-TensorSpec object.
  """

  def _spec_to_shape(s):
    if not isinstance(s, tf.TensorSpec):
      raise TypeError('Only TensorSpec signature types are supported. '
                      f'Received: {s}.')
    return s.shape

  input_shapes = tf.nest.map_structure(_spec_to_shape, input_signature)
  output_shapes = self.compute_output_shape(input_shapes)

  out_dtype = self._compute_dtype
  if out_dtype is None:
    # No compute dtype on this layer: fall back to the dtype of the
    # first input in the flattened signature.
    flat_specs = tf.nest.flatten(input_signature)
    out_dtype = flat_specs[0].dtype

  def _shape_to_spec(shape):
    return tf.TensorSpec(dtype=out_dtype, shape=shape)

  return tf.nest.map_structure(_shape_to_spec, output_shapes)
def _keras_tensor_symbolic_call(self, inputs, input_masks, args, kwargs):
  """Produces symbolic (KerasTensor) outputs for a symbolic call."""
  if not self.dynamic:
    # Static layer: trace the call in a scratch graph to discover the
    # exact output signature.
    return self._infer_output_signature(inputs, args, kwargs, input_masks)
  # Dynamic (eager-only) layer: the underlying TF graph is disconnected
  # and must never be run, so rely on static shape inference instead.
  # TODO(fchollet): consider py_func as an alternative, which
  # would enable us to run the underlying graph if needed.
  specs = tf.nest.map_structure(
      lambda t: tf.TensorSpec(shape=t.shape, dtype=t.dtype),
      inputs)
  out_specs = self.compute_output_signature(specs)
  return tf.nest.map_structure(keras_tensor.KerasTensor, out_specs)
def _infer_output_signature(self, inputs, args, kwargs, input_masks):
  """Call the layer on input KerasTensors and returns output KerasTensors.

  Traces the layer in a scratch FuncGraph on placeholders standing in for
  the KerasTensor inputs, then wraps the traced outputs back into
  KerasTensors. Side effects: builds the layer, records mask metadata and
  the save spec.
  """
  call_fn = self.call
  # Wrapping `call` function in autograph to allow for dynamic control
  # flow and control dependencies in call. We are limiting this to
  # subclassed layers as autograph is strictly needed only for
  # subclassed layers and models.
  # tf_convert will respect the value of autograph setting in the
  # enclosing tf.function, if any.
  if (base_layer_utils.is_subclassed(self) and
      not base_layer_utils.from_saved_model(self)):
    call_fn = tf.__internal__.autograph.tf_convert(
        self.call, tf.__internal__.autograph.control_status_ctx())
  call_fn = traceback_utils.inject_argument_info_in_traceback(
      call_fn,
      object_name=f'layer "{self.name}" (type {self.__class__.__name__})')

  # We enter a scratch graph and build placeholder inputs inside of it that
  # match the input args.
  # We then call the layer inside of the scratch graph to identify the
  # output signatures, then we build KerasTensors corresponding to those
  # outputs.
  scratch_graph = tf.__internal__.FuncGraph(str(self.name) + '_scratch_graph')
  with scratch_graph.as_default():
    # Replace every KerasTensor (in inputs, args, kwargs and masks) with a
    # placeholder of the same spec inside the scratch graph.
    inputs = tf.nest.map_structure(
        keras_tensor.keras_tensor_to_placeholder, inputs)
    args = tf.nest.map_structure(
        keras_tensor.keras_tensor_to_placeholder, args)
    kwargs = tf.nest.map_structure(
        keras_tensor.keras_tensor_to_placeholder, kwargs)
    input_masks = tf.nest.map_structure(
        keras_tensor.keras_tensor_to_placeholder, input_masks)

    with backend.name_scope(self._name_scope()):  # pylint: disable=not-callable
      with autocast_variable.enable_auto_cast_variables(
          self._compute_dtype_object):
        # Build layer if applicable (if the `build` method has been
        # overridden).
        # TODO(kaftan): do we maybe_build here, or have we already done it?
        self._maybe_build(inputs)
        inputs = self._maybe_cast_inputs(inputs)
        outputs = call_fn(inputs, *args, **kwargs)

      self._handle_activity_regularization(inputs, outputs)
    self._set_mask_metadata(inputs, outputs, input_masks,
                            build_graph=False)
    # Convert the traced graph tensors back into symbolic KerasTensors.
    outputs = tf.nest.map_structure(
        keras_tensor.keras_tensor_from_tensor, outputs)

  self._set_save_spec(inputs, args, kwargs)
  if hasattr(self, '_set_inputs') and not self.inputs:
    # TODO(kaftan): figure out if we need to do this at all
    # Subclassed network: explicitly set metadata normally set by
    # a call to self._set_inputs().
    self._set_inputs(inputs, outputs)

  del scratch_graph
  return outputs
@generic_utils.default
def compute_mask(self, inputs, mask=None):  # pylint: disable=unused-argument
  """Computes an output mask tensor.

  Args:
    inputs: Tensor or list of tensors.
    mask: Tensor or list of tensors.

  Returns:
    None or a tensor (or list of tensors,
        one per output tensor of the layer).

  Raises:
    TypeError: if the layer does not support masking but a non-None mask
      was passed.
  """
  if self._supports_masking:
    # Default behavior when masking is supported: carry the input mask
    # over to the output unchanged.
    return mask
  flat_masks = tf.nest.flatten(mask)
  if any(m is not None for m in flat_masks):
    raise TypeError('Layer ' + self.name + ' does not support masking, '
                    'but was passed an input_mask: ' + str(mask))
  # Masking not explicitly supported and no mask given: nothing to emit.
  return None
@traceback_utils.filter_traceback
def __call__(self, *args, **kwargs):
  """Wraps `call`, applying pre- and post-processing steps.

  Args:
    *args: Positional arguments to be passed to `self.call`.
    **kwargs: Keyword arguments to be passed to `self.call`.

  Returns:
    Output tensor(s).

  Note:
    - The following optional keyword arguments are reserved for specific uses:
      * `training`: Boolean scalar tensor of Python boolean indicating
        whether the `call` is meant for training or inference.
      * `mask`: Boolean input mask.
    - If the layer's `call` method takes a `mask` argument (as some Keras
      layers do), its default value will be set to the mask generated
      for `inputs` by the previous layer (if `input` did come from
      a layer that generated a corresponding mask, i.e. if it came from
      a Keras layer with masking support.
    - If the layer is not built, the method will call `build`.

  Raises:
    ValueError: if the layer's `call` method returns None (an invalid value).
    RuntimeError: if `super().__init__()` was not called in the constructor.
  """
  # `_thread_local` is created by `Layer.__init__`; its absence means the
  # subclass constructor never chained up.
  if not hasattr(self, '_thread_local'):
    raise RuntimeError(
        'You must call `super().__init__()` in the layer constructor.')

  # `inputs` (the first arg in the method spec) is special cased in
  # layer call due to historical reasons.
  # This special casing currently takes the form of:
  # - 'inputs' must be explicitly passed. A layer cannot have zero arguments,
  #   and inputs cannot have been provided via the default value of a kwarg.
  # - numpy/scalar values in `inputs` get converted to tensors
  # - implicit masks / mask metadata are only collected from 'inputs`
  # - Layers are built using shape info from 'inputs' only
  # - input_spec compatibility is only checked against `inputs`
  # - mixed precision casting (autocast) is only applied to `inputs`,
  #   not to any other argument.
  inputs, args, kwargs = self._split_out_first_arg(args, kwargs)
  input_list = tf.nest.flatten(inputs)

  # Functional Model construction mode is invoked when `Layer`s are called on
  # symbolic `KerasTensor`s, i.e.:
  # >> inputs = tf.keras.Input(10)
  # >> outputs = MyLayer()(inputs)  # Functional construction mode.
  # >> model = tf.keras.Model(inputs, outputs)
  if _in_functional_construction_mode(self, inputs, args, kwargs, input_list):
    return self._functional_construction_call(inputs, args, kwargs,
                                              input_list)

  # Maintains info about the `Layer.call` stack.
  call_context = base_layer_utils.call_context()

  # Accept NumPy and scalar inputs by converting to Tensors.
  if any(isinstance(x, (
      tf.Tensor, np.ndarray, float, int)) for x in input_list):
    inputs = tf.nest.map_structure(_convert_numpy_or_python_types, inputs)
    input_list = tf.nest.flatten(inputs)

  # Handle `mask` propagation from previous layer to current layer. Masks can
  # be propagated explicitly via the `mask` argument, or implicitly via
  # setting the `_keras_mask` attribute on the inputs to a Layer. Masks passed
  # explicitly take priority.
  input_masks, mask_is_implicit = self._get_input_masks(
      inputs, input_list, args, kwargs)
  if self._expects_mask_arg and mask_is_implicit:
    kwargs['mask'] = input_masks

  # Training mode for `Layer.call` is set via (in order of priority):
  # (1) The `training` argument passed to this `Layer.call`, if it is not None
  # (2) The training mode of an outer `Layer.call`.
  # (3) The default mode set by `tf.keras.backend.set_learning_phase` (if set)
  # (4) Any non-None default value for `training` specified in the call
  #  signature
  # (5) False (treating the layer as if it's in inference)
  args, kwargs, training_mode = self._set_training_mode(
      args, kwargs, call_context)

  # Losses are cleared for all sublayers on the outermost `Layer.call`.
  # Losses are not cleared on inner `Layer.call`s, because sublayers can be
  # called multiple times.
  if not call_context.in_call:
    self._clear_losses()

  # Execution mode is sampled once here and drives both the graph-building
  # flag of the call context and the autograph decision below.
  eager = tf.executing_eagerly()
  with call_context.enter(
      layer=self,
      inputs=inputs,
      build_graph=not eager,
      training=training_mode):

    input_spec.assert_input_compatibility(self.input_spec, inputs, self.name)

    if eager:
      call_fn = self.call
      name_scope = self._name
    else:
      name_scope = self._name_scope()  # Avoid autoincrementing.  # pylint: disable=not-callable
      call_fn = self._autographed_call()
    call_fn = traceback_utils.inject_argument_info_in_traceback(
        call_fn,
        object_name=f'layer "{self.name}" (type {self.__class__.__name__})')

    with tf.name_scope(name_scope):
      if not self.built:
        self._maybe_build(inputs)

      if self._autocast:
        inputs = self._maybe_cast_inputs(inputs, input_list)

      with autocast_variable.enable_auto_cast_variables(
          self._compute_dtype_object):
        outputs = call_fn(inputs, *args, **kwargs)

      if self._activity_regularizer:
        self._handle_activity_regularization(inputs, outputs)
      if self._supports_masking:
        self._set_mask_metadata(inputs, outputs, input_masks, not eager)
      if self._saved_model_inputs_spec is None:
        self._set_save_spec(inputs, args, kwargs)

      return outputs
def _functional_construction_call(self, inputs, args, kwargs, input_list):
  """Handles a layer call on symbolic `KerasTensor` inputs.

  Used during Functional Model construction: resolves masks and the
  `training` argument, traces the layer symbolically via
  `_keras_tensor_symbolic_call`, and records node connectivity metadata.

  Returns:
    The symbolic (KerasTensor) outputs of the layer.

  Raises:
    ValueError: if the layer's `call` returns None.
  """
  call_context = base_layer_utils.call_context()

  # Accept NumPy and scalar inputs by converting to Tensors.
  if any(isinstance(x, (
      tf.Tensor, np.ndarray, float, int)) for x in input_list):

    def _convert_non_tensor(x):
      # Don't call `ops.convert_to_tensor` on all `inputs` because
      # `SparseTensors` can't be converted to `Tensor`.
      if isinstance(x, (tf.Tensor, np.ndarray, float, int)):
        return tf.convert_to_tensor(x)
      return x

    inputs = tf.nest.map_structure(_convert_non_tensor, inputs)
    input_list = tf.nest.flatten(inputs)

  # Handle `mask` propagation from previous layer to current layer. Masks can
  # be propagated explicitly via the `mask` argument, or implicitly via
  # setting the `_keras_mask` attribute on the inputs to a Layer. Masks passed
  # explicitly take priority.
  mask_arg_passed_by_framework = False
  input_masks, mask_is_implicit = self._get_input_masks(
      inputs, input_list, args, kwargs)
  if self._expects_mask_arg and mask_is_implicit:
    kwargs['mask'] = input_masks
    mask_arg_passed_by_framework = True

  # If `training` argument is None or not explicitly passed,
  # propagate `training` value from this layer's calling layer.
  training_value = None
  training_arg_passed_by_framework = False
  # Priority 1: `training` was explicitly passed a non-None value.
  if self._call_arg_was_passed('training', args, kwargs):
    training_value = self._get_call_arg_value('training', args, kwargs)
    if not self._expects_training_arg:
      kwargs.pop('training')

  if training_value is None:
    # Priority 2: `training` was passed to a parent layer.
    if call_context.training is not None:
      training_value = call_context.training
    # Priority 3: `learning_phase()` has been set.
    elif backend.global_learning_phase_is_set():
      training_value = backend.learning_phase()
      # Force the training_value to be bool type which matches to the contract
      # for layer/model call args.
      if tf.is_tensor(training_value):
        training_value = tf.cast(training_value, tf.bool)
      else:
        training_value = bool(training_value)
    # Priority 4: trace layer with the default training argument specified
    # in the `call` signature (or in inference mode if the `call` signature
    # specifies no non-None default).
    else:
      training_value = self._default_training_arg
    # In cases (2), (3), (4) the training argument is passed automatically
    # by the framework, and will not be hard-coded into the model.
    if self._expects_training_arg:
      args, kwargs = self._set_call_arg_value('training', training_value,
                                              args, kwargs)
      training_arg_passed_by_framework = True

  with call_context.enter(
      layer=self, inputs=inputs, build_graph=True, training=training_value):
    # Check input assumptions set after layer building, e.g. input shape.
    outputs = self._keras_tensor_symbolic_call(
        inputs, input_masks, args, kwargs)

    if outputs is None:
      raise ValueError('A layer\'s `call` method should return a '
                       'Tensor or a list of Tensors, not None '
                       '(layer: ' + self.name + ').')
    # Framework-injected `training`/`mask` arguments are stripped again so
    # they are not hard-coded into the recorded node.
    if training_arg_passed_by_framework:
      args, kwargs = self._set_call_arg_value(
          'training', None, args, kwargs, pop_kwarg_if_none=True)
    if mask_arg_passed_by_framework:
      kwargs.pop('mask')
    # Node connectivity does not special-case the first argument.
    outputs = self._set_connectivity_metadata((inputs,) + args, kwargs,
                                              outputs)
    return outputs
def _set_training_mode(self, args, kwargs, call_context):
  """Resolves the effective `training` mode for this `Layer.call`.

  Args:
    args: Positional call arguments (may contain `training`).
    kwargs: Keyword call arguments (may contain `training`).
    call_context: The current `CallContext`, used to inherit the training
      mode of an outer `Layer.call`.

  Returns:
    A `(args, kwargs, training_mode)` triple. `args`/`kwargs` have
    `training` injected (when the layer expects it) or stripped (when it
    does not); `training_mode` is the resolved value, possibly None.
  """
  training_mode = None
  if self._expects_training_arg:
    # (1) `training` was passed to this `Layer.call`.
    if self._call_arg_was_passed('training', args, kwargs):
      training_mode = self._get_call_arg_value('training', args, kwargs)
    # If no `training` arg was passed, or `None` was explicitly passed,
    # the framework will make a decision about the training mode is.
    if training_mode is None:
      call_ctx_training = call_context.training
      # (2) `training` mode is inferred from an outer `Layer.call`.
      if call_ctx_training is not None:
        training_mode = call_ctx_training
      # (3) User set `tf.keras.backend.set_learning_phase`.
      elif backend.global_learning_phase_is_set():
        training_mode = backend.learning_phase()
        # Ensure value is a `bool` or `tf.bool`.
        if isinstance(training_mode, bool):
          pass
        elif tf.is_tensor(training_mode):
          training_mode = tf.cast(training_mode, tf.bool)
        else:
          training_mode = bool(training_mode)
      # (4) We default to using `call`'s default value for `training`,
      # or treating the layer as if it is in inference if no non-None default
      # is specified in the `call` signature.
      else:
        training_mode = self._default_training_arg

      # For case (2), (3), (4) `training` arg is passed by framework.
      args, kwargs = self._set_call_arg_value('training', training_mode, args,
                                              kwargs)
  else:
    if 'training' in kwargs:
      # `training` was passed to this `Layer` but is not needed for
      # `Layer.call`. It will set the default mode for inner `Layer.call`s.
      training_mode = kwargs.pop('training')
    else:
      # Grab the current `training` mode from any outer `Layer.call`.
      training_mode = call_context.training

  return args, kwargs, training_mode
def _autographed_call(self):
  """Returns `self.call`, autograph-converted when required."""
  # Autograph conversion is only strictly needed for subclassed layers
  # and models (to support dynamic control flow and control dependencies
  # in `call`), so it is limited to those. tf_convert will respect the
  # value of the autograph setting in the enclosing tf.function, if any.
  needs_conversion = (
      base_layer_utils.is_subclassed(self) and
      not base_layer_utils.from_saved_model(self))
  if not needs_conversion:
    return self.call
  return tf.__internal__.autograph.tf_convert(
      self.call, tf.__internal__.autograph.control_status_ctx())
@property
def dtype(self):
  """The dtype of the layer weights.

  Alias for `Layer.dtype_policy.variable_dtype`. When mixed precision is
  not in use, this matches `Layer.compute_dtype`, the dtype of the
  layer's computations.
  """
  dtype_policy = self._dtype_policy
  return dtype_policy.variable_dtype
@property
def name(self):
  """The layer's name (a string), fixed in the constructor."""
  return self._name
@property
def supports_masking(self):
  """Whether the layer can compute a mask via `compute_mask`."""
  return self._supports_masking

@supports_masking.setter
def supports_masking(self, value):
  self._supports_masking = value
@property
def dynamic(self):
  """Whether the layer is dynamic (eager-only); set in the constructor."""
  # A layer is dynamic if it, or any layer in its subtree, was marked so.
  for sublayer in self._flatten_layers():
    if sublayer._dynamic:
      return True
  return False
@property
@doc_controls.do_not_doc_inheritable
def stateful(self):
  # Stateful if this layer or any sublayer carries the stateful flag.
  return any(sublayer._stateful for sublayer in self._flatten_layers())

@stateful.setter
def stateful(self, value):
  self._stateful = value
@property
def trainable(self):
  return self._trainable

@trainable.setter
def trainable(self, value):
  """Sets the trainable flag on this layer and every sublayer.

  If this value is toggled during training (e.g. from a
  `tf.keras.callbacks.Callback`), the parent
  `tf.keras.Model.make_train_function` must be called with `force=True`
  so the training graph is recompiled.

  Args:
    value: Boolean with the desired state for the layer's trainable attribute.
  """
  for sublayer in self._flatten_layers():
    sublayer._trainable = value
@property
def activity_regularizer(self):
  """Optional regularizer function applied to the output of this layer."""
  return self._activity_regularizer

@activity_regularizer.setter
def activity_regularizer(self, regularizer):
  """Sets the optional regularizer function applied to this layer's output."""
  self._activity_regularizer = regularizer
@property
def input_spec(self):
  """`InputSpec` instance(s) describing the input format for this layer.

  Setting `self.input_spec` in a layer subclass enables automatic input
  compatibility checks whenever the layer is called. For example, a
  `Conv2D` layer only accepts a single rank-4 tensor, which it can
  declare in `__init__()`:

  ```python
  self.input_spec = tf.keras.layers.InputSpec(ndim=4)
  ```

  Calling the layer on anything else — say an input of shape `(2,)` —
  then raises a nicely-formatted error:

  ```
  ValueError: Input 0 of layer conv2d is incompatible with the layer:
  expected ndim=4, found ndim=1. Full shape received: [2]
  ```

  Checks expressible via `input_spec` include structure (single input,
  list of 2 inputs, etc), shape, rank (ndim), and dtype. See
  `tf.keras.layers.InputSpec` for details.

  Returns:
    A `tf.keras.layers.InputSpec` instance, or nested structure thereof.
  """
  return self._input_spec

@input_spec.setter
# The no-tracking decorator is required: the input_spec can be a nested
# structure of InputSpec objects, which must not be tracked.
@tf.__internal__.tracking.no_automatic_dependency_tracking
def input_spec(self, value):
  for spec in tf.nest.flatten(value):
    if spec is None or isinstance(spec, InputSpec):
      continue
    raise TypeError('Layer input_spec must be an instance of InputSpec. '
                    'Got: {}'.format(spec))
  self._input_spec = value
@property
def trainable_weights(self):
  """List of all trainable weights tracked by this layer.

  Trainable weights are updated via gradient descent during training.

  Returns:
    A list of trainable variables.
  """
  # A frozen layer exposes no trainable weights at all.
  if not self.trainable:
    return []
  child_weights = self._gather_children_attribute('trainable_variables')
  return self._dedup_weights(self._trainable_weights + child_weights)
@property
def non_trainable_weights(self):
  """List of all non-trainable weights tracked by this layer.

  Non-trainable weights are *not* updated during training. They are expected
  to be updated manually in `call()`.

  Returns:
    A list of non-trainable variables.
  """
  if self.trainable:
    weights = (
        self._non_trainable_weights +
        self._gather_children_attribute('non_trainable_variables'))
  else:
    # A frozen layer reports every weight — its own trainable and
    # non-trainable ones plus all of its children's — as non-trainable.
    weights = (
        self._trainable_weights + self._non_trainable_weights +
        self._gather_children_attribute('variables'))
  return self._dedup_weights(weights)
@property
def weights(self):
  """Returns the list of all layer variables/weights.

  Trainable weights come first, followed by non-trainable ones.

  Returns:
    A list of variables.
  """
  return self.trainable_weights + self.non_trainable_weights
@property
@doc_controls.do_not_generate_docs
def updates(self):
  # Legacy TF1 property; in TF2 `updates` are applied automatically, so
  # there is nothing to collect here.
  deprecation_message = (
      '`layer.updates` will be removed in a future version. '
      'This property should not be used in TensorFlow 2.0, '
      'as `updates` are applied automatically.')
  warnings.warn(deprecation_message, stacklevel=2)
  return []
@property
def losses(self):
  """List of losses added using the `add_loss()` API.

  Variable regularization tensors are created when this property is accessed,
  so it is eager safe: accessing `losses` under a `tf.GradientTape` will
  propagate gradients back to the corresponding variables.

  Examples:

  >>> class MyLayer(tf.keras.layers.Layer):
  ...   def call(self, inputs):
  ...     self.add_loss(tf.abs(tf.reduce_mean(inputs)))
  ...     return inputs
  >>> l = MyLayer()
  >>> l(np.ones((10, 1)))
  >>> l.losses
  [1.0]

  >>> inputs = tf.keras.Input(shape=(10,))
  >>> x = tf.keras.layers.Dense(10)(inputs)
  >>> outputs = tf.keras.layers.Dense(1)(x)
  >>> model = tf.keras.Model(inputs, outputs)
  >>> # Activity regularization.
  >>> len(model.losses)
  0
  >>> model.add_loss(tf.abs(tf.reduce_mean(x)))
  >>> len(model.losses)
  1

  >>> inputs = tf.keras.Input(shape=(10,))
  >>> d = tf.keras.layers.Dense(10, kernel_initializer='ones')
  >>> x = d(inputs)
  >>> outputs = tf.keras.layers.Dense(1)(x)
  >>> model = tf.keras.Model(inputs, outputs)
  >>> # Weight regularization.
  >>> model.add_loss(lambda: tf.reduce_mean(d.kernel))
  >>> model.losses
  [<tf.Tensor: shape=(), dtype=float32, numpy=1.0>]

  Returns:
    A list of tensors.
  """
  collected_losses = []
  for layer in self._flatten_layers():
    # If any eager losses are present, we assume the model to be part of an
    # eager training loop (either a custom one or the one used when
    # `run_eagerly=True`) and so we always return just the eager losses.
    if layer._eager_losses:
      # Filter placeholder losses that may have been added by revived layers.
      # (see base_layer_utils for details).
      if (layer._eager_losses[0] is
          not base_layer_utils.REVIVED_LOSS_PLACEHOLDER):
        collected_losses.extend(layer._eager_losses)
    else:
      collected_losses.extend(layer._losses)
    # Callable losses (e.g. weight regularizers registered via a
    # zero-argument lambda) are evaluated now, on every access.
    for regularizer in layer._callable_losses:
      loss_tensor = regularizer()
      if loss_tensor is not None:
        collected_losses.append(loss_tensor)
  return collected_losses
def add_loss(self, losses, **kwargs):
  """Add loss tensor(s), potentially dependent on layer inputs.

  Some losses (for instance, activity regularization losses) may be dependent
  on the inputs passed when calling a layer. Hence, when reusing the same
  layer on different inputs `a` and `b`, some entries in `layer.losses` may
  be dependent on `a` and some on `b`. This method automatically keeps track
  of dependencies.

  This method can be used inside a subclassed layer or model's `call`
  function, in which case `losses` should be a Tensor or list of Tensors.

  Example:

  ```python
  class MyLayer(tf.keras.layers.Layer):
    def call(self, inputs):
      self.add_loss(tf.abs(tf.reduce_mean(inputs)))
      return inputs
  ```

  This method can also be called directly on a Functional Model during
  construction. In this case, any loss Tensors passed to this Model must
  be symbolic and be able to be traced back to the model's `Input`s. These
  losses become part of the model's topology and are tracked in `get_config`.

  Example:

  ```python
  inputs = tf.keras.Input(shape=(10,))
  x = tf.keras.layers.Dense(10)(inputs)
  outputs = tf.keras.layers.Dense(1)(x)
  model = tf.keras.Model(inputs, outputs)
  # Activity regularization.
  model.add_loss(tf.abs(tf.reduce_mean(x)))
  ```

  If this is not the case for your loss (if, for example, your loss references
  a `Variable` of one of the model's layers), you can wrap your loss in a
  zero-argument lambda. These losses are not tracked as part of the model's
  topology since they can't be serialized.

  Example:

  ```python
  inputs = tf.keras.Input(shape=(10,))
  d = tf.keras.layers.Dense(10)
  x = d(inputs)
  outputs = tf.keras.layers.Dense(1)(x)
  model = tf.keras.Model(inputs, outputs)
  # Weight regularization.
  model.add_loss(lambda: tf.reduce_mean(d.kernel))
  ```

  Args:
    losses: Loss tensor, or list/tuple of tensors. Rather than tensors, losses
      may also be zero-argument callables which create a loss tensor.
    **kwargs: Additional keyword arguments for backward compatibility.
      Accepted values:
        inputs - Deprecated, will be automatically inferred.

  Raises:
    TypeError: if an unknown keyword argument is passed.
    ValueError: if an eager tensor is passed outside of a `call` context.
  """
  kwargs.pop('inputs', None)
  if kwargs:
    raise TypeError('Unknown keyword arguments: %s' % (kwargs.keys(),))

  def _tag_callable(loss):
    """Tags callable loss tensor as `_unconditional_loss`."""
    if callable(loss):
      # We run the loss without autocasting, as regularizers are often
      # numerically unstable in float16.
      with autocast_variable.enable_auto_cast_variables(None):
        loss = loss()
    if loss is None:
      return None  # Will be filtered out when computing the .losses property
    if not tf.is_tensor(loss):
      loss = tf.convert_to_tensor(
          loss, dtype=backend.floatx())
    loss._unconditional_loss = True  # pylint: disable=protected-access
    return loss

  losses = tf.nest.flatten(losses)

  # Sort incoming losses into three buckets: deferred callables, eager
  # tensors, and symbolic (graph/KerasTensor) losses.
  callable_losses = []
  eager_losses = []
  symbolic_losses = []
  for loss in losses:
    if callable(loss):
      callable_losses.append(functools.partial(_tag_callable, loss))
      continue
    if loss is None:
      continue
    if not tf.is_tensor(loss) and not isinstance(
        loss, keras_tensor.KerasTensor):
      loss = tf.convert_to_tensor(
          loss, dtype=backend.floatx())
    # TF Functions should take the eager path.
    if ((tf_utils.is_symbolic_tensor(loss) or
         isinstance(loss, keras_tensor.KerasTensor)) and
        not base_layer_utils.is_in_tf_function()):
      symbolic_losses.append(loss)
    elif tf.is_tensor(loss):
      eager_losses.append(loss)

  self._callable_losses.extend(callable_losses)

  in_call_context = base_layer_utils.call_context().in_call
  if eager_losses and not in_call_context:
    raise ValueError(
        'Expected a symbolic Tensors or a callable for the loss value. '
        'Please wrap your loss computation in a zero argument `lambda`.')

  self._eager_losses.extend(eager_losses)

  for symbolic_loss in symbolic_losses:
    if getattr(self, '_is_graph_network', False):
      self._graph_network_add_loss(symbolic_loss)
    else:
      # Possible a loss was added in a Layer's `build`.
      self._losses.append(symbolic_loss)
def _clear_losses(self):
  """Used every step in eager to reset losses."""
  # Set to thread local directly to avoid Layer.__setattr__ overhead.
  if not getattr(self, '_self_tracked_trackables',
                 None):  # Fast path for single Layer.
    self._thread_local._eager_losses = []
  else:
    # Nested layers each hold their own thread-local eager-loss list, so
    # every sub-layer must be cleared individually.
    for layer in self._flatten_layers():
      layer._thread_local._eager_losses = []
@property
def metrics(self):
  """List of metrics added using the `add_metric()` API.

  Example:

  >>> input = tf.keras.layers.Input(shape=(3,))
  >>> d = tf.keras.layers.Dense(2)
  >>> output = d(input)
  >>> d.add_metric(tf.reduce_max(output), name='max')
  >>> d.add_metric(tf.reduce_min(output), name='min')
  >>> [m.name for m in d.metrics]
  ['max', 'min']

  Returns:
    A list of `Metric` objects.
  """
  collected_metrics = []
  for layer in self._flatten_layers():
    # Layers that never initialized `_metrics_lock` cannot have metrics.
    if not hasattr(layer, '_metrics_lock'):
      continue
    # Snapshot under the lock so concurrent `add_metric` calls are safe.
    with layer._metrics_lock:
      collected_metrics.extend(layer._metrics)
  return collected_metrics
def add_metric(self, value, name=None, **kwargs):
  """Adds metric tensor to the layer.

  This method can be used inside the `call()` method of a subclassed layer
  or model.

  ```python
  class MyMetricLayer(tf.keras.layers.Layer):
    def __init__(self):
      super(MyMetricLayer, self).__init__(name='my_metric_layer')
      self.mean = tf.keras.metrics.Mean(name='metric_1')

    def call(self, inputs):
      self.add_metric(self.mean(inputs))
      self.add_metric(tf.reduce_sum(inputs), name='metric_2')
      return inputs
  ```

  This method can also be called directly on a Functional Model during
  construction. In this case, any tensor passed to this Model must
  be symbolic and be able to be traced back to the model's `Input`s. These
  metrics become part of the model's topology and are tracked when you
  save the model via `save()`.

  ```python
  inputs = tf.keras.Input(shape=(10,))
  x = tf.keras.layers.Dense(10)(inputs)
  outputs = tf.keras.layers.Dense(1)(x)
  model = tf.keras.Model(inputs, outputs)
  model.add_metric(math_ops.reduce_sum(x), name='metric_1')
  ```

  Note: Calling `add_metric()` with the result of a metric object on a
  Functional Model is not supported, because the metric result tensor
  cannot be traced back to the model's inputs.

  Args:
    value: Metric tensor.
    name: String metric name.
    **kwargs: Additional keyword arguments for backward compatibility.
      Accepted values:
      `aggregation` - When the `value` tensor provided is not the result of
      calling a `keras.Metric` instance, it will be aggregated by default
      using a `keras.Metric.Mean`.

  Raises:
    TypeError: If an unexpected keyword argument is passed.
    ValueError: If no name can be determined for the metric, if `value` is
      not symbolic outside of a `call`, or if a `Metric` result is added
      to a Functional model.
  """
  # Only the legacy `aggregation` kwarg is tolerated.
  kwargs_keys = list(kwargs.keys())
  if (len(kwargs_keys) > 1 or
      (len(kwargs_keys) == 1 and kwargs_keys[0] != 'aggregation')):
    raise TypeError(f'Unknown keyword arguments: {kwargs.keys()}. '
                    'Expected `aggregation`.')
  from_metric_obj = hasattr(value, '_metric_obj')
  is_symbolic = isinstance(value, keras_tensor.KerasTensor)
  in_call_context = base_layer_utils.call_context().in_call
  if name is None and not from_metric_obj:
    # Eg. `self.add_metric(math_ops.reduce_sum(x))`
    # In eager mode, we use metric name to lookup a metric. Without a name,
    # a new Mean metric wrapper will be created on every model/layer call.
    # So, we raise an error when no name is provided.
    # We will do the same for symbolic mode for consistency although a name
    # will be generated if no name is provided.
    # We will not raise this error in the foll use case for the sake of
    # consistency as name in provided in the metric constructor.
    # mean = metrics.Mean(name='my_metric')
    # model.add_metric(mean(outputs))
    raise ValueError('Please provide a name for your metric like '
                     '`self.add_metric(tf.reduce_sum(inputs), '
                     'name=\'mean_activation\')`')
  elif from_metric_obj:
    name = value._metric_obj.name

  if not in_call_context and not is_symbolic:
    raise ValueError('Expected a symbolic Tensor for the metric value, '
                     'received: ' + str(value))

  # If a metric was added in a Layer's `call` or `build`.
  if in_call_context or not getattr(self, '_is_graph_network', False):
    # TF Function path should take the eager path.
    # If the given metric is available in `metrics` list we just update state
    # on it, otherwise we create a new metric instance and
    # add it to the `metrics` list.
    metric_obj = getattr(value, '_metric_obj', None)
    # Tensors that come from a Metric object already updated the Metric state.
    should_update_state = not metric_obj
    name = metric_obj.name if metric_obj else name

    with self._metrics_lock:
      match = self._get_existing_metric(name)
      if match:
        metric_obj = match
      elif metric_obj:
        self._metrics.append(metric_obj)
      else:
        # Build the metric object with the value's dtype if it defines one
        metric_obj = metrics_mod.Mean(
            name=name, dtype=getattr(value, 'dtype', None))
        self._metrics.append(metric_obj)

    if should_update_state:
      metric_obj(value)
  else:
    if from_metric_obj:
      raise ValueError('Using the result of calling a `Metric` object '
                       'when calling `add_metric` on a Functional '
                       'Model is not supported. Please pass the '
                       'Tensor to monitor directly.')

    # Insert layers into the Keras Graph Network.
    aggregation = None if from_metric_obj else 'mean'
    self._graph_network_add_metric(value, aggregation, name)
@doc_controls.do_not_doc_inheritable
def add_update(self, updates, inputs=None):
  """Add update op(s), potentially dependent on layer inputs.

  Weight updates (for instance, the updates of the moving mean and variance
  in a BatchNormalization layer) may be dependent on the inputs passed
  when calling a layer. Hence, when reusing the same layer on
  different inputs `a` and `b`, some entries in `layer.updates` may be
  dependent on `a` and some on `b`. This method automatically keeps track
  of dependencies.

  This call is ignored when eager execution is enabled (in that case, variable
  updates are run on the fly and thus do not need to be tracked for later
  execution).

  Args:
    updates: Update op, or list/tuple of update ops, or zero-arg callable
      that returns an update op. A zero-arg callable should be passed in
      order to disable running the updates by setting `trainable=False`
      on this Layer, when executing in Eager mode.
    inputs: Deprecated, will be automatically inferred.
  """
  if inputs is not None:
    tf_logging.warning(
        '`add_update` `inputs` kwarg has been deprecated. You no longer need '
        'to pass a value to `inputs` as it is being automatically inferred.')
  call_context = base_layer_utils.call_context()
  # No need to run updates during Functional API construction.
  if call_context.in_keras_graph:
    return

  # Callable updates are disabled by setting `trainable=False`.
  if not call_context.frozen:
    # Eager path: run each callable update immediately instead of
    # recording it for later execution.
    for update in tf.nest.flatten(updates):
      if callable(update):
        update()  # pylint: disable=not-callable
def set_weights(self, weights):
  """Sets the weights of the layer, from NumPy arrays.

  The weights of a layer represent the state of the layer. This function
  sets the weight values from numpy arrays. The weight values should be
  passed in the order they are created by the layer. Note that the layer's
  weights must be instantiated before calling this function, by calling
  the layer.

  For example, a `Dense` layer returns a list of two values: the kernel
  matrix and the bias vector. These can be used to set the weights of
  another `Dense` layer:

  >>> layer_a = tf.keras.layers.Dense(1,
  ...   kernel_initializer=tf.constant_initializer(1.))
  >>> a_out = layer_a(tf.convert_to_tensor([[1., 2., 3.]]))
  >>> layer_b = tf.keras.layers.Dense(1,
  ...   kernel_initializer=tf.constant_initializer(2.))
  >>> b_out = layer_b(tf.convert_to_tensor([[10., 20., 30.]]))
  >>> layer_b.set_weights(layer_a.get_weights())
  >>> layer_b.get_weights()
  [array([[1.],
         [1.],
         [1.]], dtype=float32), array([0.], dtype=float32)]

  Args:
    weights: a list of NumPy arrays. The number
      of arrays and their shape must match
      number of the dimensions of the weights
      of the layer (i.e. it should match the
      output of `get_weights`).

  Raises:
    ValueError: If the provided weights list does not match the
      layer's specifications.
  """
  params = self.weights
  # A TrackableWeightHandler may be backed by several tensors, so the
  # expected length of `weights` is not simply len(params).
  expected_num_weights = 0
  for param in params:
    if isinstance(param, base_layer_utils.TrackableWeightHandler):
      expected_num_weights += param.num_tensors
    else:
      expected_num_weights += 1

  if expected_num_weights != len(weights):
    raise ValueError(
        'You called `set_weights(weights)` on layer "%s" '
        'with a weight list of length %s, but the layer was '
        'expecting %s weights. Provided weights: %s...' %
        (self.name, len(weights), expected_num_weights, str(weights)[:50]))

  # Walk `weights` with an explicit cursor because handlers consume a
  # variable number of entries.
  weight_index = 0
  weight_value_tuples = []
  for param in params:
    if isinstance(param, base_layer_utils.TrackableWeightHandler):
      num_tensors = param.num_tensors
      tensors = weights[weight_index:weight_index + num_tensors]
      param.set_weights(tensors)
      weight_index += num_tensors
    else:
      weight = weights[weight_index]
      weight_shape = weight.shape if hasattr(weight, 'shape') else ()
      ref_shape = param.shape
      if not ref_shape.is_compatible_with(weight_shape):
        raise ValueError(
            f'Layer {self.name} weight shape {ref_shape} '
            'is not compatible with provided weight '
            f'shape {weight_shape}.')
      weight_value_tuples.append((param, weight))
      weight_index += 1

  # Assign all plain variables in a single batched call.
  backend.batch_set_value(weight_value_tuples)

  # Perform any layer defined finalization of the layer state.
  for layer in self._flatten_layers():
    layer.finalize_state()
def get_weights(self):
  """Returns the current weights of the layer, as NumPy arrays.

  Both trainable and non-trainable weight values associated with this
  layer are returned as a list of NumPy arrays, in the order the weights
  were created. The list can be passed to `set_weights` on a layer with
  a matching parameterization to restore its state.

  Returns:
    Weights values as a list of NumPy arrays.
  """
  flat_weights = []
  for w in self.weights:
    # A TrackableWeightHandler can expand into several backing tensors;
    # plain variables contribute a single entry.
    if isinstance(w, base_layer_utils.TrackableWeightHandler):
      flat_weights.extend(w.get_tensors())
    else:
      flat_weights.append(w)
  # Fetch every value in one batched call.
  return backend.batch_get_value(flat_weights)
@doc_controls.do_not_generate_docs
def finalize_state(self):
  """Finalizes the layers state after updating layer weights.

  This function can be subclassed in a layer and will be called after
  updating a layer weights. It can be overridden to finalize any
  additional layer state after a weight update.

  This function will be called after weights of a layer have been restored
  from a loaded model.
  """
  # Intentionally a no-op; subclasses override when needed.
  pass
@doc_controls.do_not_generate_docs
def get_updates_for(self, inputs):
  """Deprecated, do NOT use!

  Retrieves updates relevant to a specific set of inputs.

  Args:
    inputs: Input tensor or list/tuple of input tensors.

  Returns:
    List of update ops of the layer that depend on `inputs`.
  """
  warnings.warn(
      '`layer.get_updates_for` is deprecated and will be removed in a '
      'future version. Please use `layer.updates` method instead.',
      stacklevel=2)
  return self.updates
@doc_controls.do_not_generate_docs
def get_losses_for(self, inputs):
  """Deprecated, do NOT use!

  Retrieves losses relevant to a specific set of inputs.

  Args:
    inputs: Input tensor or list/tuple of input tensors.

  Returns:
    List of loss tensors of the layer that depend on `inputs`.
  """
  warnings.warn(
      '`layer.get_losses_for` is deprecated and will be removed in a '
      'future version. Please use `layer.losses` instead.',
      stacklevel=2)
  return self.losses
@doc_controls.do_not_doc_inheritable
def get_input_mask_at(self, node_index):
  """Retrieves the input mask tensor(s) of a layer at a given node.

  Args:
    node_index: Integer, index of the node from which to retrieve the
      attribute. E.g. `node_index=0` will correspond to the first time
      the layer was called.

  Returns:
    A mask tensor (or list of tensors if the layer has multiple inputs).
  """
  node_inputs = self.get_input_at(node_index)
  if not isinstance(node_inputs, list):
    return getattr(node_inputs, '_keras_mask', None)
  return [getattr(t, '_keras_mask', None) for t in node_inputs]
@doc_controls.do_not_doc_inheritable
def get_output_mask_at(self, node_index):
  """Retrieves the output mask tensor(s) of a layer at a given node.

  Args:
    node_index: Integer, index of the node from which to retrieve the
      attribute. E.g. `node_index=0` will correspond to the first time
      the layer was called.

  Returns:
    A mask tensor (or list of tensors if the layer has multiple outputs).
  """
  node_outputs = self.get_output_at(node_index)
  if not isinstance(node_outputs, list):
    return getattr(node_outputs, '_keras_mask', None)
  return [getattr(t, '_keras_mask', None) for t in node_outputs]
@property
@doc_controls.do_not_doc_inheritable
def input_mask(self):
  """Retrieves the input mask tensor(s) of a layer.

  Only applicable if the layer has exactly one inbound node,
  i.e. if it is connected to one incoming layer.

  Returns:
    Input mask tensor (potentially None) or list of input mask tensors.

  Raises:
    AttributeError: if the layer is connected to more than one incoming
      layers.
  """
  layer_inputs = self.input
  if not isinstance(layer_inputs, list):
    return getattr(layer_inputs, '_keras_mask', None)
  return [getattr(t, '_keras_mask', None) for t in layer_inputs]
@property
@doc_controls.do_not_doc_inheritable
def output_mask(self):
  """Retrieves the output mask tensor(s) of a layer.

  Only applicable if the layer has exactly one inbound node,
  i.e. if it is connected to one incoming layer.

  Returns:
    Output mask tensor (potentially None) or list of output mask tensors.

  Raises:
    AttributeError: if the layer is connected to more than one incoming
      layers.
  """
  layer_outputs = self.output
  if not isinstance(layer_outputs, list):
    return getattr(layer_outputs, '_keras_mask', None)
  return [getattr(t, '_keras_mask', None) for t in layer_outputs]
@doc_controls.do_not_doc_inheritable
def get_input_shape_at(self, node_index):
  """Retrieves the input shape(s) of a layer at a given node.

  Args:
    node_index: Integer, index of the node
      from which to retrieve the attribute.
      E.g. `node_index=0` will correspond to the
      first time the layer was called.

  Returns:
    A shape tuple
    (or list of shape tuples if the layer has multiple inputs).

  Raises:
    RuntimeError: If called in Eager mode.
  """
  # Delegates to the shared node-attribute lookup helper.
  return self._get_node_attribute_at_index(node_index, 'input_shapes',
                                           'input shape')
@doc_controls.do_not_doc_inheritable
def get_output_shape_at(self, node_index):
  """Retrieves the output shape(s) of a layer at a given node.

  Args:
    node_index: Integer, index of the node
      from which to retrieve the attribute.
      E.g. `node_index=0` will correspond to the
      first time the layer was called.

  Returns:
    A shape tuple
    (or list of shape tuples if the layer has multiple outputs).

  Raises:
    RuntimeError: If called in Eager mode.
  """
  # Delegates to the shared node-attribute lookup helper.
  return self._get_node_attribute_at_index(node_index, 'output_shapes',
                                           'output shape')
@doc_controls.do_not_doc_inheritable
def get_input_at(self, node_index):
  """Retrieves the input tensor(s) of a layer at a given node.

  Args:
    node_index: Integer, index of the node
      from which to retrieve the attribute.
      E.g. `node_index=0` will correspond to the
      first input node of the layer.

  Returns:
    A tensor (or list of tensors if the layer has multiple inputs).

  Raises:
    RuntimeError: If called in Eager mode.
  """
  # Delegates to the shared node-attribute lookup helper.
  return self._get_node_attribute_at_index(node_index, 'input_tensors',
                                           'input')
@doc_controls.do_not_doc_inheritable
def get_output_at(self, node_index):
  """Retrieves the output tensor(s) of a layer at a given node.

  Args:
    node_index: Integer, index of the node
      from which to retrieve the attribute.
      E.g. `node_index=0` will correspond to the
      first output node of the layer.

  Returns:
    A tensor (or list of tensors if the layer has multiple outputs).

  Raises:
    RuntimeError: If called in Eager mode.
  """
  # Delegates to the shared node-attribute lookup helper.
  return self._get_node_attribute_at_index(node_index, 'output_tensors',
                                           'output')
@property
def input(self):
  """Retrieves the input tensor(s) of a layer.

  Only applicable if the layer has exactly one input,
  i.e. if it is connected to one incoming layer.

  Returns:
    Input tensor or list of input tensors.

  Raises:
    RuntimeError: If called in Eager mode.
    AttributeError: If no inbound nodes are found.
  """
  if self._inbound_nodes:
    return self._get_node_attribute_at_index(0, 'input_tensors', 'input')
  raise AttributeError(
      f'Layer {self.name} is not connected, no input to return.')
@property
def output(self):
  """Retrieves the output tensor(s) of a layer.

  Only applicable if the layer has exactly one output,
  i.e. if it is connected to one incoming layer.

  Returns:
    Output tensor or list of output tensors.

  Raises:
    AttributeError: if the layer is connected to more than one incoming
      layers.
    RuntimeError: if called in Eager mode.
  """
  if self._inbound_nodes:
    return self._get_node_attribute_at_index(0, 'output_tensors', 'output')
  raise AttributeError(f'Layer {self.name} has no inbound nodes.')
@property
@doc_controls.do_not_doc_inheritable
def input_shape(self):
  """Retrieves the input shape(s) of a layer.

  Only applicable if the layer has exactly one input,
  i.e. if it is connected to one incoming layer, or if all inputs
  have the same shape.

  Returns:
    Input shape, as an integer shape tuple
    (or list of shape tuples, one tuple per input tensor).

  Raises:
    AttributeError: if the layer has no defined input_shape.
    RuntimeError: if called in Eager mode.
  """
  if not self._inbound_nodes:
    raise AttributeError(f'The layer "{self.name}" has never been called '
                         'and thus has no defined input shape. Note that the '
                         '`input_shape` property is only available for '
                         'Functional and Sequential models.')
  # Compare shapes via their string form so unhashable shape structures
  # can still be deduplicated.
  distinct_shapes = {str(node.input_shapes) for node in self._inbound_nodes}
  if len(distinct_shapes) > 1:
    raise AttributeError(
        f'The layer "{self.name}" has multiple inbound nodes, with '
        'different input shapes. Hence the notion of "input shape" is '
        'ill-defined for the layer. Use `get_input_shape_at(node_index)` '
        'instead.')
  return self._inbound_nodes[0].input_shapes
def count_params(self):
  """Count the total number of scalars composing the weights.

  Returns:
    An integer count.

  Raises:
    ValueError: if the layer isn't yet built
      (in which case its weights aren't yet defined).
  """
  if not self.built:
    # Graph networks know their input structure, so they can be built on
    # demand; subclassed layers cannot.
    if getattr(self, '_is_graph_network', False):
      with tf_utils.maybe_init_scope(self):
        self._maybe_build(self.inputs)
    else:
      raise ValueError('You tried to call `count_params` '
                       f'on layer {self.name}'
                       ', but the layer isn\'t built. '
                       'You can build it manually via: '
                       f'`{self.name}.build(batch_input_shape)`.')
  return layer_utils.count_params(self.weights)
@property
@doc_controls.do_not_doc_inheritable
def output_shape(self):
  """Retrieves the output shape(s) of a layer.

  Only applicable if the layer has one output,
  or if all outputs have the same shape.

  Returns:
    Output shape, as an integer shape tuple
    (or list of shape tuples, one tuple per output tensor).

  Raises:
    AttributeError: if the layer has no defined output shape.
    RuntimeError: if called in Eager mode.
  """
  if not self._inbound_nodes:
    raise AttributeError(f'The layer "{self.name}" has never been called '
                         'and thus has no defined output shape.')
  # Compare shapes via their string form so unhashable shape structures
  # can still be deduplicated.
  distinct_shapes = {str(node.output_shapes) for node in self._inbound_nodes}
  if len(distinct_shapes) > 1:
    raise AttributeError(
        f'The layer "{self.name}" has multiple inbound nodes, with '
        'different output shapes. Hence the notion of "output shape" is '
        'ill-defined for the layer. Use `get_output_shape_at(node_index)` '
        'instead.')
  return self._inbound_nodes[0].output_shapes
@property
@doc_controls.do_not_doc_inheritable
def inbound_nodes(self):
  """Deprecated, do NOT use! Only for compatibility with external Keras."""
  # Thin public alias over the private node list.
  return self._inbound_nodes
@property
@doc_controls.do_not_doc_inheritable
def outbound_nodes(self):
  """Deprecated, do NOT use! Only for compatibility with external Keras."""
  # Thin public alias over the private node list.
  return self._outbound_nodes
##############################################################################
# Methods & attributes below are public aliases of other methods. #
##############################################################################
@doc_controls.do_not_doc_inheritable
def apply(self, inputs, *args, **kwargs):
  """Deprecated, do NOT use!

  This is an alias of `self.__call__`.

  Args:
    inputs: Input tensor(s).
    *args: additional positional arguments to be passed to `self.call`.
    **kwargs: additional keyword arguments to be passed to `self.call`.

  Returns:
    Output tensor(s).
  """
  warnings.warn(
      '`layer.apply` is deprecated and will be removed in a future '
      'version. Please use `layer.__call__` method instead.',
      stacklevel=2)
  return self(inputs, *args, **kwargs)
@doc_controls.do_not_doc_inheritable
def add_variable(self, *args, **kwargs):
  """Deprecated, do NOT use! Alias for `add_weight`."""
  warnings.warn(
      '`layer.add_variable` is deprecated and will be removed in a '
      'future version. Please use `layer.add_weight` method instead.',
      stacklevel=2)
  return self.add_weight(*args, **kwargs)
@property
@doc_controls.do_not_generate_docs
def variables(self):
  """Returns the list of all layer variables/weights.

  Alias of `self.weights`.

  Note: This will not track the weights of nested `tf.Modules` that are not
  themselves Keras layers.

  Returns:
    A list of variables.
  """
  return self.weights
@property
@doc_controls.do_not_generate_docs
def trainable_variables(self):
  """Alias of `self.trainable_weights`."""
  return self.trainable_weights
@property
@doc_controls.do_not_generate_docs
def non_trainable_variables(self):
  """Alias of `self.non_trainable_weights`."""
  return self.non_trainable_weights
##############################################################################
# Methods & attributes below are all private and only used by the framework. #
##############################################################################
@property
def _inbound_nodes(self):
  # Backing value lives in a separate attribute so the setter below can
  # write it without automatic dependency tracking.
  return self._inbound_nodes_value

@_inbound_nodes.setter
@tf.__internal__.tracking.no_automatic_dependency_tracking
def _inbound_nodes(self, value):
  self._inbound_nodes_value = value
@property
def _outbound_nodes(self):
  # Backing value lives in a separate attribute so the setter below can
  # write it without automatic dependency tracking.
  return self._outbound_nodes_value

@_outbound_nodes.setter
@tf.__internal__.tracking.no_automatic_dependency_tracking
def _outbound_nodes(self, value):
  self._outbound_nodes_value = value
def _set_dtype_policy(self, dtype):
  """Sets self._dtype_policy.

  Accepts a `Policy`, a serialized policy dict, a mixed-precision policy
  name, any dtype convertible by `tf.as_dtype`, or a falsy value (which
  selects the global policy).
  """
  if isinstance(dtype, policy.Policy):
    self._dtype_policy = dtype
  elif isinstance(dtype, dict):
    self._dtype_policy = policy.deserialize(dtype)
  elif isinstance(dtype, str) and dtype in ('mixed_float16',
                                            'mixed_bfloat16'):
    # The isinstance check is required since np.dtype raises an error if
    # compared to a non-dtype string.
    self._dtype_policy = policy.Policy(dtype)
  elif dtype:
    self._dtype_policy = policy.Policy(tf.as_dtype(dtype).name)
  else:
    self._dtype_policy = policy.global_policy()
  if (self._dtype_policy.name == 'mixed_float16' and
      not loss_scale_optimizer.strategy_supports_loss_scaling()):
    # Although only loss scaling doesn't support certain strategies, to avoid
    # confusion, we disallow the 'mixed_float16' policy with unsupported
    # strategies. This is because 'mixed_float16' requires loss scaling for
    # numeric stability.
    strategy = tf.distribute.get_strategy()
    raise ValueError('Mixed precision is not supported with the '
                     'tf.distribute.Strategy: %s. Either stop using mixed '
                     'precision by removing the use of the "%s" policy or '
                     'use a different Strategy, e.g. a MirroredStrategy.' %
                     (strategy.__class__.__name__, self._dtype_policy.name))

  # Performance optimization: cache the compute dtype as a Dtype object or
  # None, so that str to Dtype conversion doesn't happen in Layer.__call__.
  # TODO(b/157486353): Investigate returning DTypes in Policy.
  if self._dtype_policy.compute_dtype:
    self._compute_dtype_object = tf.as_dtype(
        self._dtype_policy.compute_dtype)
  else:
    self._compute_dtype_object = None
@property
def dtype_policy(self):
  """The dtype policy associated with this layer.

  This is an instance of a `tf.keras.mixed_precision.Policy`.
  """
  return self._dtype_policy
@property
def compute_dtype(self):
  """The dtype of the layer's computations.

  This is equivalent to `Layer.dtype_policy.compute_dtype`. Unless
  mixed precision is used, this is the same as `Layer.dtype`, the dtype of
  the weights.

  Layers automatically cast their inputs to the compute dtype, which causes
  computations and the output to be in the compute dtype as well. This is
  done by the base Layer class in `Layer.__call__`, so you do not have to
  insert these casts if implementing your own layer.

  Layers often perform certain internal computations in higher precision
  when `compute_dtype` is float16 or bfloat16 for numeric stability. The
  output will still typically be float16 or bfloat16 in such cases.

  Returns:
    The layer's compute dtype.
  """
  return self._dtype_policy.compute_dtype
@property
def _compute_dtype(self):
  """Deprecated alias of `compute_dtype`."""
  return self._dtype_policy.compute_dtype
@property
def variable_dtype(self):
  """Alias of `Layer.dtype`, the dtype of the weights."""
  return self.dtype
def _maybe_cast_inputs(self, inputs, input_list=None):
  """Maybe casts the inputs to the compute dtype.

  If self._compute_dtype is floating-point, and self_autocast is True,
  floating-point inputs are casted to self._compute_dtype.

  Args:
    inputs: Input tensor, or structure of input tensors.
    input_list: Flat list of input tensors.

  Returns:
    `inputs`, but tensors may have been casted to self._compute_dtype
  """
  if not input_list:
    input_list = tf.nest.flatten(inputs)

  compute_dtype_object = self._compute_dtype_object
  should_autocast = (
      self._autocast and compute_dtype_object and
      compute_dtype_object.is_floating)

  # Check the flat list first so the expensive structural map below is
  # only performed when at least one tensor actually needs casting.
  if (should_autocast and
      any(map(self._should_cast_single_input, input_list))):
    # Only perform expensive `nest` operation when needed.
    return tf.nest.map_structure(self._cast_single_input, inputs)
  else:
    return inputs
def _should_cast_single_input(self, x):
  """Whether `x` is a floating tensor that differs from the compute dtype."""
  # `_AUTOCAST_TYPES` is a module-level tuple of castable tensor types.
  if isinstance(x, _AUTOCAST_TYPES):
    return (self._compute_dtype_object and
            x.dtype != self._compute_dtype_object and x.dtype.is_floating)
  return False
def _cast_single_input(self, x):
  """Cast a single Tensor or TensorSpec to the compute dtype."""
  if not self._should_cast_single_input(x):
    return x
  return tf.cast(x, self._compute_dtype_object)
# _dtype used to be an attribute set in the constructor. We still expose it
# because some clients still use it.
# TODO(reedwm): Deprecate, then remove the _dtype property.
@property
def _dtype(self):
  # This is equivalent to returning self.dtype . We do not return self.dtype
  # as it would cause infinite recursion in a few subclasses, which override
  # "dtype" to return self._dtype.
  return self._dtype_policy.variable_dtype
@_dtype.setter
def _dtype(self, value):
  # Normalize through tf.as_dtype so dtype objects, strings, and numpy
  # dtypes are all accepted; routing through the policy keeps the cached
  # compute dtype consistent.
  value = tf.as_dtype(value).name
  self._set_dtype_policy(policy.Policy(value))
def _name_scope(self):  # pylint: disable=method-hidden
  """Builds the TF2 name scope string for this layer's ops."""
  if not tf.__internal__.tf2.enabled():
    return self.name
  name_scope = self.name
  # Prefix with the declaring model's scope when that feature is enabled.
  if _is_name_scope_on_model_declaration_enabled and self._outer_name_scope:
    name_scope = self._outer_name_scope + '/' + name_scope

  # Then prefix with whatever scope is currently active.
  current_name_scope = tf.__internal__.get_name_scope()
  if current_name_scope:
    name_scope = current_name_scope + '/' + name_scope

  if name_scope:
    # Note that the trailing `/` prevents autogenerated
    # numerical suffixes to get appended. It will also fully reset
    # nested name scope (i.e. the outer name scope has no effect).
    name_scope += '/'
  return name_scope
def _init_set_name(self, name, zero_based=True):
  """Sets `self._name`, auto-generating a unique name when `name` is None."""
  if name is None:
    # Derive a unique snake_case name from the class name, e.g.
    # `Dense` -> `dense`, `dense_1`, ...
    self._name = backend.unique_object_name(
        generic_utils.to_snake_case(self.__class__.__name__),
        zero_based=zero_based)
  elif isinstance(name, str):
    # Record the user-supplied name so future auto-generated names
    # do not collide with it.
    backend.observe_object_name(name)
    self._name = name
  else:
    raise TypeError(
        f'Expected `name` argument to be a string, but got: {name}')
def _get_existing_metric(self, name=None):
match = [m for m in self._metrics if m.name == name]
if not match:
return
if len(match) > 1:
raise ValueError(
'Please provide different names for the metrics you have added. '
'We found {} metrics with the name: "{}"'.format(len(match), name))
return match[0]
def _handle_weight_regularization(self, name, variable, regularizer):
  """Create lambdas which compute regularization losses."""

  def _loss_for_variable(v):
    """Creates a regularization loss `Tensor` for variable `v`."""
    with backend.name_scope(name + '/Regularizer'):
      regularization = regularizer(v)
    return regularization

  # functools.partial binds each variable eagerly, avoiding the classic
  # late-binding-closure bug when iterating over split variables.
  if base_layer_utils.is_split_variable(variable):
    for v in variable:
      self.add_loss(functools.partial(_loss_for_variable, v))
  else:
    self.add_loss(functools.partial(_loss_for_variable, variable))
def _handle_activity_regularization(self, inputs, outputs):
  """Adds an activity-regularization loss for each output tensor."""
  # Apply activity regularization.
  # Note that it should be applied every time the layer creates a new
  # output, since it is output-specific.
  if self._activity_regularizer:
    output_list = tf.nest.flatten(outputs)
    with backend.name_scope('ActivityRegularizer'):
      for output in output_list:
        activity_loss = self._activity_regularizer(output)
        # Assumes the first dimension of `output` is the batch dimension
        # — TODO confirm for exotic output shapes.
        batch_size = tf.cast(
            tf.shape(output)[0], activity_loss.dtype)
        # Make activity regularization strength batch-agnostic.
        mean_activity_loss = activity_loss / batch_size
        self.add_loss(mean_activity_loss)
def _set_mask_metadata(self, inputs, outputs, previous_mask, build_graph):
  """Attaches `_keras_mask` metadata to the layer's outputs."""
  # Many `Layer`s don't need to call `compute_mask`.
  # This method is optimized to do as little work as needed for the common
  # case.
  if not self._supports_masking:
    return

  flat_outputs = tf.nest.flatten(outputs)

  mask_already_computed = (
      getattr(self, '_compute_output_and_mask_jointly', False) or
      all(getattr(x, '_keras_mask', None) is not None for x in flat_outputs))
  if mask_already_computed:
    if build_graph:
      self._set_mask_keras_history_checked(flat_outputs)
    return

  output_masks = self.compute_mask(inputs, previous_mask)
  if output_masks is None:
    return

  flat_masks = tf.nest.flatten(output_masks)
  for tensor, mask in zip(flat_outputs, flat_masks):
    try:
      tensor._keras_mask = mask
    except AttributeError:
      # C Type such as np.ndarray.
      pass

  if build_graph:
    self._set_mask_keras_history_checked(flat_outputs)
def _set_mask_keras_history_checked(self, flat_outputs):
for output in flat_outputs:
if getattr(output, '_keras_mask', None) is not None:
# Do not track masks for `TensorFlowOpLayer` construction.
output._keras_mask._keras_history_checked = True
def _get_input_masks(self, inputs, input_list, args, kwargs):
  """Collects input masks, returning `(input_masks, implicit_mask)`.

  `implicit_mask` is True only when masks were harvested from the inputs'
  `_keras_mask` attributes rather than passed explicitly.
  """
  if not self._supports_masking and not self._expects_mask_arg:
    # Input masks only need to be retrieved if they are needed for `call`
    # or `compute_mask`.
    input_masks = None
    implicit_mask = False
  elif self._call_arg_was_passed('mask', args, kwargs):
    # An explicit `mask=` argument wins over implicit masks.
    input_masks = self._get_call_arg_value('mask', args, kwargs)
    implicit_mask = False
  else:
    input_masks = [getattr(t, '_keras_mask', None) for t in input_list]
    if all(mask is None for mask in input_masks):
      input_masks = None
      implicit_mask = False
    else:
      # Only do expensive `nest` op when masking is actually being used.
      input_masks = tf.nest.pack_sequence_as(inputs, input_masks)
      implicit_mask = True
  return input_masks, implicit_mask
def _call_arg_was_passed(self, arg_name, args, kwargs, inputs_in_args=False):
# Performance optimization: do no work in most common case.
if not args and not kwargs:
return False
if arg_name in kwargs:
return True
call_fn_args = self._call_fn_args
if not inputs_in_args:
# Ignore `inputs` arg.
call_fn_args = call_fn_args[1:]
return arg_name in dict(zip(call_fn_args, args))
def _get_call_arg_value(self, arg_name, args, kwargs, inputs_in_args=False):
if arg_name in kwargs:
return kwargs[arg_name]
call_fn_args = self._call_fn_args
if not inputs_in_args:
# Ignore `inputs` arg.
call_fn_args = call_fn_args[1:]
args_dict = dict(zip(call_fn_args, args))
return args_dict[arg_name]
def _set_call_arg_value(
self, arg_name, new_value, args,
kwargs, inputs_in_args=False, pop_kwarg_if_none=False):
arg_pos = self._call_fn_arg_positions.get(arg_name, None)
if arg_pos is not None:
if not inputs_in_args:
# Ignore `inputs` arg.
arg_pos = arg_pos - 1
if len(args) > arg_pos:
args = list(args)
args[arg_pos] = new_value
return tuple(args), kwargs
if new_value is None and pop_kwarg_if_none:
kwargs.pop(arg_name, None)
else:
kwargs[arg_name] = new_value
return args, kwargs
  def _set_connectivity_metadata(self, args, kwargs, outputs):
    """Wire this layer call into the functional graph and return `outputs`.

    Creates the `Node` that records how this call connects to its inbound
    layers. Outputs returned unmodified from the inputs are replaced by
    identity copies first.
    """
    # If the layer returns tensors from its inputs unmodified,
    # we copy them to avoid loss of KerasHistory metadata.
    flat_outputs = tf.nest.flatten(outputs)
    flat_inputs = tf.nest.flatten((args, kwargs))
    input_ids_set = {id(i) for i in flat_inputs}
    outputs_copy = []
    for x in flat_outputs:
      if id(x) in input_ids_set:
        with backend.name_scope(self.name):
          x = tf.identity(x)
      outputs_copy.append(x)
    outputs = tf.nest.pack_sequence_as(outputs, outputs_copy)
    # Create node, Node wires itself to inbound and outbound layers.
    # The Node constructor actually updates this layer's self._inbound_nodes,
    # sets _keras_history on the outputs, and adds itself to the
    # `_outbound_nodes` of the layers that produced the inputs to this
    # layer call.
    node_module.Node(self, call_args=args, call_kwargs=kwargs, outputs=outputs)
    return outputs
def _get_node_attribute_at_index(self, node_index, attr, attr_name):
"""Private utility to retrieves an attribute (e.g. inputs) from a node.
This is used to implement the methods:
- get_input_shape_at
- get_output_shape_at
- get_input_at
etc...
Args:
node_index: Integer index of the node from which
to retrieve the attribute.
attr: Exact node attribute name.
attr_name: Human-readable attribute name, for error messages.
Returns:
The layer's attribute `attr` at the node of index `node_index`.
Raises:
RuntimeError: If the layer has no inbound nodes, or if called in Eager
mode.
ValueError: If the index provided does not match any node.
"""
if not self._inbound_nodes:
raise RuntimeError(f'The layer {self.name} has never been called '
'and thus has no defined {attr_name}.')
if not len(self._inbound_nodes) > node_index:
raise ValueError(f'Asked to get {attr_name} at node '
f'{node_index}, but the layer has only '
f'{len(self._inbound_nodes)} inbound nodes.')
values = getattr(self._inbound_nodes[node_index], attr)
if isinstance(values, list) and len(values) == 1:
return values[0]
else:
return values
  def _maybe_build(self, inputs):
    """Build the layer from `inputs` shapes if it has not been built yet.

    Also infers the layer dtype from the first input when no compute dtype
    is set, and loads any weights that were supplied at construction time.
    """
    # Check input assumptions set before layer building, e.g. input rank.
    if not self.built:
      input_spec.assert_input_compatibility(
          self.input_spec, inputs, self.name)
      input_list = tf.nest.flatten(inputs)
      if input_list and self._dtype_policy.compute_dtype is None:
        try:
          dtype = input_list[0].dtype.base_dtype.name
        except AttributeError:
          # First input has no dtype (e.g. a plain shape tuple); keep policy.
          pass
        else:
          self._set_dtype_policy(policy.Policy(dtype))
      input_shapes = None
      # Converts Tensors / CompositeTensors to TensorShapes.
      if any(hasattr(x, 'shape') for x in input_list):
        input_shapes = tf_utils.get_shapes(inputs)
      else:
        # Converts input shape to TensorShapes.
        try:
          input_shapes = tf_utils.convert_shapes(inputs, to_tuples=False)
        except ValueError:
          pass
      # Only call `build` if the user has manually overridden the build method.
      if not hasattr(self.build, '_is_default'):
        # Any setup work performed only once should happen in an `init_scope`
        # to avoid creating symbolic Tensors that will later pollute any eager
        # operations.
        with tf_utils.maybe_init_scope(self):
          self.build(input_shapes)  # pylint:disable=not-callable
      # We must set also ensure that the layer is marked as built, and the build
      # shape is stored since user defined build functions may not be calling
      # `super.build()`
      Layer.build(self, input_shapes)
    # Optionally load weight values specified at layer instantiation.
    if self._initial_weights is not None:
      with tf.init_scope():
        # Using `init_scope` since we want variable assignment in
        # `set_weights` to be treated like variable initialization.
        self.set_weights(self._initial_weights)
      self._initial_weights = None
  def _symbolic_call(self, inputs):
    """Produce placeholder outputs from input shapes without running `call`.

    Output shapes come from `compute_output_shape`; the returned structure
    contains fresh placeholders with this layer's dtype and no mask.
    """
    input_shapes = tf.nest.map_structure(lambda x: x.shape, inputs)
    output_shapes = self.compute_output_shape(input_shapes)
    # Convert to TensorShape so that nest.map_structure will not map into
    # individual dim of the shape.
    output_shapes = tf_utils.convert_shapes(output_shapes, to_tuples=False)
    def _make_placeholder_like(shape):
      ph = backend.placeholder(shape=shape, dtype=self.dtype)
      ph._keras_mask = None
      return ph
    return tf.nest.map_structure(_make_placeholder_like, output_shapes)
def _get_trainable_state(self):
"""Get the `trainable` state of each sublayer.
Returns:
A dict mapping all sublayers to their `trainable` value.
"""
trainable_state = weakref.WeakKeyDictionary()
for layer in self._flatten_layers():
trainable_state[layer] = layer.trainable
return trainable_state
def _set_trainable_state(self, trainable_state):
"""Set `trainable` state for each sublayer."""
for layer in self._flatten_layers():
if layer in trainable_state:
layer.trainable = trainable_state[layer]
  @property
  def _obj_reference_counts(self):
    """A dictionary counting the number of attributes referencing an object."""
    # Created lazily; keyed by object identity (not equality) so that
    # value-equal objects (e.g. two empty lists) are counted separately.
    self._maybe_create_attribute('_obj_reference_counts_dict',
                                 object_identity.ObjectIdentityDictionary())
    return self._obj_reference_counts_dict
  @tf.__internal__.tracking.no_automatic_dependency_tracking
  def _maybe_create_attribute(self, name, default_value):
    """Create the attribute with the default value if it hasn't been created.

    This is useful for fields that are used for tracking purposes,
    _trainable_weights, or _layers. Note that a user could create a layer
    subclass and assign an internal field before invoking Layer.__init__();
    __setattr__() needs to create the tracking fields and __init__() needs
    to not override them.

    Args:
      name: String, the name of the attribute.
      default_value: Object, the default value of the attribute.
    """
    if not hasattr(self, name):
      # NOTE: the decorator suppresses dependency tracking, so this goes
      # through the fast path of the overridden __setattr__ below.
      self.__setattr__(name, default_value)
  def __delattr__(self, name):
    """Delete `name`, keeping reference counts and tracking lists consistent."""
    # For any super.__delattr__() call, we will directly use the implementation
    # in Trackable and skip the behavior in AutoTrackable. The Layer was
    # originally use Trackable as base class, the change of using Module as base
    # class forced us to have AutoTrackable in the class hierarchy.
    #
    # TODO(b/180760306) Keeping the status quo of skipping _delattr__ and
    # __setattr__ in AutoTrackable may be unsustainable.
    existing_value = getattr(self, name, None)
    # If this value is replacing an existing object assigned to an attribute, we
    # should clean it out to avoid leaking memory. First we check if there are
    # other attributes referencing it.
    reference_counts = self._obj_reference_counts
    if existing_value not in reference_counts:
      # Value was never tracked; plain delete.
      super(tf.__internal__.tracking.AutoTrackable, self).__delattr__(name)  # pylint: disable=bad-super-call
      return
    reference_count = reference_counts[existing_value]
    if reference_count > 1:
      # There are other remaining references. We can't remove this object from
      # _layers etc.
      reference_counts[existing_value] = reference_count - 1
      super(tf.__internal__.tracking.AutoTrackable, self).__delattr__(name)  # pylint: disable=bad-super-call
      return
    else:
      # This is the last remaining reference.
      del reference_counts[existing_value]
    super(tf.__internal__.tracking.AutoTrackable, self).__delattr__(name)  # pylint: disable=bad-super-call
    # Last reference gone: also stop tracking the value as a sublayer/weights.
    if (isinstance(existing_value, Layer)
        or base_layer_utils.has_weights(existing_value)):
      super(tf.__internal__.tracking.AutoTrackable, self).__setattr__(  # pylint: disable=bad-super-call
          '_self_tracked_trackables',
          [l for l in self._self_tracked_trackables if l is not existing_value])
    if isinstance(existing_value, tf.Variable):
      super(tf.__internal__.tracking.AutoTrackable, self).__setattr__(  # pylint: disable=bad-super-call
          '_trainable_weights',
          [w for w in self._trainable_weights if w is not existing_value])
      super(tf.__internal__.tracking.AutoTrackable, self).__setattr__(  # pylint: disable=bad-super-call
          '_non_trainable_weights',
          [w for w in self._non_trainable_weights if w is not existing_value])
  def __setattr__(self, name, value):
    """Assign `name`, auto-tracking layers, metrics and variables in `value`."""
    if (name == '_self_setattr_tracking' or
        not getattr(self, '_self_setattr_tracking', True) or
        # Exclude @property.setters from tracking
        hasattr(self.__class__, name)):
      # Fast path: tracking disabled or assignment goes through a property.
      try:
        super(tf.__internal__.tracking.AutoTrackable, self).__setattr__(name, value)  # pylint: disable=bad-super-call
      except AttributeError:
        raise AttributeError(
            ('Can\'t set the attribute "{}", likely because it conflicts with '
             'an existing read-only @property of the object. Please choose a '
             'different name.').format(name))
      return
    # Wraps data structures in `Trackable`, unwraps `NoDependency` objects.
    value = tf.__internal__.tracking.sticky_attribute_assignment(
        trackable=self, value=value, name=name)
    reference_counts = self._obj_reference_counts
    reference_counts[value] = reference_counts.get(value, 0) + 1
    # Clean out the old attribute, which clears _layers and _trainable_weights
    # if necessary.
    try:
      self.__delattr__(name)
    except AttributeError:
      # No previous value for this attribute; nothing to clean up.
      pass
    # Keep track of metric instance created in subclassed layer.
    for val in tf.nest.flatten(value):
      if isinstance(val, metrics_mod.Metric) and hasattr(self, '_metrics'):
        self._metrics.append(val)
    # Append value to self._self_tracked_trackables if relevant
    if (getattr(self, '_auto_track_sub_layers', True) and
        (isinstance(value, tf.Module) or
         base_layer_utils.has_weights(value))):
      self._maybe_create_attribute('_self_tracked_trackables', [])
      # We need to check object identity to avoid de-duplicating empty
      # container types which compare equal.
      if not any((layer is value for layer in self._self_tracked_trackables)):
        self._self_tracked_trackables.append(value)
        if hasattr(value, '_use_resource_variables'):
          # Legacy layers (V1 tf.layers) must always use
          # resource variables.
          value._use_resource_variables = True
    # Append value to list of trainable / non-trainable weights if relevant
    # TODO(b/125122625): This won't pick up on any variables added to a
    # list/dict after creation.
    for val in tf.nest.flatten(value, expand_composites=True):
      if not isinstance(val, tf.Variable):
        continue
      # Users may add extra weights/variables
      # simply by assigning them to attributes (invalid for graph networks)
      self._maybe_create_attribute('_trainable_weights', [])
      self._maybe_create_attribute('_non_trainable_weights', [])
      if val.trainable:
        if any(val is w for w in self._trainable_weights):
          continue
        self._trainable_weights.append(val)
      else:
        if any(val is w for w in self._non_trainable_weights):
          continue
        self._non_trainable_weights.append(val)
      backend.track_variable(val)
    # TODO(b/180760306) Skip the auto trackable from tf.Module to keep status
    # quo. See the comment at __delattr__.
    super(tf.__internal__.tracking.AutoTrackable, self).__setattr__(name, value)  # pylint: disable=bad-super-call
def _gather_children_attribute(self, attribute):
assert attribute in {
'variables', 'trainable_variables', 'non_trainable_variables'
}
if hasattr(self, '_self_tracked_trackables'):
nested_layers = self._flatten_modules(include_self=False, recursive=False)
return list(
itertools.chain.from_iterable(
getattr(layer, attribute) for layer in nested_layers))
return []
def _flatten_layers(self, recursive=True, include_self=True):
for m in self._flatten_modules(
recursive=recursive, include_self=include_self):
if isinstance(m, Layer):
yield m
  def _flatten_modules(self, recursive=True, include_self=True):
    """Flattens `tf.Module` instances (excluding `Metrics`).

    Args:
      recursive: Whether to recursively flatten through submodules.
      include_self: Whether to include this `Layer` instance.

    Yields:
      `tf.Module` instance tracked by this `Layer`.
    """
    if include_self:
      yield self
    # Only instantiate set and deque if needed.
    trackables = getattr(self, '_self_tracked_trackables', None)
    if trackables:
      seen_object_ids = set()
      deque = collections.deque(trackables)
      while deque:
        trackable_obj = deque.popleft()
        trackable_id = id(trackable_obj)
        if trackable_id in seen_object_ids:
          # Already visited (diamond in the tracking graph); skip.
          continue
        seen_object_ids.add(trackable_id)
        # Metrics are not considered part of the Layer's topology.
        if (isinstance(trackable_obj, tf.Module) and
            not isinstance(trackable_obj, metrics_mod.Metric)):
          yield trackable_obj
          # Introspect recursively through sublayers.
          if recursive:
            subtrackables = getattr(trackable_obj, '_self_tracked_trackables',
                                    None)
            if subtrackables:
              # extendleft(reversed(...)) keeps child order and visits
              # children before the remaining siblings (depth-first).
              deque.extendleft(reversed(subtrackables))
        elif isinstance(trackable_obj, tf.__internal__.tracking.TrackableDataStructure):
          # Data structures are introspected even with `recursive=False`.
          tracked_values = trackable_obj._values
          if tracked_values:
            deque.extendleft(reversed(tracked_values))
  # This is a hack so that the is_layer (within
  # training/trackable/layer_utils.py) check doesn't get the weights attr.
  # TODO(b/110718070): Remove when fixed.
  def _is_layer(self):
    """Duck-typing marker used by trackable layer_utils to identify Layers."""
    return True
  def _init_call_fn_args(self, expects_training_arg=None):
    """(Re)inspect `call`'s signature and cache which arguments it accepts.

    Args:
      expects_training_arg: Optional bool override used when restoring from
        a SavedModel; when None, it is inferred from the `call` signature.
    """
    # Clear cached call function arguments.
    self.__class__._call_full_argspec.fget.cache.pop(self, None)
    self.__class__._call_fn_args.fget.cache.pop(self, None)
    self.__class__._call_accepts_kwargs.fget.cache.pop(self, None)
    call_fn_args = self._call_fn_args
    call_fn_args += self._call_full_argspec.kwonlyargs or []
    if expects_training_arg is None:
      self._expects_training_arg = ('training' in call_fn_args or
                                    self._call_accepts_kwargs)
    else:
      # Use value encoded into the metadata when loading from the SavedModel.
      self._expects_training_arg = expects_training_arg
    # The default training arg will be any (non-None) default specified in the
    # method signature, or None if no value is specified.
    call_fn_arg_defaults = self._call_fn_arg_defaults.copy()
    call_fn_arg_defaults.update(self._call_full_argspec.kwonlydefaults or {})
    self._default_training_arg = call_fn_arg_defaults.get('training')
    self._expects_mask_arg = ('mask' in call_fn_args or
                              self._call_accepts_kwargs)
  @property
  @layer_utils.cached_per_instance
  def _call_full_argspec(self):
    """FullArgSpec of `self.call`, cached per instance."""
    # Argspec inspection is expensive and the call spec is used often, so it
    # makes sense to cache the result.
    return tf_inspect.getfullargspec(self.call)
@property
@layer_utils.cached_per_instance
def _call_fn_args(self):
all_args = self._call_full_argspec.args
# Scrub `self` that appears if a decorator was applied.
if all_args and all_args[0] == 'self':
return all_args[1:]
return all_args
@property
@layer_utils.cached_per_instance
def _call_fn_arg_defaults(self):
call_fn_args = self._call_fn_args
call_fn_defaults = self._call_full_argspec.defaults or []
defaults = dict()
# The call arg defaults are an n-tuple of the last n elements of the args
# list. (n = # of elements that have a default argument)
for i in range(-1 * len(call_fn_defaults), 0):
defaults[call_fn_args[i]] = call_fn_defaults[i]
return defaults
@property
@layer_utils.cached_per_instance
def _call_fn_arg_positions(self):
call_fn_arg_positions = dict()
for pos, arg in enumerate(self._call_fn_args):
call_fn_arg_positions[arg] = pos
return call_fn_arg_positions
  @property
  @layer_utils.cached_per_instance
  def _call_accepts_kwargs(self):
    """True when `call` declares a `**kwargs` catch-all parameter."""
    return self._call_full_argspec.varkw is not None
  @property
  def _eager_losses(self):
    """Per-thread list of losses recorded during eager execution."""
    # A list of loss values containing activity regularizers and losses
    # manually added through `add_loss` during eager execution. It is cleared
    # after every batch.
    # Because we plan on eventually allowing a same model instance to be trained
    # in eager mode or graph mode alternatively, we need to keep track of
    # eager losses and symbolic losses via separate attributes.
    if not hasattr(self._thread_local, '_eager_losses'):
      self._thread_local._eager_losses = []
    return self._thread_local._eager_losses
  @_eager_losses.setter
  def _eager_losses(self, losses):
    # Replace the current thread's eager loss list wholesale.
    self._thread_local._eager_losses = losses
def _dedup_weights(self, weights):
"""Dedupe weights while maintaining order as much as possible."""
output, seen_ids = [], set()
for w in weights:
if id(w) not in seen_ids:
output.append(w)
# Track the Variable's identity to avoid __eq__ issues.
seen_ids.add(id(w))
return output
def _split_out_first_arg(self, args, kwargs):
# Grab the argument corresponding to the first argument in the
# layer's `call` method spec. This will either be the first positional
# argument, or it will be provided as a keyword argument.
if args:
inputs = args[0]
args = args[1:]
elif self._call_fn_args[0] in kwargs:
kwargs = copy.copy(kwargs)
inputs = kwargs.pop(self._call_fn_args[0])
else:
raise ValueError(
'The first argument to `Layer.call` must always be passed.')
return inputs, args, kwargs
# SavedModel properties. Please see keras/saving/saved_model for details.
@tf.__internal__.tracking.no_automatic_dependency_tracking
def _set_save_spec(self, inputs, args=None, kwargs=None):
"""Defines the save spec so that serialization is able to trace layer call.
The TensorSpecs of the call function `inputs`, `args`, and `kwargs` are
saved into a tuple of `([inputs] + args, kwargs)`.
Args:
inputs: possibly nested inputs passed into the call function.
args: a list of positional arguments passed into call.
kwargs: a dictionary of keyword arguments passed into call.
"""
if self._saved_model_inputs_spec is not None:
return # Already set.
args = args or []
kwargs = kwargs or {}
inputs_spec = tf.nest.map_structure(tf_utils.get_tensor_spec, inputs)
# Filter out non-tensor arguments from args and kwargs.
args_spec = []
for arg in args:
flat_arg = tf.nest.flatten(arg)
flat_specs = [tf_utils.get_tensor_spec(x) for x in flat_arg]
if any(s is None for s in flat_specs):
break # Stop recording positional args once a non-tensor has been found
args_spec.append(tf.nest.pack_sequence_as(arg, flat_specs))
kwargs_spec = {}
for key, kwarg in kwargs.items():
if key == 'training':
continue
flat_kwarg = tf.nest.flatten(kwarg)
flat_specs = [tf_utils.get_tensor_spec(x) for x in flat_kwarg]
if any(s is None for s in flat_specs):
continue
kwargs[key] = args_spec.append(
tf.nest.pack_sequence_as(kwarg, flat_specs))
self._saved_model_inputs_spec = inputs_spec
self._saved_model_arg_spec = ([inputs_spec] + args_spec, kwargs_spec)
def _get_save_spec(self, dynamic_batch=True, inputs_only=True):
if self._saved_model_inputs_spec is None:
return None
spec = tf.nest.map_structure(
lambda t: tf_utils.get_tensor_spec(t, dynamic_batch=dynamic_batch),
self._saved_model_arg_spec)
return spec[0][0] if inputs_only else spec
  @property
  def _trackable_saved_model_saver(self):
    """SavedModel serialization helper for this layer (fresh per access)."""
    return layer_serialization.LayerSavedModelSaver(self)
  @property
  def _object_identifier(self):
    """Object-type identifier, delegated to the SavedModel saver."""
    return self._trackable_saved_model_saver.object_identifier
  @property
  def _tracking_metadata(self):
    """Info about this layer to be saved into the SavedModel."""
    # Delegated to the SavedModel saver.
    return self._trackable_saved_model_saver.tracking_metadata
  def _list_extra_dependencies_for_serialization(self, serialization_cache):
    """Extra SavedModel dependencies, delegated to the SavedModel saver."""
    return (self._trackable_saved_model_saver
            .list_extra_dependencies_for_serialization(serialization_cache))
  def _list_functions_for_serialization(self, serialization_cache):
    """Functions to serialize, delegated to the SavedModel saver."""
    return (self._trackable_saved_model_saver
            .list_functions_for_serialization(serialization_cache))
  @property
  def _use_input_spec_as_call_signature(self):
    """Whether `input_spec` may serve as the SavedModel call signature."""
    # Whether input spec can be used as the call signature when tracing the
    # Layer for SavedModel. By default, this is set to `True` for layers
    # exported from the Keras library, because the layers more rigidly define
    # the `input_specs` property (many custom layers only set the `ndims`)
    return get_canonical_name_for_symbol(type(self),
                                         api_name='keras') is not None
def __getstate__(self):
# Override to support `copy.deepcopy` and pickling.
# Thread-local objects cannot be copied in Python 3, so pop these.
# Thread-local objects are used to cache losses in MirroredStrategy, and
# so shouldn't be copied.
state = self.__dict__.copy()
state.pop('_thread_local', None)
state.pop('_metrics_lock', None)
return state
def __setstate__(self, state):
state['_thread_local'] = threading.local()
state['_metrics_lock'] = threading.Lock()
# Bypass Trackable logic as `__dict__` already contains this info.
object.__setattr__(self, '__dict__', state)
class TensorFlowOpLayer(Layer):
  """Wraps a TensorFlow Operation in a Layer.

  This class is used internally by the Functional API. When a user
  uses a raw TensorFlow Operation on symbolic tensors originating
  from an `Input` Layer, the resultant operation will be wrapped
  with this Layer object in order to make the operation compatible
  with the Keras API.

  This Layer will create a new, identical operation (except for inputs
  and outputs) every time it is called. If `run_eagerly` is `True`,
  the op creation and calculation will happen inside an Eager function.

  Instances of this Layer are created when `autolambda` is called, which
  is whenever a Layer's `__call__` encounters symbolic inputs that do
  not have Keras metadata, or when a Network's `__init__` encounters
  outputs that do not have Keras metadata.

  Attributes:
    node_def: String, the serialized NodeDef of the Op this layer will wrap.
    name: String, the name of the Layer.
    constants: Dict of NumPy arrays, the values of any Tensors needed for this
      Operation that do not originate from a Keras `Input` Layer. Since all
      placeholders must come from Keras `Input` Layers, these Tensors must be
      treated as constant in the Functional API.
    trainable: Bool, whether this Layer is trainable. Currently Variables are
      not supported, and so this parameter has no effect.
    dtype: The default dtype of this Layer. Inherited from `Layer` and has no
      effect on this class, however is used in `get_config`.
  """

  @tf.__internal__.tracking.no_automatic_dependency_tracking
  def __init__(self,
               node_def,
               name,
               constants=None,
               trainable=True,
               dtype=None):
    # Pass autocast=False, as if inputs are cast, input types might not match
    # Operation type.
    super(TensorFlowOpLayer, self).__init__(
        name=_TF_OP_LAYER_NAME_PREFIX + name, trainable=trainable, dtype=dtype,
        autocast=False)
    # `node_def` may arrive as a dict (from a JSON config) or as a
    # (possibly str-encoded) serialized proto.
    if isinstance(node_def, dict):
      self.node_def = json_format.ParseDict(node_def, tf.compat.v1.NodeDef())
    else:
      if not isinstance(node_def, bytes):
        node_def = node_def.encode('utf-8')
      self.node_def = tf.compat.v1.NodeDef.FromString(node_def)
    # JSON serialization stringifies keys which are integer input indices.
    self.constants = ({
        int(index): constant for index, constant in constants.items()
    } if constants is not None else {})
    # Layer uses original op unless it is called on new inputs.
    # This means `built` is not set in `__call__`.
    self.built = True
    # Do not individually trace TensorflowOpLayers in the SavedModel.
    self._must_restore_from_config = True

  def call(self, inputs):
    """Recreate and run the wrapped op (inside a tf.function when eager)."""
    if tf.executing_eagerly():
      return self._defun_call(inputs)
    return self._make_op(inputs)

  def _make_node_def(self, graph):
    """Copy the stored NodeDef with a graph-unique name, marked as cloned."""
    node_def = tf.compat.v1.NodeDef()
    node_def.CopyFrom(self.node_def)
    # Used in TPUReplicateContext to indicate whether this node has been cloned
    # and to not add TPU attributes.
    node_def.attr['_cloned'].b = True
    node_def.name = graph.unique_name(node_def.name)
    return node_def

  def _make_op(self, inputs):
    """Instantiate the wrapped op in the inputs' graph; return its output(s)."""
    inputs = tf.nest.flatten(inputs)
    graph = inputs[0].graph
    node_def = self._make_node_def(graph)
    with graph.as_default():
      for index, constant in self.constants.items():
        # Recreate constant in graph to add distribution context.
        value = tf.get_static_value(constant)
        if value is not None:
          constant = tf.constant(value, name=node_def.input[index])
        inputs.insert(index, constant)
      # TODO(b/183990973): We should drop or consolidate these private api calls
      # for adding an op to the graph and recording its gradient.
      c_op = tf.__internal__.create_c_op(graph, node_def, inputs, control_inputs=[])
      op = graph._create_op_from_tf_operation(c_op)
      op._control_flow_post_processing()
      # Record the gradient because custom-made ops don't go through the
      # code-gen'd eager call path
      op_type = tf.compat.as_str(op.op_def.name)
      attr_names = [tf.compat.as_str(attr.name) for attr in op.op_def.attr]
      # Flatten attrs into an alternating (name, value, name, value, ...) tuple.
      attrs = []
      for attr_name in attr_names:
        attrs.append(attr_name)
        attrs.append(op.get_attr(attr_name))
      attrs = tuple(attrs)
      tf.__internal__.record_gradient(op_type, op.inputs, attrs, op.outputs)
      if len(op.outputs) == 1:
        return op.outputs[0]
      return op.outputs

  @tf.function
  def _defun_call(self, inputs):
    """Wraps the op creation method in an Eager function for `run_eagerly`."""
    return self._make_op(inputs)

  def get_config(self):
    """Serialize the layer; inverse of `__init__`'s dict form of `node_def`."""
    config = super(TensorFlowOpLayer, self).get_config()
    config.update({
        # `__init__` prefixes the name. Revert to the constructor argument.
        'name': config['name'][len(_TF_OP_LAYER_NAME_PREFIX):],
        'node_def': json_format.MessageToDict(self.node_def),
        'constants': {
            i: backend.get_value(c) for i, c in self.constants.items()
        }
    })
    return config
class AddLoss(Layer):
  """Layer that registers its input tensor as a loss and passes it through.

  Attributes:
    unconditional: Whether or not the loss should be conditioned on the inputs.
  """

  def __init__(self, unconditional, **kwargs):
    # Never autocast: a loss should keep its own dtype.
    kwargs['autocast'] = False
    super(AddLoss, self).__init__(**kwargs)
    self.unconditional = unconditional

  def call(self, inputs):
    # An unconditional loss is not tied to any particular input tensor.
    conditioned_on_inputs = not self.unconditional
    self.add_loss(inputs, inputs=conditioned_on_inputs)
    return inputs

  def get_config(self):
    config = super(AddLoss, self).get_config()
    config['unconditional'] = self.unconditional
    return config
class AddMetric(Layer):
  """Layer that registers its input tensor as a metric and passes it through.

  Attributes:
    aggregation: 'mean' or None. How the inputs should be aggregated.
    metric_name: The name to use for this metric.
  """

  def __init__(self, aggregation=None, metric_name=None, **kwargs):
    super(AddMetric, self).__init__(**kwargs)
    self.aggregation = aggregation
    self.metric_name = metric_name

  def call(self, inputs):
    self.add_metric(inputs, aggregation=self.aggregation, name=self.metric_name)
    return inputs

  def get_config(self):
    config = super(AddMetric, self).get_config()
    config['aggregation'] = self.aggregation
    config['metric_name'] = self.metric_name
    return config
def _in_functional_construction_mode(layer, inputs, args, kwargs, input_list):  # pylint: disable=unused-argument
  """Check the arguments to see if we are constructing a functional model."""
  # Functional construction is signalled by at least one symbolic KerasTensor
  # anywhere in the (possibly nested) call arguments.
  flat_call_args = tf.nest.flatten([inputs, args, kwargs])
  return any(
      isinstance(arg, keras_tensor.KerasTensor) for arg in flat_call_args)
def _convert_numpy_or_python_types(x):
  """Convert Tensors, ndarrays and Python scalars to tf Tensors; pass through anything else."""
  if not isinstance(x, (tf.Tensor, np.ndarray, float, int)):
    return x
  return tf.convert_to_tensor(x)
@keras_export(
    'keras.__internal__.apply_name_scope_on_model_declaration', v1=[])
def _apply_name_scope_on_model_declaration(enable):
  """Apply `with tf.name_scope(...)` on model declaration.

  ```python
  tf.keras.__internal__.apply_name_scope_on_model_declaration(True)

  inputs = input_layer.Input((3,))
  with tf.name_scope('MyScope'):
    outputs = layers.Dense(10, name='MyDense')(inputs)
  model = tf.keras.Model(inputs, outputs)

  # with `tf.keras.__internal__.apply_name_scope_on_model_declaration(True)`,
  # The name of the dense layer is "model/MyScope/MyDense/*", and without,
  # "model/MyDense/*"
  ```

  Args:
    enable: Enables if `True`, disables if `False`.
  """
  if not isinstance(enable, bool):
    raise TypeError(
        '`enable` argument must be `True` or `False`, got {}'.format(enable))

  # Module-level switch; NOTE(review): presumably consulted elsewhere in this
  # module when layers are called during model declaration.
  global _is_name_scope_on_model_declaration_enabled
  _is_name_scope_on_model_declaration_enabled = enable
class BaseRandomLayer(Layer):
  """A layer handle the random number creation and savemodel behavior."""

  @tf.__internal__.tracking.no_automatic_dependency_tracking
  def __init__(self, seed=None, force_generator=False, **kwargs):
    """Initialize the BaseRandomLayer.

    Note that the constructor is annotated with
    @no_automatic_dependency_tracking. This is to skip the auto
    tracking of self._random_generator instance, which is an AutoTrackable.
    The backend.RandomGenerator could contain a tf.random.Generator instance
    which will have tf.Variable as the internal state. We want to avoid saving
    that state into model.weights and checkpoints for backward compatibility
    reason. In the meantime, we still need to make them visible to SavedModel
    when it is tracing the tf.function for the `call()`.
    See _list_extra_dependencies_for_serialization below for more details.

    Args:
      seed: optional integer, used to create RandomGenerator.
      force_generator: boolean, default to False, whether to force the
        RandomGenerator to use the code branch of tf.random.Generator.
      **kwargs: other keyword arguments that will be passed to the parent class
    """
    super().__init__(**kwargs)
    self._random_generator = backend.RandomGenerator(
        seed, force_generator=force_generator)

  def _list_extra_dependencies_for_serialization(self, serialization_cache):
    """Expose the untracked random generator to SavedModel tracing."""
    # This method exposes the self._random_generator to SavedModel only
    # (not layer.weights and checkpoint).
    deps = super()._list_extra_dependencies_for_serialization(
        serialization_cache)
    deps['_random_generator'] = self._random_generator
    return deps
# Backwards-compatibility re-export: `InputSpec` now lives in `input_spec`.
# Avoid breaking users who directly import this symbol from this file.
# TODO(fchollet): remove this.
InputSpec = input_spec.InputSpec  # pylint:disable=invalid-name
|
# Dependencies: flask, tornado
from __future__ import absolute_import, division, print_function
# HTTP / HTML
import tornado.wsgi
import tornado.httpserver
import flask
from flask import request, redirect, url_for, make_response
import optparse
import logging
import socket
import simplejson as json
# IBEIS
import ibeis
from ibeis.control.SQLDatabaseControl import (SQLDatabaseController, # NOQA
SQLAtomicContext)
from ibeis.constants import KEY_DEFAULTS, SPECIES_KEY, Species
import utool as ut
# Web Internal
from ibeis.web import appfuncs as ap
# Others
import ibeis.constants as const
import random
# Whether to open a web browser pointed at the server (--browser CLI flag).
BROWSER = ut.get_argflag('--browser')
# Default TCP port for the web server.
DEFAULT_PORT = 5000
# Global Flask application. NOTE(review): the view functions below read
# `app.ibs` (an IBEIS controller), which is presumably attached at startup
# elsewhere — confirm against the server entry point.
app = flask.Flask(__name__)
################################################################################
def encounter_image_processed(gid_list):
    """Return one bool per image gid: True when its reviewed flag equals 1."""
    reviewed_flags = app.ibs.get_image_reviewed(gid_list)
    return [flag == 1 for flag in reviewed_flags]
def encounter_annot_viewpoint_processed(aid_list):
    """Return one bool per annotation aid: True when its yaw has been set."""
    yaw_list = app.ibs.get_annot_yaws(aid_list)
    return [yaw is not None for yaw in yaw_list]
def encounter_annot_quality_processed(aid_list):
    """Return one bool per annotation aid: True when its quality is set and valid.

    A quality counts as processed when it is neither None (never reviewed)
    nor -1.
    """
    quality_list = app.ibs.get_annot_qualities(aid_list)
    # BUG FIX: was `reviewed is not -1`, an identity comparison with an int
    # literal that only works via CPython's small-int cache (and raises a
    # SyntaxWarning on modern Python). Use value inequality instead.
    return [quality is not None and quality != -1 for quality in quality_list]
################################################################################
# @app.after_request
# def add_header(response):
# response.headers['Cache-Control'] = 'public, max-age=%d' % (60 * 60 * 24, )
# return response
@app.route('/')
def root():
    """Serve the site root using the default template."""
    return ap.template(None)
@app.route('/view')
def view():
    """Render the top-level 'view' page with id lists and counts for
    encounters, images, annotations and names."""
    ibs = app.ibs
    eid_list = ibs.get_valid_eids()
    gid_list = ibs.get_valid_gids()
    aid_list = ibs.get_valid_aids()
    nid_list = ibs.get_valid_nids()

    def csv(id_list):
        # Template expects a comma-separated string of ids.
        return ','.join(map(str, id_list))

    return ap.template('view',
                       eid_list=eid_list,
                       eid_list_str=csv(eid_list),
                       num_eids=len(eid_list),
                       gid_list=gid_list,
                       gid_list_str=csv(gid_list),
                       num_gids=len(gid_list),
                       aid_list=aid_list,
                       aid_list_str=csv(aid_list),
                       num_aids=len(aid_list),
                       nid_list=nid_list,
                       nid_list_str=csv(nid_list),
                       num_nids=len(nid_list))
@app.route('/view/encounters')
def view_encounters():
    """Render the encounters listing page.

    Accepts an optional ``eid`` query arg: a comma-separated list of encounter
    ids ('' or 'None' entries map to None).  Without it, all valid encounters
    are shown.  Each row reports review progress for images (detection),
    annotation viewpoints, and annotation qualities.
    """
    filtered = True
    eid = request.args.get('eid', '')
    if len(eid) > 0:
        eid_list = eid.strip().split(',')
        eid_list = [ None if eid_ == 'None' or eid_ == '' else int(eid_) for eid_ in eid_list ]
    else:
        eid_list = app.ibs.get_valid_eids()
        filtered = False
    start_time_posix_list = app.ibs.get_encounter_start_time_posix(eid_list)
    # 'Unknown' placeholder when an encounter has no recorded start time
    datetime_list = [
        ut.unixtime_to_datetime(start_time_posix)
        if start_time_posix is not None else
        'Unknown'
        for start_time_posix in start_time_posix_list
    ]
    gids_list = [ app.ibs.get_valid_gids(eid=eid_) for eid_ in eid_list ]
    aids_list = [ app.ibs.get_valid_aids(include_only_gid_list=gid_list) for gid_list in gids_list ]
    images_reviewed_list = [ encounter_image_processed(gid_list) for gid_list in gids_list ]
    annots_reviewed_viewpoint_list = [ encounter_annot_viewpoint_processed(aid_list) for aid_list in aids_list ]
    annots_reviewed_quality_list = [ encounter_annot_quality_processed(aid_list) for aid_list in aids_list ]
    image_processed_list = [ images_reviewed.count(True) for images_reviewed in images_reviewed_list ]
    annot_processed_viewpoint_list = [ annots_reviewed.count(True) for annots_reviewed in annots_reviewed_viewpoint_list ]
    annot_processed_quality_list = [ annots_reviewed.count(True) for annots_reviewed in annots_reviewed_quality_list ]
    # An encounter counts as reviewed only when all three review passes are complete
    reviewed_list = [ all(images_reviewed) and all(annots_reviewed_viewpoint) and all(annot_processed_quality) for images_reviewed, annots_reviewed_viewpoint, annot_processed_quality in zip(images_reviewed_list, annots_reviewed_viewpoint_list, annots_reviewed_quality_list) ]
    # FIX: materialize the zip into a list so .sort()/len() work on Python 3 as
    # well as Python 2 (where zip already returned a list).
    encounter_list = list(zip(
        eid_list,
        app.ibs.get_encounter_enctext(eid_list),
        app.ibs.get_encounter_num_gids(eid_list),
        image_processed_list,
        app.ibs.get_encounter_num_aids(eid_list),
        annot_processed_viewpoint_list,
        annot_processed_quality_list,
        start_time_posix_list,
        datetime_list,
        reviewed_list,
    ))
    # Sort chronologically by encounter start time (tuple index 7)
    encounter_list.sort(key=lambda t: t[7])
    return ap.template('view', 'encounters',
                       filtered=filtered,
                       eid_list=eid_list,
                       eid_list_str=','.join(map(str, eid_list)),
                       num_eids=len(eid_list),
                       encounter_list=encounter_list,
                       num_encounters=len(encounter_list))
@app.route('/view/images')
def view_images():
    """Render the images listing page.

    Filtering precedence of query args: ``gid`` (comma-separated image ids)
    wins over ``eid`` (comma-separated encounter ids); with neither, all valid
    images are listed.
    """
    filtered = True
    eid_list = []
    gid = request.args.get('gid', '')
    eid = request.args.get('eid', '')
    if len(gid) > 0:
        gid_list = gid.strip().split(',')
        gid_list = [ None if gid_ == 'None' or gid_ == '' else int(gid_) for gid_ in gid_list ]
    elif len(eid) > 0:
        eid_list = eid.strip().split(',')
        eid_list = [ None if eid_ == 'None' or eid_ == '' else int(eid_) for eid_ in eid_list ]
        # BUG FIX: the original passed the raw request string ``eid`` to
        # get_valid_gids; it must use the parsed per-encounter id ``eid_``.
        gid_list = ut.flatten([ app.ibs.get_valid_gids(eid=eid_) for eid_ in eid_list ])
    else:
        gid_list = app.ibs.get_valid_gids()
        filtered = False
    image_unixtime_list = app.ibs.get_image_unixtime(gid_list)
    # 'Unknown' placeholder when an image has no timestamp
    datetime_list = [
        ut.unixtime_to_datetime(image_unixtime)
        if image_unixtime is not None
        else
        'Unknown'
        for image_unixtime in image_unixtime_list
    ]
    # FIX: materialize the zip so .sort()/len() work on Python 2 and 3
    image_list = list(zip(
        gid_list,
        [ eid_list_[0] for eid_list_ in app.ibs.get_image_eids(gid_list) ],
        app.ibs.get_image_gnames(gid_list),
        image_unixtime_list,
        datetime_list,
        app.ibs.get_image_gps(gid_list),
        app.ibs.get_image_party_tag(gid_list),
        app.ibs.get_image_contributor_tag(gid_list),
        app.ibs.get_image_notes(gid_list),
        encounter_image_processed(gid_list),
    ))
    # Sort chronologically by unixtime (tuple index 3)
    image_list.sort(key=lambda t: t[3])
    return ap.template('view', 'images',
                       filtered=filtered,
                       eid_list=eid_list,
                       eid_list_str=','.join(map(str, eid_list)),
                       num_eids=len(eid_list),
                       gid_list=gid_list,
                       gid_list_str=','.join(map(str, gid_list)),
                       num_gids=len(gid_list),
                       image_list=image_list,
                       num_images=len(image_list))
@app.route('/view/annotations')
def view_annotations():
    """Render the annotations listing page.

    Filtering precedence of query args: ``aid`` over ``gid`` over ``eid``
    (each a comma-separated id list); with none given, all valid annotations
    are listed.
    """
    filtered = True
    eid_list = []
    gid_list = []
    aid = request.args.get('aid', '')
    gid = request.args.get('gid', '')
    eid = request.args.get('eid', '')
    if len(aid) > 0:
        aid_list = aid.strip().split(',')
        aid_list = [ None if aid_ == 'None' or aid_ == '' else int(aid_) for aid_ in aid_list ]
    elif len(gid) > 0:
        gid_list = gid.strip().split(',')
        gid_list = [ None if gid_ == 'None' or gid_ == '' else int(gid_) for gid_ in gid_list ]
        aid_list = app.ibs.get_valid_aids(include_only_gid_list=gid_list)
    elif len(eid) > 0:
        eid_list = eid.strip().split(',')
        eid_list = [ None if eid_ == 'None' or eid_ == '' else int(eid_) for eid_ in eid_list ]
        gid_list = ut.flatten([ app.ibs.get_valid_gids(eid=eid_) for eid_ in eid_list ])
        aid_list = app.ibs.get_valid_aids(include_only_gid_list=gid_list)
    else:
        aid_list = app.ibs.get_valid_aids()
        filtered = False
    # FIX: materialize the zip so .sort()/len() work on Python 2 and 3
    annotation_list = list(zip(
        aid_list,
        app.ibs.get_annot_gids(aid_list),
        [ eid_list_[0] for eid_list_ in app.ibs.get_annot_eids(aid_list) ],
        app.ibs.get_annot_image_names(aid_list),
        app.ibs.get_annot_names(aid_list),
        app.ibs.get_annot_exemplar_flags(aid_list),
        app.ibs.get_annot_species_texts(aid_list),
        app.ibs.get_annot_yaw_texts(aid_list),
        app.ibs.get_annot_quality_texts(aid_list),
        [ reviewed_viewpoint and reviewed_quality for reviewed_viewpoint, reviewed_quality in zip(encounter_annot_viewpoint_processed(aid_list), encounter_annot_quality_processed(aid_list)) ],
    ))
    # Sort by annotation id (tuple index 0)
    annotation_list.sort(key=lambda t: t[0])
    return ap.template('view', 'annotations',
                       filtered=filtered,
                       eid_list=eid_list,
                       eid_list_str=','.join(map(str, eid_list)),
                       num_eids=len(eid_list),
                       gid_list=gid_list,
                       gid_list_str=','.join(map(str, gid_list)),
                       num_gids=len(gid_list),
                       aid_list=aid_list,
                       aid_list_str=','.join(map(str, aid_list)),
                       num_aids=len(aid_list),
                       annotation_list=annotation_list,
                       num_annotations=len(annotation_list))
@app.route('/turk')
def turk():
    """Render the turk landing page for an (optional) encounter id."""
    eid = request.args.get('eid', '')
    eid = None if eid == 'None' or eid == '' else int(eid)
    return ap.template('turk', None, eid=eid)
@app.route('/turk/detection')
def turk_detection():
    """Render the detection-turking page for a single image.

    Uses the ``gid`` query arg if given, otherwise picks a random image in
    the encounter that has not yet been reviewed.  Existing annotation boxes
    are scaled to a 700px-wide display and passed to the template.  Any
    exception is rendered via :func:`error404`.
    """
    try:
        eid = request.args.get('eid', '')
        eid = None if eid == 'None' or eid == '' else int(eid)
        gid_list = app.ibs.get_valid_gids(eid=eid)
        reviewed_list = encounter_image_processed(gid_list)
        # NOTE(review): raises ZeroDivisionError if the encounter has no images
        progress = '%0.2f' % (100.0 * reviewed_list.count(True) / len(gid_list), )
        enctext = None if eid is None else app.ibs.get_encounter_enctext(eid)
        gid = request.args.get('gid', '')
        if len(gid) > 0:
            gid = int(gid)
        else:
            gid_list = app.ibs.get_valid_gids(eid=eid)
            reviewed_list = encounter_image_processed(gid_list)
            flag_list = [ not reviewed for reviewed in reviewed_list ]
            gid_list_ = ut.filter_items(gid_list, flag_list)
            if len(gid_list_) == 0:
                gid = None
            else:
                # Randomized so concurrent turkers rarely get the same image
                # gid = gid_list_[0]
                gid = random.choice(gid_list_)
        previous = request.args.get('previous', None)
        finished = gid is None
        review = 'review' in request.args.keys()
        display_instructions = request.cookies.get('detection_instructions_seen', 0) == 0
        display_species_examples = False # request.cookies.get('detection_example_species_seen', 0) == 0
        if not finished:
            gpath = app.ibs.get_image_thumbpath(gid, ensure_paths=True, draw_annots=False)
            image = ap.open_oriented_image(gpath)
            image_src = ap.embed_image_html(image, filter_width=False)
            # Get annotations
            width, height = app.ibs.get_image_sizes(gid)
            # Boxes are presented on a 700px-wide canvas in the browser
            scale_factor = 700.0 / float(width)
            aid_list = app.ibs.get_image_aids(gid)
            annot_bbox_list = app.ibs.get_annot_bboxes(aid_list)
            annot_thetas_list = app.ibs.get_annot_thetas(aid_list)
            species_list = app.ibs.get_annot_species_texts(aid_list)
            # Get annotation bounding boxes
            annotation_list = []
            for annot_bbox, annot_theta, species in zip(annot_bbox_list, annot_thetas_list, species_list):
                temp = {}
                temp['left'] = int(scale_factor * annot_bbox[0])
                temp['top'] = int(scale_factor * annot_bbox[1])
                temp['width'] = int(scale_factor * (annot_bbox[2]))
                temp['height'] = int(scale_factor * (annot_bbox[3]))
                temp['label'] = species
                temp['angle'] = float(annot_theta)
                annotation_list.append(temp)
            if len(species_list) > 0:
                species = max(set(species_list), key=species_list.count) # Get most common species
            elif app.default_species is not None:
                species = app.default_species
            else:
                species = KEY_DEFAULTS[SPECIES_KEY]
        else:
            gpath = None
            species = None
            image_src = None
            annotation_list = []
        return ap.template('turk', 'detection',
                           eid=eid,
                           gid=gid,
                           species=species,
                           image_path=gpath,
                           image_src=image_src,
                           previous=previous,
                           enctext=enctext,
                           progress=progress,
                           finished=finished,
                           annotation_list=annotation_list,
                           display_instructions=display_instructions,
                           display_species_examples=display_species_examples,
                           review=review)
    except Exception as e:
        return error404(e)
@app.route('/turk/viewpoint')
def turk_viewpoint():
    """Render the viewpoint-turking page for a single annotation.

    Uses the ``aid`` query arg if given, otherwise picks a random annotation
    in the encounter whose yaw has not yet been set.  Any exception is
    rendered via :func:`error404`.
    """
    try:
        eid = request.args.get('eid', '')
        eid = None if eid == 'None' or eid == '' else int(eid)
        gid_list = app.ibs.get_valid_gids(eid=eid)
        aid_list = app.ibs.get_valid_aids(include_only_gid_list=gid_list)
        reviewed_list = encounter_annot_viewpoint_processed(aid_list)
        # NOTE(review): raises ZeroDivisionError if the encounter has no annotations
        progress = '%0.2f' % (100.0 * reviewed_list.count(True) / len(aid_list), )
        enctext = None if eid is None else app.ibs.get_encounter_enctext(eid)
        aid = request.args.get('aid', '')
        if len(aid) > 0:
            aid = int(aid)
        else:
            gid_list = app.ibs.get_valid_gids(eid=eid)
            aid_list = app.ibs.get_valid_aids(include_only_gid_list=gid_list)
            reviewed_list = encounter_annot_viewpoint_processed(aid_list)
            flag_list = [ not reviewed for reviewed in reviewed_list ]
            aid_list_ = ut.filter_items(aid_list, flag_list)
            if len(aid_list_) == 0:
                aid = None
            else:
                # Randomized so concurrent turkers rarely get the same annotation
                # aid = aid_list_[0]
                aid = random.choice(aid_list_)
        previous = request.args.get('previous', None)
        value = request.args.get('value', None)
        review = 'review' in request.args.keys()
        finished = aid is None
        display_instructions = request.cookies.get('viewpoint_instructions_seen', 0) == 0
        if not finished:
            gid = app.ibs.get_annot_gids(aid)
            gpath = app.ibs.get_annot_chip_fpaths(aid)
            image = ap.open_oriented_image(gpath)
            image_src = ap.embed_image_html(image)
        else:
            gid = None
            gpath = None
            image_src = None
        return ap.template('turk', 'viewpoint',
                           eid=eid,
                           gid=gid,
                           aid=aid,
                           value=value,
                           image_path=gpath,
                           image_src=image_src,
                           previous=previous,
                           enctext=enctext,
                           progress=progress,
                           finished=finished,
                           display_instructions=display_instructions,
                           review=review)
    except Exception as e:
        return error404(e)
@app.route('/turk/quality')
def turk_quality():
    """Render the quality-turking page for a single annotation.

    Uses the ``aid`` query arg if given, otherwise picks a random annotation
    in the encounter that has not yet been quality-reviewed.  Any exception
    is rendered via :func:`error404`.
    """
    try:
        eid = request.args.get('eid', '')
        eid = None if eid == 'None' or eid == '' else int(eid)
        gid_list = app.ibs.get_valid_gids(eid=eid)
        aid_list = app.ibs.get_valid_aids(include_only_gid_list=gid_list)
        # BUG FIX: progress previously counted *viewpoint* reviews on the
        # quality page; count quality reviews instead.  Leftover
        # '[web] TEST n' debug prints were also removed.
        reviewed_list = encounter_annot_quality_processed(aid_list)
        # Guard the denominator so an encounter with zero annotations does
        # not raise ZeroDivisionError.
        progress = '%0.2f' % (100.0 * reviewed_list.count(True) / max(len(aid_list), 1), )
        enctext = None if eid is None else app.ibs.get_encounter_enctext(eid)
        aid = request.args.get('aid', '')
        if len(aid) > 0:
            aid = int(aid)
        else:
            flag_list = [ not reviewed for reviewed in reviewed_list ]
            aid_list_ = ut.filter_items(aid_list, flag_list)
            if len(aid_list_) == 0:
                aid = None
            else:
                # Randomized so concurrent turkers rarely get the same annotation
                aid = random.choice(aid_list_)
        previous = request.args.get('previous', None)
        value = request.args.get('value', None)
        review = 'review' in request.args.keys()
        finished = aid is None
        # display_instructions = request.cookies.get('quality_instructions_seen', 0) == 0
        display_instructions = False
        if not finished:
            gid = app.ibs.get_annot_gids(aid)
            gpath = app.ibs.get_annot_chip_fpaths(aid)
            image = ap.open_oriented_image(gpath)
            image_src = ap.embed_image_html(image)
        else:
            gid = None
            gpath = None
            image_src = None
        return ap.template('turk', 'quality',
                           eid=eid,
                           gid=gid,
                           aid=aid,
                           value=value,
                           image_path=gpath,
                           image_src=image_src,
                           previous=previous,
                           enctext=enctext,
                           progress=progress,
                           finished=finished,
                           display_instructions=display_instructions,
                           review=review)
    except Exception as e:
        return error404(e)
@app.route('/submit/detection', methods=['POST'])
def submit_detection():
    """Handle the detection-turk form POST.

    The 'detection-submit' field selects the action: 'delete' (intentionally
    disabled), 'clear' (remove all annotations on the image), or the default
    submit (replace the image's annotations with the posted boxes and mark
    the image reviewed).  Redirects to the referrer or the next turk page.
    """
    method = request.form.get('detection-submit', '')
    eid = request.args.get('eid', '')
    eid = None if eid == 'None' or eid == '' else int(eid)
    gid = int(request.form['detection-gid'])
    turk_id = request.cookies.get('turk_id', -1)
    if method.lower() == 'delete':
        # Image deletion by turkers is disabled on purpose
        # app.ibs.delete_images(gid)
        # print('[web] (DELETED) turk_id: %s, gid: %d' % (turk_id, gid, ))
        pass
    elif method.lower() == 'clear':
        aid_list = app.ibs.get_image_aids(gid)
        app.ibs.delete_annots(aid_list)
        # FIX: corrected 'CLEAERED' typo in the log message
        print('[web] (CLEARED) turk_id: %s, gid: %d' % (turk_id, gid, ))
        redirection = request.referrer
        if 'gid' not in redirection:
            # Prevent multiple clears
            if '?' in redirection:
                redirection = '%s&gid=%d' % (redirection, gid, )
            else:
                redirection = '%s?gid=%d' % (redirection, gid, )
        return redirect(redirection)
    else:
        aid_list = app.ibs.get_image_aids(gid)
        # Make new annotations
        width, height = app.ibs.get_image_sizes(gid)
        # Posted coordinates are relative to a 700px-wide display canvas
        scale_factor = float(width) / 700.0
        # Replace any existing annotations with the submitted ones
        app.ibs.delete_annots(aid_list)
        annotation_list = json.loads(request.form['detection-annotations'])
        bbox_list = [
            (
                int(scale_factor * annot['left']),
                int(scale_factor * annot['top']),
                int(scale_factor * annot['width']),
                int(scale_factor * annot['height']),
            )
            for annot in annotation_list
        ]
        theta_list = [
            float(annot['angle'])
            for annot in annotation_list
        ]
        species_list = [
            annot['label']
            for annot in annotation_list
        ]
        app.ibs.add_annots([gid] * len(annotation_list), bbox_list, theta_list=theta_list, species_list=species_list)
        app.ibs.set_image_reviewed([gid], [1])
        print('[web] turk_id: %s, gid: %d, bbox_list: %r, species_list: %r' % (turk_id, gid, annotation_list, species_list))
    # Return HTML
    refer = request.args.get('refer', '')
    if len(refer) > 0:
        return redirect(ap.decode_refer_url(refer))
    else:
        return redirect(url_for('turk_detection', eid=eid, previous=gid))
@app.route('/submit/viewpoint', methods=['POST'])
def submit_viewpoint():
    """Handle the viewpoint-turk form POST: delete the annotation, or store
    its yaw converted from the posted degree value, then redirect."""
    method = request.form.get('detection-submit', '')
    eid = request.args.get('eid', '')
    eid = None if eid == 'None' or eid == '' else int(eid)
    aid = int(request.form['viewpoint-aid'])
    turk_id = request.cookies.get('turk_id', -1)
    if method.lower() == 'delete':
        app.ibs.delete_annots(aid)
        print('[web] (DELETED) turk_id: %s, aid: %d' % (turk_id, aid, ))
        aid = None # Reset AID to prevent previous
    else:
        value = int(request.form['viewpoint-value'])
        def convert_old_viewpoint_to_yaw(view_angle):
            ''' we initially had viewpoint coordinates inverted
            Example:
                >>> import math
                >>> TAU = 2 * math.pi
                >>> old_viewpoint_labels = [
                >>>     ('left'       , 0.000 * TAU,),
                >>>     ('frontleft'  , 0.125 * TAU,),
                >>>     ('front'      , 0.250 * TAU,),
                >>>     ('frontright' , 0.375 * TAU,),
                >>>     ('right'      , 0.500 * TAU,),
                >>>     ('backright'  , 0.625 * TAU,),
                >>>     ('back'       , 0.750 * TAU,),
                >>>     ('backleft'   , 0.875 * TAU,),
                >>> ]
                >>> fmtstr = 'old %15r %.2f -> new %15r %.2f'
                >>> for lbl, angle in old_viewpoint_labels:
                >>>     print(fmtstr % (lbl, angle, lbl, convert_old_viewpoint_to_yaw(angle)))
            '''
            if view_angle is None:
                return None
            # Mirror the angle about TAU/2 to undo the historical inversion
            yaw = (-view_angle + (const.TAU / 2)) % const.TAU
            return yaw
        yaw = convert_old_viewpoint_to_yaw(ut.deg_to_rad(value))
        app.ibs.set_annot_yaws([aid], [yaw], input_is_degrees=False)
        print('[web] turk_id: %s, aid: %d, yaw: %d' % (turk_id, aid, yaw))
    # Return HTML
    refer = request.args.get('refer', '')
    if len(refer) > 0:
        return redirect(ap.decode_refer_url(refer))
    else:
        return redirect(url_for('turk_viewpoint', eid=eid, previous=aid))
@app.route('/submit/quality', methods=['POST'])
def submit_quality():
    """Handle the quality-turk form POST: delete the annotation, or persist
    its quality value, then redirect to the referrer or the next page."""
    submit_method = request.form.get('detection-submit', '')
    eid_arg = request.args.get('eid', '')
    eid = None if eid_arg == 'None' or eid_arg == '' else int(eid_arg)
    aid = int(request.form['quality-aid'])
    turk_id = request.cookies.get('turk_id', -1)
    if submit_method.lower() == 'delete':
        app.ibs.delete_annots(aid)
        print('[web] (DELETED) turk_id: %s, aid: %d' % (turk_id, aid, ))
        # Clear the aid so it is not offered as 'previous' after deletion
        aid = None
    else:
        quality = int(request.form['quality-value'])
        app.ibs.set_annot_qualities([aid], [quality])
        print('[web] turk_id: %s, aid: %d, quality: %d' % (turk_id, aid, quality))
    # Return HTML
    refer = request.args.get('refer', '')
    if len(refer) == 0:
        return redirect(url_for('turk_quality', eid=eid, previous=aid))
    return redirect(ap.decode_refer_url(refer))
@app.route('/ajax/cookie')
def set_cookie():
    """AJAX endpoint that sets a cookie from the ``name``/``value`` query args.

    Returns 'true' on success and 'false' when the arguments are missing or
    the cookie cannot be set.
    """
    response = make_response('true')
    try:
        response.set_cookie(request.args['name'], request.args['value'])
        print('[web] Set Cookie: %r -> %r' % (request.args['name'], request.args['value'], ))
        return response
    # FIX: catch Exception instead of a bare except so SystemExit and
    # KeyboardInterrupt still propagate.
    except Exception:
        print('[web] COOKIE FAILED: %r' % (request.args, ))
        return make_response('false')
@app.route('/ajax/image/src/<gid>')
def image_src(gid=None):
    """AJAX endpoint serving an image's thumbnail as a src payload."""
    # gpath = app.ibs.get_image_paths(gid)
    gpath = app.ibs.get_image_thumbpath(gid, ensure_paths=True)
    return ap.return_src(gpath)
@app.route('/ajax/annotation/src/<aid>')
def annotation_src(aid=None):
    """AJAX endpoint serving an annotation's chip image as a src payload."""
    gpath = app.ibs.get_annot_chip_fpaths(aid)
    return ap.return_src(gpath)
@app.route('/api')
@app.route('/api/<function>.json', methods=['GET', 'POST'])
def api(function=None):
    """JSON API endpoint dispatching to a whitelisted IBEIS controller method.

    ``/api`` returns a usage message; ``/api/<function>.json`` invokes
    ``app.ibs.<function>()`` with no arguments and returns its result wrapped
    in a status envelope.
    """
    template = {
        'status': {
            'success': False,
            'code': '',
        },
    }
    print('[web] Function:', function)
    print('[web] POST:', dict(request.form))
    print('[web] GET:', dict(request.args))
    if function is None:
        template['status']['success'] = True
        template['status']['code'] = 'USAGE: /api/[ibeis_function_name].json'
    else:
        function = function.lower()
        if ap.check_valid_function_name(function):
            # SECURITY FIX: dispatch via getattr instead of eval'ing a string
            # built from the URL; the name is still whitelist-validated above.
            func = getattr(app.ibs, function, None)
            if func is not None:
                ret = func()
                template['status']['success'] = True
                template['function'] = 'app.ibs.%s' % (function, )
                template['return'] = ret
            else:
                template['status']['success'] = False
                template['status']['code'] = 'ERROR: Specified IBEIS function not visible or implemented'
        else:
            template['status']['success'] = False
            template['status']['code'] = 'ERROR: Specified IBEIS function not valid Python function'
    return json.dumps(template)
@app.route('/404')
def error404(exception):
    """Log the exception and render the generic 404/error page."""
    print('[web] %r' % (exception, ))
    return ap.template(None, '404')
################################################################################
def start_tornado(app, port=5000, browser=BROWSER, blocking=False, reset_db=True):
    """Serve the Flask ``app`` over a blocking Tornado HTTP server.

    Args:
        app: the Flask application (with ``app.ibs`` attached).
        port (int): TCP port to listen on.
        browser (bool): if True, open the server URL in a web browser first.
        blocking, reset_db: currently unused; kept for interface compatibility.
    """
    def _start_tornado():
        # Wrap the WSGI app and enter the Tornado IO loop (blocks forever)
        http_server = tornado.httpserver.HTTPServer(
            tornado.wsgi.WSGIContainer(app))
        http_server.listen(port)
        tornado.ioloop.IOLoop.instance().start()
    # Initialize the web server
    logging.getLogger().setLevel(logging.INFO)
    try:
        app.server_ip_address = socket.gethostbyname(socket.gethostname())
        app.port = port
    # FIX: catch Exception rather than a bare except so Ctrl-C still works;
    # fall back to loopback when hostname resolution fails.
    except Exception:
        app.server_ip_address = '127.0.0.1'
        app.port = port
    url = 'http://%s:%s' % (app.server_ip_address, app.port)
    print('[web] Tornado server starting at %s' % (url,))
    if browser:
        import webbrowser
        webbrowser.open(url)
    # Blocking
    _start_tornado()
def start_from_terminal():
    '''
    Parse command line options and start the server.
    '''
    parser = optparse.OptionParser()
    parser.add_option(
        '-p', '--port',
        help='which port to serve content on',
        type='int', default=DEFAULT_PORT)
    parser.add_option(
        '--db',
        help='specify an IBEIS database',
        type='str', default='testdb0')
    opts, args = parser.parse_args()
    # Open the requested database and attach it to the Flask app
    app.ibs = ibeis.opendb(db=opts.db)
    # Warm caches up front so page loads don't block on thumbnail/chip generation
    print('[web] Pre-computing all image thumbnails...')
    app.ibs.compute_all_thumbs()
    print('[web] Pre-computing all image thumbnails (without annots)...')
    app.ibs.compute_all_thumbs(draw_annots=False)
    print('[web] Pre-computing all annotation chips...')
    app.ibs.compute_all_chips()
    start_tornado(app, opts.port)
def start_from_ibeis(ibs, port=DEFAULT_PORT):
    '''
    Parse command line options and start the server.
    '''
    # Exact database names mapped to their default species
    exact_species_map = {
        'CHTA_Master': Species.CHEETAH,
        'ELPH_Master': Species.ELEPHANT_SAV,
        'GIR_Master': Species.GIRAFFE,
        'GZ_Master': Species.ZEB_GREVY,
        'LION_Master': Species.LION,
        'PZ_Master': Species.ZEB_PLAIN,
        'WD_Master': Species.WILDDOG,
    }
    dbname = ibs.get_dbname()
    if dbname in exact_species_map:
        app.default_species = exact_species_map[dbname]
    elif 'NNP_' in dbname or 'GZC' in dbname:
        # Known plains-zebra database families (substring match)
        app.default_species = Species.ZEB_PLAIN
    else:
        app.default_species = None
    print('[web] DEFAULT SPECIES: %r' % (app.default_species))
    app.ibs = ibs
    # Warm caches up front so page loads don't block on thumbnail/chip generation
    print('[web] Pre-computing all image thumbnails (with annots)...')
    app.ibs.compute_all_thumbs()
    print('[web] Pre-computing all image thumbnails (without annots)...')
    app.ibs.compute_all_thumbs(draw_annots=False)
    print('[web] Pre-computing all annotation chips...')
    app.ibs.compute_all_chips()
    start_tornado(app, port)
# Script entry point: start the server using command-line options.
if __name__ == '__main__':
    start_from_terminal()
# hotfix  (stray artifact line between two concatenated copies of this module; commented out so the file parses)
# Dependencies: flask, tornado
from __future__ import absolute_import, division, print_function
# HTTP / HTML
import tornado.wsgi
import tornado.httpserver
import flask
from flask import request, redirect, url_for, make_response
import optparse
import logging
import socket
import simplejson as json
# IBEIS
import ibeis
from ibeis.control.SQLDatabaseControl import (SQLDatabaseController, # NOQA
SQLAtomicContext)
from ibeis.constants import KEY_DEFAULTS, SPECIES_KEY, Species
import utool as ut
# Web Internal
from ibeis.web import appfuncs as ap
# Others
import ibeis.constants as const
import random
# --browser CLI flag: open a web browser pointed at the server on startup.
BROWSER = ut.get_argflag('--browser')
# Default TCP port the web server listens on.
DEFAULT_PORT = 5000
# The Flask application; app.ibs and app.default_species are attached at startup.
app = flask.Flask(__name__)
################################################################################
def encounter_image_processed(gid_list):
    """Return a bool per image in gid_list: True iff its reviewed flag equals 1."""
    images_reviewed = [ reviewed == 1 for reviewed in app.ibs.get_image_reviewed(gid_list) ]
    return images_reviewed
def encounter_annot_viewpoint_processed(aid_list):
    """Return a bool per annotation in aid_list: True iff a yaw (viewpoint) is set."""
    annots_reviewed = [ reviewed is not None for reviewed in app.ibs.get_annot_yaws(aid_list) ]
    return annots_reviewed
def encounter_annot_quality_processed(aid_list):
    """Return a bool per annotation in aid_list: True iff a quality is set and not -1.

    Bug fix: the original used ``reviewed is not -1`` — an *identity* test that
    only happens to work for CPython's interned small ints; value comparison
    (``!=``) is the correct operator here.
    """
    quality_list = app.ibs.get_annot_qualities(aid_list)
    return [quality is not None and quality != -1 for quality in quality_list]
################################################################################
# @app.after_request
# def add_header(response):
# response.headers['Cache-Control'] = 'public, max-age=%d' % (60 * 60 * 24, )
# return response
@app.route('/')
def root():
    """Render the site landing page (no specific template sub-page)."""
    return ap.template(None)
@app.route('/view')
def view():
    """Render the top-level 'view' summary page (id lists and counts for
    encounters, images, annotations and names)."""
    eid_list = app.ibs.get_valid_eids()
    gid_list = app.ibs.get_valid_gids()
    aid_list = app.ibs.get_valid_aids()
    nid_list = app.ibs.get_valid_nids()
    return ap.template('view',
                       eid_list=eid_list,
                       eid_list_str=','.join(map(str, eid_list)),
                       num_eids=len(eid_list),
                       gid_list=gid_list,
                       gid_list_str=','.join(map(str, gid_list)),
                       num_gids=len(gid_list),
                       aid_list=aid_list,
                       aid_list_str=','.join(map(str, aid_list)),
                       num_aids=len(aid_list),
                       nid_list=nid_list,
                       nid_list_str=','.join(map(str, nid_list)),
                       num_nids=len(nid_list))
@app.route('/view/encounters')
def view_encounters():
    """Render the encounters listing page.

    Accepts an optional ``eid`` query arg: a comma-separated list of encounter
    ids ('' or 'None' entries map to None).  Without it, all valid encounters
    are shown with per-encounter review-progress counts.
    """
    filtered = True
    eid = request.args.get('eid', '')
    if len(eid) > 0:
        eid_list = eid.strip().split(',')
        eid_list = [ None if eid_ == 'None' or eid_ == '' else int(eid_) for eid_ in eid_list ]
    else:
        eid_list = app.ibs.get_valid_eids()
        filtered = False
    start_time_posix_list = app.ibs.get_encounter_start_time_posix(eid_list)
    # 'Unknown' placeholder when an encounter has no recorded start time
    datetime_list = [
        ut.unixtime_to_datetime(start_time_posix)
        if start_time_posix is not None else
        'Unknown'
        for start_time_posix in start_time_posix_list
    ]
    gids_list = [ app.ibs.get_valid_gids(eid=eid_) for eid_ in eid_list ]
    aids_list = [ app.ibs.get_valid_aids(include_only_gid_list=gid_list) for gid_list in gids_list ]
    images_reviewed_list = [ encounter_image_processed(gid_list) for gid_list in gids_list ]
    annots_reviewed_viewpoint_list = [ encounter_annot_viewpoint_processed(aid_list) for aid_list in aids_list ]
    annots_reviewed_quality_list = [ encounter_annot_quality_processed(aid_list) for aid_list in aids_list ]
    image_processed_list = [ images_reviewed.count(True) for images_reviewed in images_reviewed_list ]
    annot_processed_viewpoint_list = [ annots_reviewed.count(True) for annots_reviewed in annots_reviewed_viewpoint_list ]
    annot_processed_quality_list = [ annots_reviewed.count(True) for annots_reviewed in annots_reviewed_quality_list ]
    # An encounter counts as reviewed only when all three review passes are complete
    reviewed_list = [ all(images_reviewed) and all(annots_reviewed_viewpoint) and all(annot_processed_quality) for images_reviewed, annots_reviewed_viewpoint, annot_processed_quality in zip(images_reviewed_list, annots_reviewed_viewpoint_list, annots_reviewed_quality_list) ]
    # NOTE(review): zip(...).sort() only works on Python 2, where zip returns a list
    encounter_list = zip(
        eid_list,
        app.ibs.get_encounter_enctext(eid_list),
        app.ibs.get_encounter_num_gids(eid_list),
        image_processed_list,
        app.ibs.get_encounter_num_aids(eid_list),
        annot_processed_viewpoint_list,
        annot_processed_quality_list,
        start_time_posix_list,
        datetime_list,
        reviewed_list,
    )
    # Sort chronologically by encounter start time (tuple index 7)
    encounter_list.sort(key=lambda t: t[7])
    return ap.template('view', 'encounters',
                       filtered=filtered,
                       eid_list=eid_list,
                       eid_list_str=','.join(map(str, eid_list)),
                       num_eids=len(eid_list),
                       encounter_list=encounter_list,
                       num_encounters=len(encounter_list))
@app.route('/view/images')
def view_images():
    """Render the images listing page.

    Filtering precedence of query args: ``gid`` (comma-separated image ids)
    wins over ``eid`` (comma-separated encounter ids); with neither, all valid
    images are listed.
    """
    filtered = True
    eid_list = []
    gid = request.args.get('gid', '')
    eid = request.args.get('eid', '')
    if len(gid) > 0:
        gid_list = gid.strip().split(',')
        gid_list = [ None if gid_ == 'None' or gid_ == '' else int(gid_) for gid_ in gid_list ]
    elif len(eid) > 0:
        eid_list = eid.strip().split(',')
        eid_list = [ None if eid_ == 'None' or eid_ == '' else int(eid_) for eid_ in eid_list ]
        # BUG FIX: the original passed the raw request string ``eid`` to
        # get_valid_gids; it must use the parsed per-encounter id ``eid_``.
        gid_list = ut.flatten([ app.ibs.get_valid_gids(eid=eid_) for eid_ in eid_list ])
    else:
        gid_list = app.ibs.get_valid_gids()
        filtered = False
    image_unixtime_list = app.ibs.get_image_unixtime(gid_list)
    # 'Unknown' placeholder when an image has no timestamp
    datetime_list = [
        ut.unixtime_to_datetime(image_unixtime)
        if image_unixtime is not None
        else
        'Unknown'
        for image_unixtime in image_unixtime_list
    ]
    # FIX: materialize the zip so .sort()/len() work on Python 2 and 3
    image_list = list(zip(
        gid_list,
        [ eid_list_[0] for eid_list_ in app.ibs.get_image_eids(gid_list) ],
        app.ibs.get_image_gnames(gid_list),
        image_unixtime_list,
        datetime_list,
        app.ibs.get_image_gps(gid_list),
        app.ibs.get_image_party_tag(gid_list),
        app.ibs.get_image_contributor_tag(gid_list),
        app.ibs.get_image_notes(gid_list),
        encounter_image_processed(gid_list),
    ))
    # Sort chronologically by unixtime (tuple index 3)
    image_list.sort(key=lambda t: t[3])
    return ap.template('view', 'images',
                       filtered=filtered,
                       eid_list=eid_list,
                       eid_list_str=','.join(map(str, eid_list)),
                       num_eids=len(eid_list),
                       gid_list=gid_list,
                       gid_list_str=','.join(map(str, gid_list)),
                       num_gids=len(gid_list),
                       image_list=image_list,
                       num_images=len(image_list))
@app.route('/view/annotations')
def view_annotations():
    """Render the annotations listing page.

    Filtering precedence of query args: ``aid`` over ``gid`` over ``eid``
    (each a comma-separated id list); with none given, all valid annotations
    are listed.
    """
    filtered = True
    eid_list = []
    gid_list = []
    aid = request.args.get('aid', '')
    gid = request.args.get('gid', '')
    eid = request.args.get('eid', '')
    if len(aid) > 0:
        aid_list = aid.strip().split(',')
        aid_list = [ None if aid_ == 'None' or aid_ == '' else int(aid_) for aid_ in aid_list ]
    elif len(gid) > 0:
        gid_list = gid.strip().split(',')
        gid_list = [ None if gid_ == 'None' or gid_ == '' else int(gid_) for gid_ in gid_list ]
        aid_list = app.ibs.get_valid_aids(include_only_gid_list=gid_list)
    elif len(eid) > 0:
        eid_list = eid.strip().split(',')
        eid_list = [ None if eid_ == 'None' or eid_ == '' else int(eid_) for eid_ in eid_list ]
        gid_list = ut.flatten([ app.ibs.get_valid_gids(eid=eid_) for eid_ in eid_list ])
        aid_list = app.ibs.get_valid_aids(include_only_gid_list=gid_list)
    else:
        aid_list = app.ibs.get_valid_aids()
        filtered = False
    # NOTE(review): zip(...).sort() only works on Python 2, where zip returns a list
    annotation_list = zip(
        aid_list,
        app.ibs.get_annot_gids(aid_list),
        [ eid_list_[0] for eid_list_ in app.ibs.get_annot_eids(aid_list) ],
        app.ibs.get_annot_image_names(aid_list),
        app.ibs.get_annot_names(aid_list),
        app.ibs.get_annot_exemplar_flags(aid_list),
        app.ibs.get_annot_species_texts(aid_list),
        app.ibs.get_annot_yaw_texts(aid_list),
        app.ibs.get_annot_quality_texts(aid_list),
        [ reviewed_viewpoint and reviewed_quality for reviewed_viewpoint, reviewed_quality in zip(encounter_annot_viewpoint_processed(aid_list), encounter_annot_quality_processed(aid_list)) ],
    )
    # Sort by annotation id (tuple index 0)
    annotation_list.sort(key=lambda t: t[0])
    return ap.template('view', 'annotations',
                       filtered=filtered,
                       eid_list=eid_list,
                       eid_list_str=','.join(map(str, eid_list)),
                       num_eids=len(eid_list),
                       gid_list=gid_list,
                       gid_list_str=','.join(map(str, gid_list)),
                       num_gids=len(gid_list),
                       aid_list=aid_list,
                       aid_list_str=','.join(map(str, aid_list)),
                       num_aids=len(aid_list),
                       annotation_list=annotation_list,
                       num_annotations=len(annotation_list))
@app.route('/turk')
def turk():
    """Render the turk landing page for an (optional) encounter id."""
    eid = request.args.get('eid', '')
    eid = None if eid == 'None' or eid == '' else int(eid)
    return ap.template('turk', None, eid=eid)
@app.route('/turk/detection')
def turk_detection():
    """Render the detection-turking page for a single image.

    Uses the ``gid`` query arg if given, otherwise picks a random image in
    the encounter that has not yet been reviewed.  Existing annotation boxes
    are scaled to a 700px-wide display and passed to the template.  Any
    exception is rendered via :func:`error404`.
    """
    try:
        eid = request.args.get('eid', '')
        eid = None if eid == 'None' or eid == '' else int(eid)
        gid_list = app.ibs.get_valid_gids(eid=eid)
        reviewed_list = encounter_image_processed(gid_list)
        # NOTE(review): raises ZeroDivisionError if the encounter has no images
        progress = '%0.2f' % (100.0 * reviewed_list.count(True) / len(gid_list), )
        enctext = None if eid is None else app.ibs.get_encounter_enctext(eid)
        gid = request.args.get('gid', '')
        if len(gid) > 0:
            gid = int(gid)
        else:
            gid_list = app.ibs.get_valid_gids(eid=eid)
            reviewed_list = encounter_image_processed(gid_list)
            flag_list = [ not reviewed for reviewed in reviewed_list ]
            gid_list_ = ut.filter_items(gid_list, flag_list)
            if len(gid_list_) == 0:
                gid = None
            else:
                # Randomized so concurrent turkers rarely get the same image
                # gid = gid_list_[0]
                gid = random.choice(gid_list_)
        previous = request.args.get('previous', None)
        finished = gid is None
        review = 'review' in request.args.keys()
        display_instructions = request.cookies.get('detection_instructions_seen', 0) == 0
        display_species_examples = False # request.cookies.get('detection_example_species_seen', 0) == 0
        if not finished:
            gpath = app.ibs.get_image_thumbpath(gid, ensure_paths=True, draw_annots=False)
            image = ap.open_oriented_image(gpath)
            image_src = ap.embed_image_html(image, filter_width=False)
            # Get annotations
            width, height = app.ibs.get_image_sizes(gid)
            # Boxes are presented on a 700px-wide canvas in the browser
            scale_factor = 700.0 / float(width)
            aid_list = app.ibs.get_image_aids(gid)
            annot_bbox_list = app.ibs.get_annot_bboxes(aid_list)
            annot_thetas_list = app.ibs.get_annot_thetas(aid_list)
            species_list = app.ibs.get_annot_species_texts(aid_list)
            # Get annotation bounding boxes
            annotation_list = []
            for annot_bbox, annot_theta, species in zip(annot_bbox_list, annot_thetas_list, species_list):
                temp = {}
                temp['left'] = int(scale_factor * annot_bbox[0])
                temp['top'] = int(scale_factor * annot_bbox[1])
                temp['width'] = int(scale_factor * (annot_bbox[2]))
                temp['height'] = int(scale_factor * (annot_bbox[3]))
                temp['label'] = species
                temp['angle'] = float(annot_theta)
                annotation_list.append(temp)
            if len(species_list) > 0:
                species = max(set(species_list), key=species_list.count) # Get most common species
            elif app.default_species is not None:
                species = app.default_species
            else:
                species = KEY_DEFAULTS[SPECIES_KEY]
        else:
            gpath = None
            species = None
            image_src = None
            annotation_list = []
        return ap.template('turk', 'detection',
                           eid=eid,
                           gid=gid,
                           species=species,
                           image_path=gpath,
                           image_src=image_src,
                           previous=previous,
                           enctext=enctext,
                           progress=progress,
                           finished=finished,
                           annotation_list=annotation_list,
                           display_instructions=display_instructions,
                           display_species_examples=display_species_examples,
                           review=review)
    except Exception as e:
        return error404(e)
@app.route('/turk/viewpoint')
def turk_viewpoint():
    """Render the viewpoint-turking page for a single annotation.

    Uses the ``aid`` query arg if given, otherwise picks a random annotation
    in the encounter whose yaw has not yet been set.  Any exception is
    rendered via :func:`error404`.
    """
    try:
        eid = request.args.get('eid', '')
        eid = None if eid == 'None' or eid == '' else int(eid)
        gid_list = app.ibs.get_valid_gids(eid=eid)
        aid_list = app.ibs.get_valid_aids(include_only_gid_list=gid_list)
        reviewed_list = encounter_annot_viewpoint_processed(aid_list)
        # NOTE(review): raises ZeroDivisionError if the encounter has no annotations
        progress = '%0.2f' % (100.0 * reviewed_list.count(True) / len(aid_list), )
        enctext = None if eid is None else app.ibs.get_encounter_enctext(eid)
        aid = request.args.get('aid', '')
        if len(aid) > 0:
            aid = int(aid)
        else:
            gid_list = app.ibs.get_valid_gids(eid=eid)
            aid_list = app.ibs.get_valid_aids(include_only_gid_list=gid_list)
            reviewed_list = encounter_annot_viewpoint_processed(aid_list)
            flag_list = [ not reviewed for reviewed in reviewed_list ]
            aid_list_ = ut.filter_items(aid_list, flag_list)
            if len(aid_list_) == 0:
                aid = None
            else:
                # Randomized so concurrent turkers rarely get the same annotation
                # aid = aid_list_[0]
                aid = random.choice(aid_list_)
        previous = request.args.get('previous', None)
        value = request.args.get('value', None)
        review = 'review' in request.args.keys()
        finished = aid is None
        display_instructions = request.cookies.get('viewpoint_instructions_seen', 0) == 0
        if not finished:
            gid = app.ibs.get_annot_gids(aid)
            gpath = app.ibs.get_annot_chip_fpaths(aid)
            image = ap.open_oriented_image(gpath)
            image_src = ap.embed_image_html(image)
        else:
            gid = None
            gpath = None
            image_src = None
        return ap.template('turk', 'viewpoint',
                           eid=eid,
                           gid=gid,
                           aid=aid,
                           value=value,
                           image_path=gpath,
                           image_src=image_src,
                           previous=previous,
                           enctext=enctext,
                           progress=progress,
                           finished=finished,
                           display_instructions=display_instructions,
                           review=review)
    except Exception as e:
        return error404(e)
@app.route('/turk/quality')
def turk_quality():
    """Serve the quality-annotation turking page.

    Renders the 'turk/quality' template for the annotation given by the
    'aid' query arg, or for a random annotation whose quality has not been
    reviewed yet within the requested encounter ('eid').
    """
    try:
        eid = request.args.get('eid', '')
        eid = None if eid == 'None' or eid == '' else int(eid)
        gid_list = app.ibs.get_valid_gids(eid=eid)
        aid_list = app.ibs.get_valid_aids(include_only_gid_list=gid_list)
        # BUGFIX: progress was computed from the *viewpoint*-processed list;
        # on the quality page it must count quality-processed annotations.
        reviewed_list = encounter_annot_quality_processed(aid_list)
        # Guard: an encounter with no annotations previously raised
        # ZeroDivisionError here.
        if len(aid_list) == 0:
            progress = '0.00'
        else:
            progress = '%0.2f' % (100.0 * reviewed_list.count(True) / len(aid_list), )
        enctext = None if eid is None else app.ibs.get_encounter_enctext(eid)
        aid = request.args.get('aid', '')
        if len(aid) > 0:
            aid = int(aid)
        else:
            # Re-use the lists computed above instead of re-querying the DB.
            flag_list = [not reviewed for reviewed in reviewed_list]
            aid_list_ = ut.filter_items(aid_list, flag_list)
            if len(aid_list_) == 0:
                aid = None
            else:
                aid = random.choice(aid_list_)
        previous = request.args.get('previous', None)
        value = request.args.get('value', None)
        review = 'review' in request.args.keys()
        finished = aid is None
        # display_instructions = request.cookies.get('quality_instructions_seen', 0) == 0
        display_instructions = False
        if not finished:
            gid = app.ibs.get_annot_gids(aid)
            gpath = app.ibs.get_annot_chip_fpaths(aid)
            image = ap.open_oriented_image(gpath)
            image_src = ap.embed_image_html(image)
        else:
            gid = None
            gpath = None
            image_src = None
        return ap.template('turk', 'quality',
                           eid=eid,
                           gid=gid,
                           aid=aid,
                           value=value,
                           image_path=gpath,
                           image_src=image_src,
                           previous=previous,
                           enctext=enctext,
                           progress=progress,
                           finished=finished,
                           display_instructions=display_instructions,
                           review=review)
    except Exception as e:
        return error404(e)
@app.route('/submit/detection', methods=['POST'])
def submit_detection():
    """Handle the detection-turking form POST.

    Supported methods (from the 'detection-submit' form field):
      * 'delete' - currently a no-op (image deletion intentionally disabled).
      * 'clear'  - delete every annotation on the image.
      * default  - replace the image's annotations with the submitted boxes
        (scaled from the 700px-wide display back to image coordinates) and
        mark the image as reviewed.
    Redirects back to the turking page (or the 'refer' URL if given).
    """
    method = request.form.get('detection-submit', '')
    eid = request.args.get('eid', '')
    eid = None if eid == 'None' or eid == '' else int(eid)
    gid = int(request.form['detection-gid'])
    turk_id = request.cookies.get('turk_id', -1)
    if method.lower() == 'delete':
        # app.ibs.delete_images(gid)
        # print('[web] (DELETED) turk_id: %s, gid: %d' % (turk_id, gid, ))
        pass
    elif method.lower() == 'clear':
        aid_list = app.ibs.get_image_aids(gid)
        app.ibs.delete_annots(aid_list)
        # BUGFIX: log-message typo ('CLEAERED' -> 'CLEARED')
        print('[web] (CLEARED) turk_id: %s, gid: %d' % (turk_id, gid, ))
        redirection = request.referrer
        if 'gid' not in redirection:
            # Prevent multiple clears
            if '?' in redirection:
                redirection = '%s&gid=%d' % (redirection, gid, )
            else:
                redirection = '%s?gid=%d' % (redirection, gid, )
        return redirect(redirection)
    else:
        aid_list = app.ibs.get_image_aids(gid)
        # Make new annotations
        width, height = app.ibs.get_image_sizes(gid)
        # The turking UI displays images scaled to 700px wide.
        scale_factor = float(width) / 700.0
        # Replace any existing annotations with the submitted ones.
        app.ibs.delete_annots(aid_list)
        annotation_list = json.loads(request.form['detection-annotations'])
        bbox_list = [
            (
                int(scale_factor * annot['left']),
                int(scale_factor * annot['top']),
                int(scale_factor * annot['width']),
                int(scale_factor * annot['height']),
            )
            for annot in annotation_list
        ]
        theta_list = [
            float(annot['angle'])
            for annot in annotation_list
        ]
        species_list = [
            annot['label']
            for annot in annotation_list
        ]
        app.ibs.add_annots([gid] * len(annotation_list), bbox_list, theta_list=theta_list, species_list=species_list)
        app.ibs.set_image_reviewed([gid], [1])
        # BUGFIX: the log line was labeled bbox_list but printed annotation_list
        print('[web] turk_id: %s, gid: %d, bbox_list: %r, species_list: %r' % (turk_id, gid, bbox_list, species_list))
    # Return HTML
    refer = request.args.get('refer', '')
    if len(refer) > 0:
        return redirect(ap.decode_refer_url(refer))
    else:
        return redirect(url_for('turk_detection', eid=eid, previous=gid))
@app.route('/submit/viewpoint', methods=['POST'])
def submit_viewpoint():
    """Handle the viewpoint-turking form POST.

    Either deletes the annotation ('delete' method) or converts the
    submitted viewpoint angle (degrees) to a yaw (radians) and stores it,
    then redirects back to the viewpoint turking page (or the 'refer' URL
    if given).
    """
    # NOTE(review): reads 'detection-submit' -- looks like a copy/paste from
    # the detection form; confirm the viewpoint template posts this name.
    method = request.form.get('detection-submit', '')
    eid = request.args.get('eid', '')
    eid = None if eid == 'None' or eid == '' else int(eid)
    aid = int(request.form['viewpoint-aid'])
    turk_id = request.cookies.get('turk_id', -1)
    if method.lower() == 'delete':
        app.ibs.delete_annots(aid)
        print('[web] (DELETED) turk_id: %s, aid: %d' % (turk_id, aid, ))
        aid = None  # Reset AID to prevent previous
    else:
        value = int(request.form['viewpoint-value'])

        def convert_old_viewpoint_to_yaw(view_angle):
            ''' we initially had viewpoint coordinates inverted
            Example:
                >>> import math
                >>> TAU = 2 * math.pi
                >>> old_viewpoint_labels = [
                >>>     ('left'       , 0.000 * TAU,),
                >>>     ('frontleft'  , 0.125 * TAU,),
                >>>     ('front'      , 0.250 * TAU,),
                >>>     ('frontright' , 0.375 * TAU,),
                >>>     ('right'      , 0.500 * TAU,),
                >>>     ('backright'  , 0.625 * TAU,),
                >>>     ('back'       , 0.750 * TAU,),
                >>>     ('backleft'   , 0.875 * TAU,),
                >>> ]
                >>> fmtstr = 'old %15r %.2f -> new %15r %.2f'
                >>> for lbl, angle in old_viewpoint_labels:
                >>>     print(fmtstr % (lbl, angle, lbl, convert_old_viewpoint_to_yaw(angle)))
            '''
            if view_angle is None:
                return None
            # Mirror about TAU / 2 to undo the historical inversion.
            yaw = (-view_angle + (const.TAU / 2)) % const.TAU
            return yaw
        yaw = convert_old_viewpoint_to_yaw(ut.deg_to_rad(value))
        app.ibs.set_annot_yaws([aid], [yaw], input_is_degrees=False)
        # NOTE(review): %d truncates the float yaw in this log line.
        print('[web] turk_id: %s, aid: %d, yaw: %d' % (turk_id, aid, yaw))
    # Return HTML
    refer = request.args.get('refer', '')
    if len(refer) > 0:
        return redirect(ap.decode_refer_url(refer))
    else:
        return redirect(url_for('turk_viewpoint', eid=eid, previous=aid))
@app.route('/submit/quality', methods=['POST'])
def submit_quality():
    """Handle the quality-turking form POST: delete the annotation or
    record the submitted quality value, then redirect appropriately."""
    method = request.form.get('detection-submit', '')
    eid = request.args.get('eid', '')
    eid = None if eid in ('None', '') else int(eid)
    aid = int(request.form['quality-aid'])
    turk_id = request.cookies.get('turk_id', -1)
    if method.lower() == 'delete':
        app.ibs.delete_annots(aid)
        print('[web] (DELETED) turk_id: %s, aid: %d' % (turk_id, aid, ))
        aid = None  # Reset AID to prevent previous
    else:
        quality = int(request.form['quality-value'])
        app.ibs.set_annot_qualities([aid], [quality])
        print('[web] turk_id: %s, aid: %d, quality: %d' % (turk_id, aid, quality))
    # Return HTML
    refer = request.args.get('refer', '')
    if len(refer) > 0:
        return redirect(ap.decode_refer_url(refer))
    return redirect(url_for('turk_quality', eid=eid, previous=aid))
@app.route('/ajax/cookie')
def set_cookie():
    """Set a browser cookie from the 'name'/'value' query args.

    Returns the string 'true' on success, 'false' when either query arg
    is missing.
    """
    response = make_response('true')
    try:
        response.set_cookie(request.args['name'], request.args['value'])
        print('[web] Set Cookie: %r -> %r' % (request.args['name'], request.args['value'], ))
        return response
    except KeyError:
        # BUGFIX: was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt; only a missing query arg is expected here.
        print('[web] COOKIE FAILED: %r' % (request.args, ))
        return make_response('false')
@app.route('/ajax/image/src/<gid>')
def image_src(gid=None):
    # Serve the (pre-computed) thumbnail for the image rather than the
    # full-resolution original.
    thumb_path = app.ibs.get_image_thumbpath(gid, ensure_paths=True)
    return ap.return_src(thumb_path)
@app.route('/ajax/annotation/src/<aid>')
def annotation_src(aid=None):
    # Serve the cropped chip image for the annotation.
    chip_path = app.ibs.get_annot_chip_fpaths(aid)
    return ap.return_src(chip_path)
@app.route('/api')
@app.route('/api/<function>.json', methods=['GET', 'POST'])
def api(function=None):
    """Minimal JSON RPC endpoint: call a zero-argument IBEIS controller
    method named in the URL and return its result.

    With no function name, returns a usage message.  The response is a
    JSON object with 'status' (success/code) plus 'function'/'return'
    on success.
    """
    template = {
        'status': {
            'success': False,
            'code': '',
        },
    }
    print('[web] Function:', function)
    print('[web] POST:', dict(request.form))
    print('[web] GET:', dict(request.args))
    if function is None:
        template['status']['success'] = True
        template['status']['code'] = 'USAGE: /api/[ibeis_function_name].json'
    else:
        function = function.lower()
        if ap.check_valid_function_name(function):
            exists = True
            ret = None
            try:
                # SECURITY FIX: look the method up with getattr instead of
                # eval()-ing a string built from user input.
                func = getattr(app.ibs, function)
                ret = func()
            except AttributeError:
                exists = False
            if exists:
                template['status']['success'] = True
                # Keep the historical 'app.ibs.<name>' spelling in the response.
                template['function'] = 'app.ibs.%s' % function
                template['return'] = ret
            else:
                template['status']['success'] = False
                template['status']['code'] = 'ERROR: Specified IBEIS function not visible or implemented'
        else:
            template['status']['success'] = False
            template['status']['code'] = 'ERROR: Specified IBEIS function not valid Python function'
    return json.dumps(template)
@app.route('/404')
def error404(exception):
    # Log the offending exception, then render the generic 404 page.
    print('[web] %r' % (exception, ))
    return ap.template(None, '404')
################################################################################
def start_tornado(app, port=5000, browser=BROWSER, blocking=False, reset_db=True):
    """Wrap the Flask app in a Tornado WSGI container and serve it (blocking).

    Args:
        app: the Flask application (callers attach .ibs before this).
        port (int): TCP port to listen on.
        browser (bool): when True, open the server URL in a web browser.
        blocking, reset_db: currently unused; kept for interface
            compatibility with existing callers.
    """
    def _start_tornado():
        http_server = tornado.httpserver.HTTPServer(
            tornado.wsgi.WSGIContainer(app))
        http_server.listen(port)
        tornado.ioloop.IOLoop.instance().start()
    # Initialize the web server
    logging.getLogger().setLevel(logging.INFO)
    try:
        app.server_ip_address = socket.gethostbyname(socket.gethostname())
        app.port = port
    except socket.error:
        # BUGFIX: was a bare `except:` (also caught KeyboardInterrupt);
        # hostname resolution can fail offline -- fall back to loopback.
        app.server_ip_address = '127.0.0.1'
        app.port = port
    url = 'http://%s:%s' % (app.server_ip_address, app.port)
    print('[web] Tornado server starting at %s' % (url,))
    if browser:
        import webbrowser
        webbrowser.open(url)
    # Always blocking for now; the threaded variant was never enabled.
    _start_tornado()
def start_from_terminal():
    '''
    Parse command line options (--port, --db) and start the server
    as a standalone process.
    '''
    parser = optparse.OptionParser()
    parser.add_option(
        '-p', '--port',
        help='which port to serve content on',
        type='int', default=DEFAULT_PORT)
    parser.add_option(
        '--db',
        help='specify an IBEIS database',
        type='str', default='testdb0')

    opts, args = parser.parse_args()
    app.ibs = ibeis.opendb(db=opts.db)
    # Pre-compute thumbnails and chips up front so turk pages render quickly.
    print('[web] Pre-computing all image thumbnails...')
    app.ibs.compute_all_thumbs()
    print('[web] Pre-computing all image thumbnails (without annots)...')
    app.ibs.compute_all_thumbs(draw_annots=False)
    print('[web] Pre-computing all annotation chips...')
    app.ibs.compute_all_chips()
    start_tornado(app, opts.port)
def start_from_ibeis(ibs, port=DEFAULT_PORT):
    '''
    Start the web server from an already-opened IBEIS controller,
    choosing a default species from the database name.
    '''
    dbname = ibs.get_dbname()
    # Exact database-name matches take priority over substring fallbacks.
    exact_species = {
        'CHTA_Master': Species.CHEETAH,
        'ELPH_Master': Species.ELEPHANT_SAV,
        'GIR_Master': Species.GIRAFFE,
        'GZ_Master': Species.ZEB_GREVY,
        'LION_Master': Species.LION,
        'PZ_Master': Species.ZEB_PLAIN,
        'WD_Master': Species.WILDDOG,
    }
    if dbname in exact_species:
        app.default_species = exact_species[dbname]
    elif 'NNP_' in dbname or 'GZC' in dbname:
        app.default_species = Species.ZEB_PLAIN
    else:
        app.default_species = None
    print('[web] DEFAULT SPECIES: %r' % (app.default_species))
    app.ibs = ibs
    # Pre-compute thumbnails and chips up front so turk pages render quickly.
    print('[web] Pre-computing all image thumbnails (with annots)...')
    app.ibs.compute_all_thumbs()
    print('[web] Pre-computing all image thumbnails (without annots)...')
    app.ibs.compute_all_thumbs(draw_annots=False)
    print('[web] Pre-computing all annotation chips...')
    app.ibs.compute_all_chips()
    start_tornado(app, port)
if __name__ == '__main__':
    # Allow running this module directly as a standalone web server.
    start_from_terminal()
|
from typing import Any, Dict, Tuple
from collections import OrderedDict
from django.views.generic import TemplateView
from django.conf import settings
from django.http import HttpRequest, HttpResponse, HttpResponseNotFound
from django.template import loader
import os
import random
import re
from zerver.lib.integrations import CATEGORIES, INTEGRATIONS, HubotIntegration, \
WebhookIntegration, EmailIntegration
from zerver.lib.request import has_request_variables, REQ
from zerver.lib.subdomains import get_subdomain
from zerver.models import Realm
from zerver.templatetags.app_filters import render_markdown_path
from zerver.context_processors import zulip_default_context
def add_api_uri_context(context: Dict[str, Any], request: HttpRequest) -> None:
    """Populate *context* with API/realm URL values and settings links.

    On the root domain of a ROOT_DOMAIN_LANDING_PAGE deployment, a
    'yourZulipDomain' placeholder host is shown and the settings links
    are rendered as plain text rather than anchors.
    """
    context.update(zulip_default_context(request))
    subdomain = get_subdomain(request)
    if settings.ROOT_DOMAIN_LANDING_PAGE and subdomain == Realm.SUBDOMAIN_FOR_ROOT_DOMAIN:
        display_subdomain = 'yourZulipDomain'
        html_settings_links = False
    else:
        display_subdomain = subdomain
        html_settings_links = True
    display_host = Realm.host_for_subdomain(display_subdomain)
    api_url_scheme_relative = display_host + "/api"

    context['external_uri_scheme'] = settings.EXTERNAL_URI_SCHEME
    context['api_url'] = settings.EXTERNAL_URI_SCHEME + api_url_scheme_relative
    context['api_url_scheme_relative'] = api_url_scheme_relative
    context['zulip_url'] = settings.EXTERNAL_URI_SCHEME + display_host
    context["html_settings_links"] = html_settings_links

    if html_settings_links:
        context['settings_html'] = '<a href="/#settings">Zulip settings page</a>'
        context['subscriptions_html'] = '<a target="_blank" href="/#streams">streams page</a>'
    else:
        context['settings_html'] = 'Zulip settings page'
        context['subscriptions_html'] = 'streams page'
class ApiURLView(TemplateView):
    """TemplateView base that injects the API/realm URL context
    (via add_api_uri_context) into every page rendered from it."""
    def get_context_data(self, **kwargs: Any) -> Dict[str, str]:
        context = super().get_context_data(**kwargs)
        add_api_uri_context(context, self.request)
        return context
class APIView(ApiURLView):
    """Render the /api landing page with the API URL context.

    NOTE(review): appears unused within this file; candidate for removal.
    """
    template_name = 'zerver/api.html'
class MarkdownDirectoryView(ApiURLView):
    """Serve a directory of markdown articles (the /help and /api doc
    centers).  Subclasses configure ``path_template``, a ``%s`` template
    mapping an article slug to a template path.
    """
    path_template = ""

    def get_path(self, article: str) -> Tuple[str, int]:
        """Map an article slug to ``(template_path, http_status)``.

        Slugs containing '/', longer than 100 chars, or with characters
        outside [0-9a-zA-Z_-] resolve to the "missing" article with 404.
        A slug that passes validation but has no template also yields the
        "missing" article and 404.
        """
        http_status = 200
        if article == "":
            article = "index"
        elif article == "include/sidebar_index":
            # Special-cased internal include; bypasses slug validation.
            pass
        elif "/" in article:
            article = "missing"
            http_status = 404
        elif len(article) > 100 or not re.match('^[0-9a-zA-Z_-]+$', article):
            article = "missing"
            http_status = 404
        path = self.path_template % (article,)
        try:
            loader.get_template(path)
            return (path, http_status)
        except loader.TemplateDoesNotExist:
            return (self.path_template % ("missing",), 404)

    def get_context_data(self, **kwargs: Any) -> Dict[str, Any]:
        article = kwargs["article"]
        context = super().get_context_data()  # type: Dict[str, Any]
        (context["article"], http_status_ignored) = self.get_path(article)
        # For disabling the "Back to home" on the homepage
        context["not_index_page"] = not context["article"].endswith("/index.md")
        if self.path_template == '/zerver/help/%s.md':
            context["page_is_help_center"] = True
            context["doc_root"] = "/help/"
            (sidebar_index, http_status_ignored) = self.get_path("include/sidebar_index")
            # We want the sliding/collapsing behavior for /help pages only
            sidebar_class = "sidebar slide"
            title_base = "Zulip Help Center"
        else:
            context["page_is_api_center"] = True
            context["doc_root"] = "/api/"
            (sidebar_index, http_status_ignored) = self.get_path("sidebar_index")
            sidebar_class = "sidebar"
            title_base = "Zulip API Documentation"
        # The following is a somewhat hacky approach to extract titles from articles.
        # Hack: `context["article"] has a leading `/`, so we use + to add directories.
        article_path = os.path.join(settings.DEPLOY_ROOT, 'templates') + context["article"]
        if os.path.exists(article_path):
            with open(article_path) as article_file:
                first_line = article_file.readlines()[0]
                # Strip the header and then use the first line to get the article title
                article_title = first_line.strip().lstrip("# ")
                if context["not_index_page"]:
                    context["OPEN_GRAPH_TITLE"] = "%s (%s)" % (article_title, title_base)
                else:
                    context["OPEN_GRAPH_TITLE"] = title_base
        # Random placeholder, replaced with a real description later in the
        # response cycle (the literal's spelling is load-bearing; do not fix).
        self.request.placeholder_open_graph_description = (
            "REPLACMENT_OPEN_GRAPH_DESCRIPTION_%s" % (int(2**24 * random.random()),))
        context["OPEN_GRAPH_DESCRIPTION"] = self.request.placeholder_open_graph_description
        context["sidebar_index"] = sidebar_index
        context["sidebar_class"] = sidebar_class
        # An "article" might require the api_uri_context to be rendered
        api_uri_context = {}  # type: Dict[str, Any]
        add_api_uri_context(api_uri_context, self.request)
        api_uri_context["run_content_validators"] = True
        context["api_uri_context"] = api_uri_context
        return context

    def get(self, request: HttpRequest, article: str="") -> HttpResponse:
        # NOTE(review): `super().get(self, ...)` passes `self` where the bound
        # method expects a request -- looks deliberate upstream, but confirm.
        (path, http_status) = self.get_path(article)
        result = super().get(self, article=article)
        if http_status != 200:
            result.status_code = http_status
        return result
def add_integrations_context(context: Dict[str, Any]) -> None:
    """Add alphabetized category/integration dicts and a rounded-down
    count of enabled integrations to *context*."""
    enabled_count = len(list(filter(lambda v: v.is_enabled(), INTEGRATIONS.values())))
    context['categories_dict'] = OrderedDict(sorted(CATEGORIES.items()))
    context['integrations_dict'] = OrderedDict(sorted(INTEGRATIONS.items()))
    # Subtract 1 so saying "Over X integrations" is correct. Then,
    # round down to the nearest multiple of 10.
    context['integrations_count_display'] = ((enabled_count - 1) // 10) * 10
def add_integrations_open_graph_context(context: Dict[str, Any], request: HttpRequest) -> None:
    """Set Open Graph title/description for /integrations pages based on
    the last component of the request path."""
    path_name = request.path.rstrip('/').split('/')[-1]
    description = ('Zulip comes with over a hundred native integrations out of the box, '
                   'and integrates with Zapier, IFTTT, and Hubot to provide hundreds more. '
                   'Connect the apps you use everyday to Zulip.')
    title = None
    if path_name in INTEGRATIONS:
        title = 'Connect {name} to Zulip'.format(name=INTEGRATIONS[path_name].display_name)
    elif path_name in CATEGORIES:
        title = 'Connect your {category} tools to Zulip'.format(category=CATEGORIES[path_name])
    elif path_name == 'integrations':
        title = 'Connect the tools you use to Zulip'
    if title is not None:
        context['OPEN_GRAPH_TITLE'] = title
        context['OPEN_GRAPH_DESCRIPTION'] = description
class IntegrationView(ApiURLView):
    """Landing page listing all integrations, grouped by category."""
    template_name = 'zerver/integrations/index.html'

    def get_context_data(self, **kwargs: Any) -> Dict[str, Any]:
        context = super().get_context_data(**kwargs)  # type: Dict[str, Any]
        add_integrations_context(context)
        add_integrations_open_graph_context(context, self.request)
        return context
@has_request_variables
def integration_doc(request: HttpRequest, integration_name: str=REQ(default=None)) -> HttpResponse:
    """Render a single integration's markdown documentation.

    AJAX-only endpoint; returns 404 for non-AJAX requests or unknown
    integration names.
    """
    if not request.is_ajax():
        return HttpResponseNotFound()
    try:
        integration = INTEGRATIONS[integration_name]
    except KeyError:
        return HttpResponseNotFound()

    context = {}  # type: Dict[str, Any]
    add_api_uri_context(context, request)
    context['integration_name'] = integration.name
    context['integration_display_name'] = integration.display_name
    if hasattr(integration, 'stream_name'):
        context['recommended_stream_name'] = integration.stream_name
    if isinstance(integration, WebhookIntegration):
        # NOTE(review): strips the first 3 chars of the webhook URL --
        # presumably a fixed prefix; confirm against WebhookIntegration.url.
        context['integration_url'] = integration.url[3:]
    if isinstance(integration, HubotIntegration):
        context['hubot_docs_url'] = integration.hubot_docs_url
    if isinstance(integration, EmailIntegration):
        context['email_gateway_example'] = settings.EMAIL_GATEWAY_EXAMPLE
    doc_html_str = render_markdown_path(integration.doc, context)
    return HttpResponse(doc_html_str)
views: Remove unused APIView class.
Signed-off-by: Anders Kaseorg <dfdb7392591db597bc41cf266a9c3bc12a2706e5@zulipchat.com>
from typing import Any, Dict, Tuple
from collections import OrderedDict
from django.views.generic import TemplateView
from django.conf import settings
from django.http import HttpRequest, HttpResponse, HttpResponseNotFound
from django.template import loader
import os
import random
import re
from zerver.lib.integrations import CATEGORIES, INTEGRATIONS, HubotIntegration, \
WebhookIntegration, EmailIntegration
from zerver.lib.request import has_request_variables, REQ
from zerver.lib.subdomains import get_subdomain
from zerver.models import Realm
from zerver.templatetags.app_filters import render_markdown_path
from zerver.context_processors import zulip_default_context
def add_api_uri_context(context: Dict[str, Any], request: HttpRequest) -> None:
    """Fill *context* with API URL variables for the current realm.

    On the root domain of a ROOT_DOMAIN_LANDING_PAGE deployment, a
    'yourZulipDomain' placeholder host is used and the settings links are
    rendered as plain text instead of anchors.
    """
    context.update(zulip_default_context(request))
    subdomain = get_subdomain(request)
    if (subdomain != Realm.SUBDOMAIN_FOR_ROOT_DOMAIN
            or not settings.ROOT_DOMAIN_LANDING_PAGE):
        display_subdomain = subdomain
        html_settings_links = True
    else:
        display_subdomain = 'yourZulipDomain'
        html_settings_links = False
    display_host = Realm.host_for_subdomain(display_subdomain)
    api_url_scheme_relative = display_host + "/api"
    api_url = settings.EXTERNAL_URI_SCHEME + api_url_scheme_relative
    zulip_url = settings.EXTERNAL_URI_SCHEME + display_host

    context['external_uri_scheme'] = settings.EXTERNAL_URI_SCHEME
    context['api_url'] = api_url
    context['api_url_scheme_relative'] = api_url_scheme_relative
    context['zulip_url'] = zulip_url
    context["html_settings_links"] = html_settings_links

    if html_settings_links:
        settings_html = '<a href="/#settings">Zulip settings page</a>'
        subscriptions_html = '<a target="_blank" href="/#streams">streams page</a>'
    else:
        settings_html = 'Zulip settings page'
        subscriptions_html = 'streams page'
    context['settings_html'] = settings_html
    context['subscriptions_html'] = subscriptions_html
class ApiURLView(TemplateView):
    """TemplateView base that injects the API/realm URL context
    (via add_api_uri_context) into every page rendered from it."""
    def get_context_data(self, **kwargs: Any) -> Dict[str, str]:
        context = super().get_context_data(**kwargs)
        add_api_uri_context(context, self.request)
        return context
class MarkdownDirectoryView(ApiURLView):
    """Serve a directory of markdown articles (the /help and /api doc
    centers).  Subclasses configure ``path_template``, a ``%s`` template
    mapping an article slug to a template path.
    """
    path_template = ""

    def get_path(self, article: str) -> Tuple[str, int]:
        """Map an article slug to ``(template_path, http_status)``.

        Slugs containing '/', longer than 100 chars, or with characters
        outside [0-9a-zA-Z_-] resolve to the "missing" article with 404.
        A slug that passes validation but has no template also yields the
        "missing" article and 404.
        """
        http_status = 200
        if article == "":
            article = "index"
        elif article == "include/sidebar_index":
            # Special-cased internal include; bypasses slug validation.
            pass
        elif "/" in article:
            article = "missing"
            http_status = 404
        elif len(article) > 100 or not re.match('^[0-9a-zA-Z_-]+$', article):
            article = "missing"
            http_status = 404
        path = self.path_template % (article,)
        try:
            loader.get_template(path)
            return (path, http_status)
        except loader.TemplateDoesNotExist:
            return (self.path_template % ("missing",), 404)

    def get_context_data(self, **kwargs: Any) -> Dict[str, Any]:
        article = kwargs["article"]
        context = super().get_context_data()  # type: Dict[str, Any]
        (context["article"], http_status_ignored) = self.get_path(article)
        # For disabling the "Back to home" on the homepage
        context["not_index_page"] = not context["article"].endswith("/index.md")
        if self.path_template == '/zerver/help/%s.md':
            context["page_is_help_center"] = True
            context["doc_root"] = "/help/"
            (sidebar_index, http_status_ignored) = self.get_path("include/sidebar_index")
            # We want the sliding/collapsing behavior for /help pages only
            sidebar_class = "sidebar slide"
            title_base = "Zulip Help Center"
        else:
            context["page_is_api_center"] = True
            context["doc_root"] = "/api/"
            (sidebar_index, http_status_ignored) = self.get_path("sidebar_index")
            sidebar_class = "sidebar"
            title_base = "Zulip API Documentation"
        # The following is a somewhat hacky approach to extract titles from articles.
        # Hack: `context["article"] has a leading `/`, so we use + to add directories.
        article_path = os.path.join(settings.DEPLOY_ROOT, 'templates') + context["article"]
        if os.path.exists(article_path):
            with open(article_path) as article_file:
                first_line = article_file.readlines()[0]
                # Strip the header and then use the first line to get the article title
                article_title = first_line.strip().lstrip("# ")
                if context["not_index_page"]:
                    context["OPEN_GRAPH_TITLE"] = "%s (%s)" % (article_title, title_base)
                else:
                    context["OPEN_GRAPH_TITLE"] = title_base
        # Random placeholder, replaced with a real description later in the
        # response cycle (the literal's spelling is load-bearing; do not fix).
        self.request.placeholder_open_graph_description = (
            "REPLACMENT_OPEN_GRAPH_DESCRIPTION_%s" % (int(2**24 * random.random()),))
        context["OPEN_GRAPH_DESCRIPTION"] = self.request.placeholder_open_graph_description
        context["sidebar_index"] = sidebar_index
        context["sidebar_class"] = sidebar_class
        # An "article" might require the api_uri_context to be rendered
        api_uri_context = {}  # type: Dict[str, Any]
        add_api_uri_context(api_uri_context, self.request)
        api_uri_context["run_content_validators"] = True
        context["api_uri_context"] = api_uri_context
        return context

    def get(self, request: HttpRequest, article: str="") -> HttpResponse:
        # NOTE(review): `super().get(self, ...)` passes `self` where the bound
        # method expects a request -- looks deliberate upstream, but confirm.
        (path, http_status) = self.get_path(article)
        result = super().get(self, article=article)
        if http_status != 200:
            result.status_code = http_status
        return result
def add_integrations_context(context: Dict[str, Any]) -> None:
    """Add alphabetized category/integration dicts and a rounded-down
    count of enabled integrations to *context*."""
    alphabetical_sorted_categories = OrderedDict(sorted(CATEGORIES.items()))
    alphabetical_sorted_integration = OrderedDict(sorted(INTEGRATIONS.items()))
    enabled_integrations_count = len(list(filter(lambda v: v.is_enabled(), INTEGRATIONS.values())))
    # Subtract 1 so saying "Over X integrations" is correct. Then,
    # round down to the nearest multiple of 10.
    integrations_count_display = ((enabled_integrations_count - 1) // 10) * 10
    context['categories_dict'] = alphabetical_sorted_categories
    context['integrations_dict'] = alphabetical_sorted_integration
    context['integrations_count_display'] = integrations_count_display
def add_integrations_open_graph_context(context: Dict[str, Any], request: HttpRequest) -> None:
    """Set Open Graph title/description for /integrations pages based on
    the last component of the request path."""
    path_name = request.path.rstrip('/').split('/')[-1]
    description = ('Zulip comes with over a hundred native integrations out of the box, '
                   'and integrates with Zapier, IFTTT, and Hubot to provide hundreds more. '
                   'Connect the apps you use everyday to Zulip.')
    if path_name in INTEGRATIONS:
        # Single-integration page.
        integration = INTEGRATIONS[path_name]
        context['OPEN_GRAPH_TITLE'] = 'Connect {name} to Zulip'.format(name=integration.display_name)
        context['OPEN_GRAPH_DESCRIPTION'] = description
    elif path_name in CATEGORIES:
        # Category listing page.
        category = CATEGORIES[path_name]
        context['OPEN_GRAPH_TITLE'] = 'Connect your {category} tools to Zulip'.format(category=category)
        context['OPEN_GRAPH_DESCRIPTION'] = description
    elif path_name == 'integrations':
        # Top-level /integrations landing page.
        context['OPEN_GRAPH_TITLE'] = 'Connect the tools you use to Zulip'
        context['OPEN_GRAPH_DESCRIPTION'] = description
class IntegrationView(ApiURLView):
    """Landing page listing all integrations, grouped by category."""
    template_name = 'zerver/integrations/index.html'

    def get_context_data(self, **kwargs: Any) -> Dict[str, Any]:
        context = super().get_context_data(**kwargs)  # type: Dict[str, Any]
        add_integrations_context(context)
        add_integrations_open_graph_context(context, self.request)
        return context
@has_request_variables
def integration_doc(request: HttpRequest, integration_name: str=REQ(default=None)) -> HttpResponse:
    """Render a single integration's markdown documentation.

    AJAX-only endpoint; returns 404 for non-AJAX requests or unknown
    integration names.
    """
    if not request.is_ajax():
        return HttpResponseNotFound()
    try:
        integration = INTEGRATIONS[integration_name]
    except KeyError:
        return HttpResponseNotFound()

    context = {}  # type: Dict[str, Any]
    add_api_uri_context(context, request)
    context['integration_name'] = integration.name
    context['integration_display_name'] = integration.display_name
    if hasattr(integration, 'stream_name'):
        context['recommended_stream_name'] = integration.stream_name
    if isinstance(integration, WebhookIntegration):
        # NOTE(review): strips the first 3 chars of the webhook URL --
        # presumably a fixed prefix; confirm against WebhookIntegration.url.
        context['integration_url'] = integration.url[3:]
    if isinstance(integration, HubotIntegration):
        context['hubot_docs_url'] = integration.hubot_docs_url
    if isinstance(integration, EmailIntegration):
        context['email_gateway_example'] = settings.EMAIL_GATEWAY_EXAMPLE
    doc_html_str = render_markdown_path(integration.doc, context)
    return HttpResponse(doc_html_str)
|
# -*- coding: utf-8 -*-
"""
Dependencies: flask, tornado
"""
from __future__ import absolute_import, division, print_function
import random
from os.path import join, exists, splitext, basename
import zipfile
import time
import math
import tornado.wsgi
import tornado.httpserver
from flask import request, redirect, url_for, make_response, current_app
import logging
import socket
import simplejson as json
from ibeis.control import controller_inject
from ibeis.control.SQLDatabaseControl import (SQLDatabaseController, # NOQA
SQLAtomicContext)
import ibeis.constants as const
from ibeis.constants import KEY_DEFAULTS, SPECIES_KEY, Species, PI, TAU
from ibeis.web import appfuncs as ap
from ibeis.web import zmq_task_queue # NOQA
import utool as ut
DEFAULT_WEB_API_PORT = ut.get_argval('--port', type_=int, default=5000)
register_api = controller_inject.get_ibeis_flask_api(__name__)
register_route = controller_inject.get_ibeis_flask_route(__name__)
BROWSER = ut.get_argflag('--browser')
PAGE_SIZE = 500
################################################################################
def default_species(ibs):
    # hack function: guess a sensible default species from the database name
    dbname = ibs.get_dbname()
    # Exact database-name matches take priority over substring fallbacks.
    exact_matches = {
        'CHTA_Master': Species.CHEETAH,
        'ELPH_Master': Species.ELEPHANT_SAV,
        'GIR_Master': Species.GIRAFFE,
        'GZ_Master': Species.ZEB_GREVY,
        'LION_Master': Species.LION,
        'PZ_Master': Species.ZEB_PLAIN,
        'WD_Master': Species.WILDDOG,
        'NNP_MasterGIRM': Species.GIRAFFE_MASAI,
    }
    if dbname in exact_matches:
        species = exact_matches[dbname]
    elif 'NNP_' in dbname or 'GZC' in dbname:
        species = Species.ZEB_PLAIN
    else:
        species = None
    print('[web] DEFAULT SPECIES: %r' % (species))
    return species
def encounter_image_processed(ibs, gid_list):
    # One bool per gid: True when the image's reviewed flag equals 1.
    return [flag == 1 for flag in ibs.get_image_reviewed(gid_list)]
def encounter_annot_viewpoint_processed(ibs, aid_list):
    # One bool per aid: True when the annotation has a yaw recorded.
    return [yaw is not None for yaw in ibs.get_annot_yaws(aid_list)]
def encounter_annot_quality_processed(ibs, aid_list):
    """Return one bool per aid: True when the annotation's quality is set
    and is not the unreviewed sentinel (-1).

    BUGFIX: the original used ``reviewed is not -1`` -- an identity
    comparison with an int literal whose result is implementation-defined
    (and a SyntaxWarning on modern Python); value comparison is intended.
    """
    annots_reviewed = [
        quality is not None and quality != -1
        for quality in ibs.get_annot_qualities(aid_list)
    ]
    return annots_reviewed
def encounter_annot_additional_processed(ibs, aid_list, nid_list):
    """Return one bool per annotation: True when its additional metadata
    (sex and estimated age) appears fully filled in, or when nid < 0
    (no valid name, so nothing left to review).

    NOTE(review): assumes ``sex`` is a numeric code where >= 0 means "set"
    and ``age`` is an iterable month range where a -1 entry or two Nones
    mean "unset" -- confirm against ibs.get_annot_sex /
    ibs.get_annot_age_months_est.
    """
    sex_list = ibs.get_annot_sex(aid_list)
    age_list = ibs.get_annot_age_months_est(aid_list)
    annots_reviewed = [
        (nid < 0) or (nid > 0 and sex >= 0 and -1 not in list(age) and list(age).count(None) < 2)
        for nid, sex, age in zip(nid_list, sex_list, age_list)
    ]
    return annots_reviewed
def convert_old_viewpoint_to_yaw(view_angle):
    """Convert a legacy viewpoint angle (degrees) to a yaw in radians.

    The original viewpoint coordinates were inverted, so the angle is
    mirrored about TAU / 2 and wrapped into [0, TAU).

    Example:
        >>> import math
        >>> TAU = 2 * math.pi
        >>> old_viewpoint_labels = [
        >>>     ('left'       , 0, 0.000 * TAU,),
        >>>     ('frontleft'  , 45, 0.125 * TAU,),
        >>>     ('front'      , 90, 0.250 * TAU,),
        >>>     ('frontright' , 135, 0.375 * TAU,),
        >>>     ('right'      , 180, 0.500 * TAU,),
        >>>     ('backright'  , 225, 0.625 * TAU,),
        >>>     ('back'       , 270, 0.750 * TAU,),
        >>>     ('backleft'   , 315, 0.875 * TAU,),
        >>> ]
        >>> fmtstr = 'old %15r %.2f -> new %15r %.2f'
        >>> for lbl, angle, radians in old_viewpoint_labels:
        >>>     print(fmtstr % (lbl, angle, lbl, convert_old_viewpoint_to_yaw(angle)))
    """
    if view_angle is None:
        return None
    radians = ut.deg_to_rad(view_angle)
    return (-radians + (TAU / 2)) % TAU
def convert_yaw_to_old_viewpoint(yaw):
    """Inverse of convert_old_viewpoint_to_yaw: map a yaw (radians) back
    to the inverted legacy viewpoint angle (degrees).

    Example:
        >>> import math
        >>> TAU = 2 * math.pi
        >>> old_viewpoint_labels = [
        >>>     ('left'       , 0, 0.000 * TAU,),
        >>>     ('frontleft'  , 45, 0.125 * TAU,),
        >>>     ('front'      , 90, 0.250 * TAU,),
        >>>     ('frontright' , 135, 0.375 * TAU,),
        >>>     ('right'      , 180, 0.500 * TAU,),
        >>>     ('backright'  , 225, 0.625 * TAU,),
        >>>     ('back'       , 270, 0.750 * TAU,),
        >>>     ('backleft'   , 315, 0.875 * TAU,),
        >>> ]
        >>> fmtstr = 'original_angle %15r %.2f -> yaw %15r %.2f -> reconstructed_angle %15r %.2f'
        >>> for lbl, angle, radians in old_viewpoint_labels:
        >>>     yaw = convert_old_viewpoint_to_yaw(angle)
        >>>     reconstructed_angle = convert_yaw_to_old_viewpoint(yaw)
        >>>     print(fmtstr % (lbl, angle, lbl, yaw, lbl, reconstructed_angle))
    """
    if yaw is None:
        return None
    mirrored = ((TAU / 2) - yaw) % TAU
    return ut.rad_to_deg(mirrored)
################################################################################
@register_route('/')
def root():
    # Landing page: render the base template with no specific sub-page.
    return ap.template(None)
@register_route('/view')
def view():
    """Render the main statistics overview page.

    Builds, for the filtered annotation set (``ibs.filter_aids_count()``):
      * a cumulative discovery curve of unique names seen per annotation,
      * per-day bar-chart tallies (images taken, annotations, names seen,
        names first seen, names recaptured),
      * a Petersen-Lincoln population estimate from the last two days,
      * GPS markers and per-name GPS tracks,
      * an age/sex histogram keyed by name,
    and hands everything to the 'view' template.
    """
    def _date_list(gid_list):
        # Map image rowids to date strings ('UNKNOWN' when no unixtime).
        unixtime_list = ibs.get_image_unixtime(gid_list)
        datetime_list = [
            ut.unixtime_to_datetimestr(unixtime)
            if unixtime is not None else
            'UNKNOWN'
            for unixtime in unixtime_list
        ]
        datetime_split_list = [ datetime.split(' ') for datetime in datetime_list ]
        date_list = [ datetime_split[0] if len(datetime_split) == 2 else 'UNKNOWN' for datetime_split in datetime_split_list ]
        return date_list
    ibs = current_app.ibs
    aid_list = ibs.filter_aids_count()
    gid_list = ibs.get_annot_gids(aid_list)
    nid_list = ibs.get_annot_name_rowids(aid_list)
    date_list = _date_list(gid_list)
    gid_list_unique = list(set(gid_list))
    date_list_unique = _date_list(gid_list_unique)
    date_taken_dict = {}
    # date_taken_dict[date] -> [all valid images on date, filtered images on date]
    for gid, date in zip(gid_list_unique, date_list_unique):
        if date not in date_taken_dict:
            date_taken_dict[date] = [0, 0]
        date_taken_dict[date][1] += 1
    gid_list_all = ibs.get_valid_gids()
    date_list_all = _date_list(gid_list_all)
    for gid, date in zip(gid_list_all, date_list_all):
        if date in date_taken_dict:
            date_taken_dict[date][0] += 1
    value = 0
    label_list = []
    value_list = []
    index_list = []
    seen_set = set()
    current_seen_set = set()
    previous_seen_set = set()
    last_date = None
    date_seen_dict = {}
    # date_seen_dict[date] -> [annots seen, names seen, names first seen,
    #                          names also seen the previous day (recaptures)]
    for index, (aid, nid, date) in enumerate(zip(aid_list, nid_list, date_list)):
        index_list.append(index + 1)
        # Add to counters
        if date not in date_seen_dict:
            date_seen_dict[date] = [0, 0, 0, 0]
        date_seen_dict[date][0] += 1
        if nid not in current_seen_set:
            current_seen_set.add(nid)
            date_seen_dict[date][1] += 1
            if nid in previous_seen_set:
                date_seen_dict[date][3] += 1
        if nid not in seen_set:
            seen_set.add(nid)
            value += 1
            date_seen_dict[date][2] += 1
        # Add to register
        value_list.append(value)
        # Reset step (per day): roll the per-day name set into the
        # "previous day" set whenever the date changes.
        if date != last_date and date != 'UNKNOWN':
            last_date = date
            previous_seen_set = set(current_seen_set)
            current_seen_set = set()
            label_list.append(date)
        else:
            label_list.append('')
    # def optimization1(x, a, b, c):
    #     return a * np.log(b * x) + c
    # def optimization2(x, a, b, c):
    #     return a * np.sqrt(x) ** b + c
    # def optimization3(x, a, b, c):
    #     return 1.0 / (a * np.exp(-b * x) + c)
    # def process(func, opts, domain, zero_index, zero_value):
    #     values = func(domain, *opts)
    #     diff = values[zero_index] - zero_value
    #     values -= diff
    #     values[ values < 0.0 ] = 0.0
    #     values[:zero_index] = 0.0
    #     values = values.astype(int)
    #     return list(values)
    # optimization_funcs = [
    #     optimization1,
    #     optimization2,
    #     optimization3,
    # ]
    # # Get data
    # x = np.array(index_list)
    # y = np.array(value_list)
    # # Fit curves
    # end = int(len(index_list) * 1.25)
    # domain = np.array(range(1, end))
    # zero_index = len(value_list) - 1
    # zero_value = value_list[zero_index]
    # regressed_opts = [ curve_fit(func, x, y)[0] for func in optimization_funcs ]
    # prediction_list = [
    #     process(func, opts, domain, zero_index, zero_value)
    #     for func, opts in zip(optimization_funcs, regressed_opts)
    # ]
    # index_list = list(domain)
    prediction_list = []
    date_seen_dict.pop('UNKNOWN', None)
    bar_label_list = sorted(date_seen_dict.keys())
    bar_value_list1 = [ date_taken_dict[date][0] for date in bar_label_list ]
    bar_value_list2 = [ date_taken_dict[date][1] for date in bar_label_list ]
    bar_value_list3 = [ date_seen_dict[date][0] for date in bar_label_list ]
    bar_value_list4 = [ date_seen_dict[date][1] for date in bar_label_list ]
    bar_value_list5 = [ date_seen_dict[date][2] for date in bar_label_list ]
    bar_value_list6 = [ date_seen_dict[date][3] for date in bar_label_list ]
    # label_list += ['Models'] + [''] * (len(index_list) - len(label_list) - 1)
    # value_list += [0] * (len(index_list) - len(value_list))
    # Counts
    eid_list = ibs.get_valid_eids()
    gid_list = ibs.get_valid_gids()
    aid_list = ibs.get_valid_aids()
    nid_list = ibs.get_valid_nids()
    contrib_list = ibs.get_valid_contrib_rowids()
    # nid_list = ibs.get_valid_nids()
    aid_list_count = ibs.filter_aids_count()
    # gid_list_count = list(set(ibs.get_annot_gids(aid_list_count)))
    nid_list_count_dup = ibs.get_annot_name_rowids(aid_list_count)
    nid_list_count = list(set(nid_list_count_dup))
    # Calculate the Petersen-Lincoln index form the last two days
    try:
        # c1: names seen day before last; c2: names seen last day;
        # c3: of those, names also seen the day before (recaptures).
        c1 = bar_value_list4[-2]
        c2 = bar_value_list4[-1]
        c3 = bar_value_list6[-1]
        pl_index = int(math.ceil( (c1 * c2) / c3 ))
        pl_error_num = float(c1 * c1 * c2 * (c2 - c3))
        pl_error_dom = float(c3 ** 3)
        pl_error = int(math.ceil( 1.96 * math.sqrt(pl_error_num / pl_error_dom) ))
    except IndexError:
        # pl_index = 'Undefined - Zero recaptured (k = 0)'
        pl_index = 0
        pl_error = 0
    except ZeroDivisionError:
        # pl_index = 'Undefined - Zero recaptured (k = 0)'
        pl_index = 0
        pl_error = 0
    # Get the markers
    # NOTE(review): map() here is treated as a list downstream (Python 2
    # semantics); under Python 3 these would be one-shot iterators — confirm.
    gid_list_markers = ibs.get_annot_gids(aid_list_count)
    gps_list_markers = map(list, ibs.get_image_gps(gid_list_markers))
    gps_list_markers_all = map(list, ibs.get_image_gps(gid_list))
    REMOVE_DUP_CODE = True
    if not REMOVE_DUP_CODE:
        # Get the tracks
        nid_track_dict = ut.ddict(list)
        for nid, gps in zip(nid_list_count_dup, gps_list_markers):
            if gps[0] == -1.0 and gps[1] == -1.0:
                continue
            nid_track_dict[nid].append(gps)
        gps_list_tracks = [ nid_track_dict[nid] for nid in sorted(nid_track_dict.keys()) ]
    else:
        __nid_list, gps_track_list, aid_track_list = ibs.get_name_gps_tracks(aid_list=aid_list_count)
        gps_list_tracks = list(map(lambda x: list(map(list, x)), gps_track_list))
    valid_aids = ibs.get_valid_aids()
    valid_gids = ibs.get_valid_gids()
    valid_aids_ = ibs.filter_aids_custom(valid_aids)
    valid_gids_ = ibs.filter_gids_custom(valid_gids)
    used_gids = list(set( ibs.get_annot_gids(valid_aids) ))
    used_contrib_tags = list(set( ibs.get_image_contributor_tag(used_gids) ))
    # Get Age and sex (By Annot)
    # annot_sex_list = ibs.get_annot_sex(valid_aids_)
    # annot_age_months_est_min = ibs.get_annot_age_months_est_min(valid_aids_)
    # annot_age_months_est_max = ibs.get_annot_age_months_est_max(valid_aids_)
    # age_list = [[0, 0, 0], [0, 0, 0], [0, 0, 0]]
    # for sex, min_age, max_age in zip(annot_sex_list, annot_age_months_est_min, annot_age_months_est_max):
    #     if sex not in [0, 1]:
    #         sex = 2
    #         # continue
    #     if (min_age is None or min_age < 12) and max_age < 12:
    #         age_list[sex][0] += 1
    #     elif 12 <= min_age and min_age < 36 and 12 <= max_age and max_age < 36:
    #         age_list[sex][1] += 1
    #     elif 36 <= min_age and (36 <= max_age or max_age is None):
    #         age_list[sex][2] += 1
    # Get Age and sex (By Name)
    name_sex_list = ibs.get_name_sex(nid_list_count)
    name_age_months_est_mins_list = ibs.get_name_age_months_est_min(nid_list_count)
    name_age_months_est_maxs_list = ibs.get_name_age_months_est_max(nid_list_count)
    # age_list[sex][bucket]: sex 0/1 are known sexes, 2 is unknown;
    # buckets are <12 months, 12-35 months, >=36 months.
    age_list = [[0, 0, 0], [0, 0, 0], [0, 0, 0]]
    age_unreviewed = 0
    age_ambiguous = 0
    for nid, sex, min_ages, max_ages in zip(nid_list_count, name_sex_list, name_age_months_est_mins_list, name_age_months_est_maxs_list):
        if len(set(min_ages)) > 1 or len(set(max_ages)) > 1:
            # print('[web] Invalid name %r: Cannot have more than one age' % (nid, ))
            age_ambiguous += 1
            continue
        min_age = None
        max_age = None
        if len(min_ages) > 0:
            min_age = min_ages[0]
        if len(max_ages) > 0:
            max_age = max_ages[0]
        # Histogram
        # NOTE(review): `min_age is -1` relies on CPython small-int caching;
        # `== -1` would be the robust comparison — confirm before changing.
        if (min_age is None and max_age is None) or (min_age is -1 and max_age is -1):
            # print('[web] Unreviewded name %r: Specify the age for the name' % (nid, ))
            age_unreviewed += 1
            continue
        if sex not in [0, 1]:
            sex = 2
            # continue
        if (min_age is None or min_age < 12) and max_age < 12:
            age_list[sex][0] += 1
        elif 12 <= min_age and min_age < 36 and 12 <= max_age and max_age < 36:
            age_list[sex][1] += 1
        elif 36 <= min_age and (36 <= max_age or max_age is None):
            age_list[sex][2] += 1
    dbinfo_str = dbinfo()
    return ap.template('view',
                       line_index_list=index_list,
                       line_label_list=label_list,
                       line_value_list=value_list,
                       prediction_list=prediction_list,
                       pl_index=pl_index,
                       pl_error=pl_error,
                       gps_list_markers=gps_list_markers,
                       gps_list_markers_all=gps_list_markers_all,
                       gps_list_tracks=gps_list_tracks,
                       bar_label_list=bar_label_list,
                       bar_value_list1=bar_value_list1,
                       bar_value_list2=bar_value_list2,
                       bar_value_list3=bar_value_list3,
                       bar_value_list4=bar_value_list4,
                       bar_value_list5=bar_value_list5,
                       bar_value_list6=bar_value_list6,
                       age_list=age_list,
                       age_ambiguous=age_ambiguous,
                       age_unreviewed=age_unreviewed,
                       dbinfo_str=dbinfo_str,
                       eid_list=eid_list,
                       eid_list_str=','.join(map(str, eid_list)),
                       num_eids=len(eid_list),
                       gid_list=gid_list,
                       gid_list_str=','.join(map(str, gid_list)),
                       num_gids=len(gid_list),
                       contrib_list=contrib_list,
                       contrib_list_str=','.join(map(str, contrib_list)),
                       num_contribs=len(contrib_list),
                       gid_list_count=valid_gids_,
                       gid_list_count_str=','.join(map(str, valid_gids_)),
                       num_gids_count=len(valid_gids_),
                       aid_list=aid_list,
                       aid_list_str=','.join(map(str, aid_list)),
                       num_aids=len(aid_list),
                       aid_list_count=valid_aids_,
                       aid_list_count_str=','.join(map(str, valid_aids_)),
                       num_aids_count=len(valid_aids_),
                       nid_list=nid_list,
                       nid_list_str=','.join(map(str, nid_list)),
                       num_nids=len(nid_list),
                       nid_list_count=nid_list_count,
                       nid_list_count_str=','.join(map(str, nid_list_count)),
                       num_nids_count=len(nid_list_count),
                       used_gids=used_gids,
                       num_used_gids=len(used_gids),
                       used_contribs=used_contrib_tags,
                       num_used_contribs=len(used_contrib_tags))
@register_route('/view/encounters')
def view_encounters():
    """Render a table of encounters with per-encounter review progress.

    The optional ``eid`` query parameter is a csv of encounter rowids to
    filter on; otherwise all valid encounters are listed.  Rows are sorted
    by encounter start time.
    """
    ibs = current_app.ibs
    filtered = True
    eid = request.args.get('eid', '')
    if len(eid) > 0:
        eid_list = eid.strip().split(',')
        eid_list = [ None if eid_ == 'None' or eid_ == '' else int(eid_) for eid_ in eid_list ]
    else:
        eid_list = ibs.get_valid_eids()
        filtered = False
    start_time_posix_list = ibs.get_encounter_start_time_posix(eid_list)
    datetime_list = [
        ut.unixtime_to_datetimestr(start_time_posix)
        if start_time_posix is not None else
        'Unknown'
        for start_time_posix in start_time_posix_list
    ]
    gids_list = [ ibs.get_valid_gids(eid=eid_) for eid_ in eid_list ]
    aids_list = [ ut.flatten(ibs.get_image_aids(gid_list)) for gid_list in gids_list ]
    images_reviewed_list = [ encounter_image_processed(ibs, gid_list) for gid_list in gids_list ]
    annots_reviewed_viewpoint_list = [ encounter_annot_viewpoint_processed(ibs, aid_list) for aid_list in aids_list ]
    annots_reviewed_quality_list = [ encounter_annot_quality_processed(ibs, aid_list) for aid_list in aids_list ]
    image_processed_list = [ images_reviewed.count(True) for images_reviewed in images_reviewed_list ]
    annot_processed_viewpoint_list = [ annots_reviewed.count(True) for annots_reviewed in annots_reviewed_viewpoint_list ]
    annot_processed_quality_list = [ annots_reviewed.count(True) for annots_reviewed in annots_reviewed_quality_list ]
    # An encounter is fully reviewed when every image and every annot
    # (viewpoint and quality) is processed.
    reviewed_list = [ all(images_reviewed) and all(annots_reviewed_viewpoint) and all(annot_processed_quality) for images_reviewed, annots_reviewed_viewpoint, annot_processed_quality in zip(images_reviewed_list, annots_reviewed_viewpoint_list, annots_reviewed_quality_list) ]
    # NOTE(review): sorting the zip() result in place relies on Python 2
    # semantics (zip returns a list); Python 3 would need list(zip(...)).
    encounter_list = zip(
        eid_list,
        ibs.get_encounter_text(eid_list),
        ibs.get_encounter_num_gids(eid_list),
        image_processed_list,
        ibs.get_encounter_num_aids(eid_list),
        annot_processed_viewpoint_list,
        annot_processed_quality_list,
        start_time_posix_list,
        datetime_list,
        reviewed_list,
    )
    # Sort by start_time_posix (tuple index 7).
    encounter_list.sort(key=lambda t: t[7])
    return ap.template('view', 'encounters',
                       filtered=filtered,
                       eid_list=eid_list,
                       eid_list_str=','.join(map(str, eid_list)),
                       num_eids=len(eid_list),
                       encounter_list=encounter_list,
                       num_encounters=len(encounter_list))
@register_route('/view/images')
def view_images():
    """Render a paginated table of images.

    Filter precedence from the query string: explicit ``gid`` csv list,
    else an ``eid`` csv list (all images of those encounters); otherwise
    all valid images.  Results are paged by PAGE_SIZE and sorted by the
    image unixtime.
    """
    ibs = current_app.ibs
    filtered = True
    eid_list = []
    gid = request.args.get('gid', '')
    eid = request.args.get('eid', '')
    page = max(0, int(request.args.get('page', 1)))
    if len(gid) > 0:
        gid_list = gid.strip().split(',')
        gid_list = [ None if gid_ == 'None' or gid_ == '' else int(gid_) for gid_ in gid_list ]
    elif len(eid) > 0:
        eid_list = eid.strip().split(',')
        eid_list = [ None if eid_ == 'None' or eid_ == '' else int(eid_) for eid_ in eid_list ]
        # BUG FIX: the original passed the raw request string ``eid`` for
        # every element instead of the parsed per-encounter rowid ``eid_``
        # (compare view_annotations / view_names, which use eid=eid_).
        gid_list = ut.flatten([ ibs.get_valid_gids(eid=eid_) for eid_ in eid_list ])
    else:
        gid_list = ibs.get_valid_gids()
        filtered = False
    # Page
    page_start = min(len(gid_list), (page - 1) * PAGE_SIZE)
    page_end = min(len(gid_list), page * PAGE_SIZE)
    page_total = int(math.ceil(len(gid_list) / PAGE_SIZE))
    page_previous = None if page_start == 0 else page - 1
    page_next = None if page_end == len(gid_list) else page + 1
    gid_list = gid_list[page_start:page_end]
    print('[web] Loading Page [ %d -> %d ] (%d), Prev: %s, Next: %s' % (page_start, page_end, len(gid_list), page_previous, page_next, ))
    image_unixtime_list = ibs.get_image_unixtime(gid_list)
    datetime_list = [
        ut.unixtime_to_datetimestr(image_unixtime)
        if image_unixtime is not None
        else
        'Unknown'
        for image_unixtime in image_unixtime_list
    ]
    image_list = zip(
        gid_list,
        [ ','.join(map(str, eid_list_)) for eid_list_ in ibs.get_image_eids(gid_list) ],
        ibs.get_image_gnames(gid_list),
        image_unixtime_list,
        datetime_list,
        ibs.get_image_gps(gid_list),
        ibs.get_image_party_tag(gid_list),
        ibs.get_image_contributor_tag(gid_list),
        ibs.get_image_notes(gid_list),
        encounter_image_processed(ibs, gid_list),
    )
    # Sort by image unixtime (tuple index 3).
    image_list.sort(key=lambda t: t[3])
    return ap.template('view', 'images',
                       filtered=filtered,
                       eid_list=eid_list,
                       eid_list_str=','.join(map(str, eid_list)),
                       num_eids=len(eid_list),
                       gid_list=gid_list,
                       gid_list_str=','.join(map(str, gid_list)),
                       num_gids=len(gid_list),
                       image_list=image_list,
                       num_images=len(image_list),
                       page=page,
                       page_start=page_start,
                       page_end=page_end,
                       page_total=page_total,
                       page_previous=page_previous,
                       page_next=page_next)
@register_route('/view/annotations')
def view_annotations():
    """Render a paginated table of annotations.

    Filter precedence from the query string: ``aid`` csv list, else ``gid``
    csv list (annots of those images), else ``eid`` csv list (annots of
    those encounters); otherwise all valid annotations.  Results are paged
    by PAGE_SIZE and sorted by annotation rowid.
    """
    ibs = current_app.ibs
    filtered = True
    eid_list = []
    gid_list = []
    aid = request.args.get('aid', '')
    gid = request.args.get('gid', '')
    eid = request.args.get('eid', '')
    page = max(0, int(request.args.get('page', 1)))
    if len(aid) > 0:
        aid_list = aid.strip().split(',')
        aid_list = [ None if aid_ == 'None' or aid_ == '' else int(aid_) for aid_ in aid_list ]
    elif len(gid) > 0:
        gid_list = gid.strip().split(',')
        gid_list = [ None if gid_ == 'None' or gid_ == '' else int(gid_) for gid_ in gid_list ]
        aid_list = ut.flatten(ibs.get_image_aids(gid_list))
    elif len(eid) > 0:
        eid_list = eid.strip().split(',')
        eid_list = [ None if eid_ == 'None' or eid_ == '' else int(eid_) for eid_ in eid_list ]
        gid_list = ut.flatten([ ibs.get_valid_gids(eid=eid_) for eid_ in eid_list ])
        aid_list = ut.flatten(ibs.get_image_aids(gid_list))
    else:
        aid_list = ibs.get_valid_aids()
        filtered = False
    # Page
    page_start = min(len(aid_list), (page - 1) * PAGE_SIZE)
    page_end = min(len(aid_list), page * PAGE_SIZE)
    page_total = int(math.ceil(len(aid_list) / PAGE_SIZE))
    page_previous = None if page_start == 0 else page - 1
    page_next = None if page_end == len(aid_list) else page + 1
    aid_list = aid_list[page_start:page_end]
    print('[web] Loading Page [ %d -> %d ] (%d), Prev: %s, Next: %s' % (page_start, page_end, len(aid_list), page_previous, page_next, ))
    # NOTE(review): in-place sort of zip() relies on Python 2 list semantics.
    annotation_list = zip(
        aid_list,
        ibs.get_annot_gids(aid_list),
        [ ','.join(map(str, eid_list_)) for eid_list_ in ibs.get_annot_eids(aid_list) ],
        ibs.get_annot_image_names(aid_list),
        ibs.get_annot_names(aid_list),
        ibs.get_annot_exemplar_flags(aid_list),
        ibs.get_annot_species_texts(aid_list),
        ibs.get_annot_yaw_texts(aid_list),
        ibs.get_annot_quality_texts(aid_list),
        ibs.get_annot_sex_texts(aid_list),
        ibs.get_annot_age_months_est(aid_list),
        # An annot counts as reviewed when both viewpoint and quality are done.
        [ reviewed_viewpoint and reviewed_quality for reviewed_viewpoint, reviewed_quality in zip(encounter_annot_viewpoint_processed(ibs, aid_list), encounter_annot_quality_processed(ibs, aid_list)) ],
    )
    # Sort by aid (tuple index 0).
    annotation_list.sort(key=lambda t: t[0])
    return ap.template('view', 'annotations',
                       filtered=filtered,
                       eid_list=eid_list,
                       eid_list_str=','.join(map(str, eid_list)),
                       num_eids=len(eid_list),
                       gid_list=gid_list,
                       gid_list_str=','.join(map(str, gid_list)),
                       num_gids=len(gid_list),
                       aid_list=aid_list,
                       aid_list_str=','.join(map(str, aid_list)),
                       num_aids=len(aid_list),
                       annotation_list=annotation_list,
                       num_annotations=len(annotation_list),
                       page=page,
                       page_start=page_start,
                       page_end=page_end,
                       page_total=page_total,
                       page_previous=page_previous,
                       page_next=page_next)
@register_route('/view/names')
def view_names():
    """Render a paginated table of names, each with its annotations.

    Filter precedence from the query string: ``nid`` csv list, else ``aid``
    csv list, else ``gid`` csv list, else ``eid`` csv list; otherwise all
    valid names.  Pages hold PAGE_SIZE / 5 names; rows sort by name rowid.
    """
    ibs = current_app.ibs
    filtered = True
    aid_list = []
    eid_list = []
    gid_list = []
    nid = request.args.get('nid', '')
    aid = request.args.get('aid', '')
    gid = request.args.get('gid', '')
    eid = request.args.get('eid', '')
    page = max(0, int(request.args.get('page', 1)))
    if len(nid) > 0:
        nid_list = nid.strip().split(',')
        nid_list = [ None if nid_ == 'None' or nid_ == '' else int(nid_) for nid_ in nid_list ]
    # BUG FIX: this branch was a separate `if`, so a request carrying only
    # `nid` fell through to the final `else` and clobbered nid_list/filtered.
    elif len(aid) > 0:
        aid_list = aid.strip().split(',')
        aid_list = [ None if aid_ == 'None' or aid_ == '' else int(aid_) for aid_ in aid_list ]
        nid_list = ibs.get_annot_name_rowids(aid_list)
    elif len(gid) > 0:
        gid_list = gid.strip().split(',')
        gid_list = [ None if gid_ == 'None' or gid_ == '' else int(gid_) for gid_ in gid_list ]
        aid_list = ut.flatten(ibs.get_image_aids(gid_list))
        nid_list = ibs.get_annot_name_rowids(aid_list)
    elif len(eid) > 0:
        eid_list = eid.strip().split(',')
        eid_list = [ None if eid_ == 'None' or eid_ == '' else int(eid_) for eid_ in eid_list ]
        gid_list = ut.flatten([ ibs.get_valid_gids(eid=eid_) for eid_ in eid_list ])
        aid_list = ut.flatten(ibs.get_image_aids(gid_list))
        nid_list = ibs.get_annot_name_rowids(aid_list)
    else:
        nid_list = ibs.get_valid_nids()
        filtered = False
    # Page (names pack more rows, so shrink the page size)
    PAGE_SIZE_ = int(PAGE_SIZE / 5)
    page_start = min(len(nid_list), (page - 1) * PAGE_SIZE_)
    page_end = min(len(nid_list), page * PAGE_SIZE_)
    page_total = int(math.ceil(len(nid_list) / PAGE_SIZE_))
    page_previous = None if page_start == 0 else page - 1
    page_next = None if page_end == len(nid_list) else page + 1
    nid_list = nid_list[page_start:page_end]
    print('[web] Loading Page [ %d -> %d ] (%d), Prev: %s, Next: %s' % (page_start, page_end, len(nid_list), page_previous, page_next, ))
    aids_list = ibs.get_name_aids(nid_list)
    annotations_list = [ zip(
        aid_list_,
        ibs.get_annot_gids(aid_list_),
        [ ','.join(map(str, eid_list_)) for eid_list_ in ibs.get_annot_eids(aid_list_) ],
        ibs.get_annot_image_names(aid_list_),
        ibs.get_annot_names(aid_list_),
        ibs.get_annot_exemplar_flags(aid_list_),
        ibs.get_annot_species_texts(aid_list_),
        ibs.get_annot_yaw_texts(aid_list_),
        ibs.get_annot_quality_texts(aid_list_),
        ibs.get_annot_sex_texts(aid_list_),
        ibs.get_annot_age_months_est(aid_list_),
        # Reviewed when both viewpoint and quality processing are done.
        [ reviewed_viewpoint and reviewed_quality for reviewed_viewpoint, reviewed_quality in zip(encounter_annot_viewpoint_processed(ibs, aid_list_), encounter_annot_quality_processed(ibs, aid_list_)) ],
    ) for aid_list_ in aids_list ]
    name_list = zip(
        nid_list,
        annotations_list
    )
    # Sort by nid (tuple index 0).
    name_list.sort(key=lambda t: t[0])
    return ap.template('view', 'names',
                       filtered=filtered,
                       eid_list=eid_list,
                       eid_list_str=','.join(map(str, eid_list)),
                       num_eids=len(eid_list),
                       gid_list=gid_list,
                       gid_list_str=','.join(map(str, gid_list)),
                       num_gids=len(gid_list),
                       aid_list=aid_list,
                       aid_list_str=','.join(map(str, aid_list)),
                       num_aids=len(aid_list),
                       nid_list=nid_list,
                       nid_list_str=','.join(map(str, nid_list)),
                       num_nids=len(nid_list),
                       name_list=name_list,
                       num_names=len(name_list),
                       page=page,
                       page_start=page_start,
                       page_end=page_end,
                       page_total=page_total,
                       page_previous=page_previous,
                       page_next=page_next)
@register_route('/turk')
def turk():
    """Serve the turk landing page, optionally scoped to an encounter id."""
    raw_eid = request.args.get('eid', '')
    eid = None if raw_eid in ('None', '') else int(raw_eid)
    return ap.template('turk', None, eid=eid)
@register_route('/turk/detection')
def turk_detection():
    """Serve the detection-review turk page for one image.

    Picks the requested ``gid`` or a random not-yet-reviewed image from the
    (optionally encounter-filtered) set, and ships its thumbnail plus the
    existing annotation boxes (scaled to ap.TARGET_WIDTH) to the template.
    """
    ibs = current_app.ibs
    refer_aid = request.args.get('refer_aid', None)
    eid = request.args.get('eid', '')
    eid = None if eid == 'None' or eid == '' else int(eid)
    gid_list = ibs.get_valid_gids(eid=eid)
    reviewed_list = encounter_image_processed(ibs, gid_list)
    progress = '%0.2f' % (100.0 * reviewed_list.count(True) / len(gid_list), )
    enctext = None if eid is None else ibs.get_encounter_text(eid)
    gid = request.args.get('gid', '')
    if len(gid) > 0:
        gid = int(gid)
    else:
        # Serve a random unreviewed image; None when everything is done.
        gid_list_ = ut.filterfalse_items(gid_list, reviewed_list)
        if len(gid_list_) == 0:
            gid = None
        else:
            # gid = gid_list_[0]
            gid = random.choice(gid_list_)
    previous = request.args.get('previous', None)
    finished = gid is None
    review = 'review' in request.args.keys()
    # NOTE(review): cookie values come back as strings; `== 0` matches only
    # if a non-string 0 was stored — confirm intended behavior.
    display_instructions = request.cookies.get('detection_instructions_seen', 1) == 0
    display_species_examples = False  # request.cookies.get('detection_example_species_seen', 0) == 0
    if not finished:
        gpath = ibs.get_image_thumbpath(gid, ensure_paths=True, draw_annots=False)
        image = ap.open_oriented_image(gpath)
        image_src = ap.embed_image_html(image, filter_width=False)
        # Get annotations
        width, height = ibs.get_image_sizes(gid)
        # Boxes are scaled from image pixels to the displayed width.
        scale_factor = float(ap.TARGET_WIDTH) / float(width)
        aid_list = ibs.get_image_aids(gid)
        annot_bbox_list = ibs.get_annot_bboxes(aid_list)
        annot_thetas_list = ibs.get_annot_thetas(aid_list)
        species_list = ibs.get_annot_species_texts(aid_list)
        # Get annotation bounding boxes
        annotation_list = []
        for aid, annot_bbox, annot_theta, species in zip(aid_list, annot_bbox_list, annot_thetas_list, species_list):
            temp = {}
            temp['left'] = int(scale_factor * annot_bbox[0])
            temp['top'] = int(scale_factor * annot_bbox[1])
            temp['width'] = int(scale_factor * (annot_bbox[2]))
            temp['height'] = int(scale_factor * (annot_bbox[3]))
            temp['label'] = species
            temp['id'] = aid
            temp['angle'] = float(annot_theta)
            annotation_list.append(temp)
        if len(species_list) > 0:
            species = max(set(species_list), key=species_list.count)  # Get most common species
        elif default_species(ibs) is not None:
            species = default_species(ibs)
        else:
            species = KEY_DEFAULTS[SPECIES_KEY]
    else:
        gpath = None
        species = None
        image_src = None
        annotation_list = []
    return ap.template('turk', 'detection',
                       eid=eid,
                       gid=gid,
                       refer_aid=refer_aid,
                       species=species,
                       image_path=gpath,
                       image_src=image_src,
                       previous=previous,
                       enctext=enctext,
                       progress=progress,
                       finished=finished,
                       annotation_list=annotation_list,
                       display_instructions=display_instructions,
                       display_species_examples=display_species_examples,
                       review=review)
def get_turk_annot_args(is_reviewed_func):
    """
    Helper to return aids in an encounter or a group review.

    Args:
        is_reviewed_func (callable): ``(ibs, aid_list) -> list of bool``
            flags marking which annots are already processed.

    Returns:
        tuple: (aid_list, reviewed_list, eid, src_ag, dst_ag, progress,
                aid, previous) where ``aid`` is the next annot to serve
                (None when everything is reviewed) and ``progress`` is a
                percentage string.
    """
    ibs = current_app.ibs
    def _ensureid(_id):
        # Parse a request-string id: '' / 'None' -> None, otherwise int.
        return None if _id == 'None' or _id == '' else int(_id)
    eid = request.args.get('eid', '')
    src_ag = request.args.get('src_ag', '')
    dst_ag = request.args.get('dst_ag', '')
    eid = _ensureid(eid)
    src_ag = _ensureid(src_ag)
    dst_ag = _ensureid(dst_ag)
    # Group review mode: both a source and destination annot group given.
    group_review_flag = src_ag is not None and dst_ag is not None
    if not group_review_flag:
        gid_list = ibs.get_valid_gids(eid=eid)
        aid_list = ut.flatten(ibs.get_image_aids(gid_list))
        reviewed_list = is_reviewed_func(ibs, aid_list)
    else:
        # In group review, an annot counts as reviewed once it appears in
        # the destination group.
        src_gar_rowid_list = ibs.get_annotgroup_gar_rowids(src_ag)
        dst_gar_rowid_list = ibs.get_annotgroup_gar_rowids(dst_ag)
        src_aid_list = ibs.get_gar_aid(src_gar_rowid_list)
        dst_aid_list = ibs.get_gar_aid(dst_gar_rowid_list)
        aid_list = src_aid_list
        reviewed_list = [ src_aid in dst_aid_list for src_aid in src_aid_list ]
    try:
        progress = '%0.2f' % (100.0 * reviewed_list.count(True) / len(aid_list), )
    except ZeroDivisionError:
        progress = '0.00'
    aid = request.args.get('aid', '')
    if len(aid) > 0:
        aid = int(aid)
    else:
        aid_list_ = ut.filterfalse_items(aid_list, reviewed_list)
        if len(aid_list_) == 0:
            aid = None
        else:
            # Group reviews serve annots in order; normal reviews randomize.
            if group_review_flag:
                aid = aid_list_[0]
            else:
                aid = random.choice(aid_list_)
    previous = request.args.get('previous', None)
    print('aid = %r' % (aid,))
    #print(ut.dict_str(ibs.get_annot_info(aid)))
    print(ut.obj_str(ibs.get_annot_info(aid, default=True, nl=True)))
    return aid_list, reviewed_list, eid, src_ag, dst_ag, progress, aid, previous
@register_route('/turk/viewpoint')
def turk_viewpoint():
    """Serve the viewpoint-labeling turk page for the next unreviewed annot.

    CommandLine:
        python -m ibeis.web.app --exec-turk_viewpoint --db PZ_Master1

    Example:
        >>> # SCRIPT
        >>> from ibeis.ibsfuncs import *  # NOQA
        >>> import ibeis
        >>> ibs = ibeis.opendb(defaultdb='PZ_Master1')
        >>> aid_list_ = ibs.find_unlabeled_name_members(suspect_yaws=True)
        >>> aid_list = ibs.filter_aids_to_quality(aid_list_, 'good', unknown_ok=False)
        >>> ibs.start_web_annot_groupreview(aid_list)
    """
    ibs = current_app.ibs
    tup = get_turk_annot_args(encounter_annot_viewpoint_processed)
    (aid_list, reviewed_list, eid, src_ag, dst_ag, progress, aid, previous) = tup
    # The UI edits viewpoints in the legacy degree convention.
    value = convert_yaw_to_old_viewpoint(ibs.get_annot_yaws(aid))
    review = 'review' in request.args.keys()
    finished = aid is None
    # NOTE(review): cookie values are strings; `== 0` only matches a
    # non-string 0 — confirm intended behavior.
    display_instructions = request.cookies.get('viewpoint_instructions_seen', 1) == 0
    if not finished:
        gid = ibs.get_annot_gids(aid)
        gpath = ibs.get_annot_chip_fpath(aid)
        image = ap.open_oriented_image(gpath)
        image_src = ap.embed_image_html(image)
    else:
        gid = None
        gpath = None
        image_src = None
    enctext = ibs.get_encounter_text(eid)
    return ap.template('turk', 'viewpoint',
                       eid=eid,
                       src_ag=src_ag,
                       dst_ag=dst_ag,
                       gid=gid,
                       aid=aid,
                       value=value,
                       image_path=gpath,
                       image_src=image_src,
                       previous=previous,
                       enctext=enctext,
                       progress=progress,
                       finished=finished,
                       display_instructions=display_instructions,
                       review=review)
@register_route('/turk/quality')
def turk_quality():
    """Serve the quality-labeling turk page for the next unreviewed annot.

    PZ Needs Tags:
        17242
        14468
        14427
        15946
        14771
        14084
        4102
        6074
        3409

    GZ Needs Tags;
        1302

    CommandLine:
        python -m ibeis.web.app --exec-turk_quality --db PZ_Master1
        python -m ibeis.web.app --exec-turk_quality --db GZ_Master1
        python -m ibeis.web.app --exec-turk_quality --db GIRM_Master1

    Example:
        >>> # SCRIPT
        >>> from ibeis.ibsfuncs import *  # NOQA
        >>> import ibeis
        >>> ibs = ibeis.opendb(defaultdb='testdb1')
        >>> aid_list_ = ibs.find_unlabeled_name_members(qual=True)
        >>> valid_views = ['primary', 'primary1', 'primary-1']
        >>> aid_list = ibs.filter_aids_to_viewpoint(aid_list_, valid_views, unknown_ok=False)
        >>> ibs.start_web_annot_groupreview(aid_list)
    """
    ibs = current_app.ibs
    tup = get_turk_annot_args(encounter_annot_quality_processed)
    (aid_list, reviewed_list, eid, src_ag, dst_ag, progress, aid, previous) = tup
    value = ibs.get_annot_qualities(aid)
    # Normalize sentinel values for the UI: -1 -> no selection, 0 -> 1.
    if value == -1:
        value = None
    if value == 0:
        value = 1
    review = 'review' in request.args.keys()
    finished = aid is None
    # NOTE(review): cookie values are strings; `== 0` only matches a
    # non-string 0 — confirm intended behavior.
    display_instructions = request.cookies.get('quality_instructions_seen', 1) == 0
    if not finished:
        gid = ibs.get_annot_gids(aid)
        gpath = ibs.get_annot_chip_fpath(aid)
        image = ap.open_oriented_image(gpath)
        image_src = ap.embed_image_html(image)
    else:
        gid = None
        gpath = None
        image_src = None
    enctext = ibs.get_encounter_text(eid)
    return ap.template('turk', 'quality',
                       eid=eid,
                       src_ag=src_ag,
                       dst_ag=dst_ag,
                       gid=gid,
                       aid=aid,
                       value=value,
                       image_path=gpath,
                       image_src=image_src,
                       previous=previous,
                       enctext=enctext,
                       progress=progress,
                       finished=finished,
                       display_instructions=display_instructions,
                       review=review)
##@register_route('/turk/viewpoint')
#def old_turk_viewpoint():
# #ibs = current_app.ibs
# #eid = request.args.get('eid', '')
# #eid = None if eid == 'None' or eid == '' else int(eid)
# #enctext = None if eid is None else ibs.get_encounter_text(eid)
# #src_ag = request.args.get('src_ag', '')
# #src_ag = None if src_ag == 'None' or src_ag == '' else int(src_ag)
# #dst_ag = request.args.get('dst_ag', '')
# #dst_ag = None if dst_ag == 'None' or dst_ag == '' else int(dst_ag)
# #group_review_flag = src_ag is not None and dst_ag is not None
# #if not group_review_flag:
# # gid_list = ibs.get_valid_gids(eid=eid)
# # aid_list = ut.flatten(ibs.get_image_aids(gid_list))
# # reviewed_list = encounter_annot_viewpoint_processed(ibs, aid_list)
# #else:
# # src_gar_rowid_list = ibs.get_annotgroup_gar_rowids(src_ag)
# # dst_gar_rowid_list = ibs.get_annotgroup_gar_rowids(dst_ag)
# # src_aid_list = ibs.get_gar_aid(src_gar_rowid_list)
# # dst_aid_list = ibs.get_gar_aid(dst_gar_rowid_list)
# # aid_list = src_aid_list
# # reviewed_list = [ src_aid in dst_aid_list for src_aid in src_aid_list ]
# #previous = request.args.get('previous', None)
# ibs = current_app.ibs
# tup = get_turk_annot_args(encounter_annot_viewpoint_processed)
# (aid_list, reviewed_list, eid, src_ag, dst_ag, progress, aid, previous) = tup
# value = convert_yaw_to_old_viewpoint(ibs.get_annot_yaws(aid))
# review = 'review' in request.args.keys()
# finished = aid is None
# display_instructions = request.cookies.get('viewpoint_instructions_seen', 1) == 0
# if not finished:
# gid = ibs.get_annot_gids(aid)
# gpath = ibs.get_annot_chip_fpath(aid)
# image = ap.open_oriented_image(gpath)
# image_src = ap.embed_image_html(image)
# else:
# gid = None
# gpath = None
# image_src = None
# enctext = ibs.get_encounter_text(eid)
# return ap.template('turk', 'viewpoint',
# eid=eid,
# src_ag=src_ag,
# dst_ag=dst_ag,
# gid=gid,
# aid=aid,
# value=value,
# image_path=gpath,
# image_src=image_src,
# previous=previous,
# enctext=enctext,
# progress=progress,
# finished=finished,
# display_instructions=display_instructions,
# review=review)
#@register_route('/turk/quality')
#def old_turk_quality():
# #ibs = current_app.ibs
# #eid = request.args.get('eid', '')
# #eid = None if eid == 'None' or eid == '' else int(eid)
# #gid_list = ibs.get_valid_gids(eid=eid)
# #aid_list = ut.flatten(ibs.get_image_aids(gid_list))
# #reviewed_list = encounter_annot_quality_processed(ibs, aid_list)
# #try:
# # progress = '%0.2f' % (100.0 * reviewed_list.count(True) / len(aid_list), )
# #except ZeroDivisionError:
# # progress = '0.00'
# #aid = request.args.get('aid', '')
# #if len(aid) > 0:
# # aid = int(aid)
# #else:
# # aid_list_ = ut.filterfalse_items(aid_list, reviewed_list)
# # if len(aid_list_) == 0:
# # aid = None
# # else:
# # # aid = aid_list_[0]
# # aid = random.choice(aid_list_)
# #previous = request.args.get('previous', None)
# ibs = current_app.ibs
# tup = get_turk_annot_args(encounter_annot_quality_processed)
# (aid_list, reviewed_list, eid, src_ag, dst_ag, progress, aid, previous) = tup
# value = ibs.get_annot_qualities(aid)
# if value == -1:
# value = None
# if value == 0:
# value = 1
# review = 'review' in request.args.keys()
# finished = aid is None
# display_instructions = request.cookies.get('quality_instructions_seen', 1) == 0
# if not finished:
# gid = ibs.get_annot_gids(aid)
# gpath = ibs.get_annot_chip_fpath(aid)
# image = ap.open_oriented_image(gpath)
# image_src = ap.embed_image_html(image)
# else:
# gid = None
# gpath = None
# image_src = None
# enctext = ibs.get_encounter_text(eid)
# return ap.template('turk', 'quality',
# eid=eid,
# src_ag=src_ag,
# dst_ag=dst_ag,
# gid=gid,
# aid=aid,
# value=value,
# image_path=gpath,
# image_src=image_src,
# previous=previous,
# enctext=enctext,
# progress=progress,
# finished=finished,
# display_instructions=display_instructions,
# review=review)
@register_route('/turk/additional')
def turk_additional():
    """Serve the 'additional info' turk page (sex and age) for an annot.

    Picks the requested ``aid`` or a random not-yet-reviewed annot from the
    (optionally encounter-filtered) set, maps its stored sex/age onto the
    form's select values, and lists the other annots sharing its name.
    """
    ibs = current_app.ibs
    eid = request.args.get('eid', '')
    eid = None if eid == 'None' or eid == '' else int(eid)
    gid_list = ibs.get_valid_gids(eid=eid)
    aid_list = ut.flatten(ibs.get_image_aids(gid_list))
    nid_list = ibs.get_annot_nids(aid_list)
    reviewed_list = encounter_annot_additional_processed(ibs, aid_list, nid_list)
    try:
        progress = '%0.2f' % (100.0 * reviewed_list.count(True) / len(aid_list), )
    except ZeroDivisionError:
        progress = '0.00'
    enctext = None if eid is None else ibs.get_encounter_text(eid)
    aid = request.args.get('aid', '')
    if len(aid) > 0:
        aid = int(aid)
    else:
        aid_list_ = ut.filterfalse_items(aid_list, reviewed_list)
        if len(aid_list_) == 0:
            aid = None
        else:
            # aid = aid_list_[0]
            aid = random.choice(aid_list_)
    previous = request.args.get('previous', None)
    # Sex select values are offset by 2 in the form; negative means unset.
    value_sex = ibs.get_annot_sex([aid])[0]
    if value_sex >= 0:
        value_sex += 2
    else:
        value_sex = None
    # Map the stored (min, max) month estimate onto the form's age buckets.
    # BUG FIX: the original compared ints with `is` (e.g. `value_age_min is -1`),
    # which only works by CPython's small-int caching; use `==` instead.
    value_age_min, value_age_max = ibs.get_annot_age_months_est([aid])[0]
    value_age = None
    if (value_age_min == -1 or value_age_min is None) and (value_age_max == -1 or value_age_max is None):
        value_age = 1
    if (value_age_min == 0 or value_age_min is None) and value_age_max == 2:
        value_age = 2
    elif value_age_min == 3 and value_age_max == 5:
        value_age = 3
    elif value_age_min == 6 and value_age_max == 11:
        value_age = 4
    elif value_age_min == 12 and value_age_max == 23:
        value_age = 5
    elif value_age_min == 24 and value_age_max == 35:
        value_age = 6
    elif value_age_min == 36 and (value_age_max > 36 or value_age_max is None):
        value_age = 7
    review = 'review' in request.args.keys()
    finished = aid is None
    display_instructions = request.cookies.get('additional_instructions_seen', 1) == 0
    if not finished:
        gid = ibs.get_annot_gids(aid)
        gpath = ibs.get_annot_chip_fpath(aid)
        image = ap.open_oriented_image(gpath)
        image_src = ap.embed_image_html(image)
    else:
        gid = None
        gpath = None
        image_src = None
    name_aid_list = None
    # BUG FIX: initialize so the template call below cannot hit a NameError
    # when the annot has no name (nid is None).
    name_aid_combined_list = []
    nid = ibs.get_annot_name_rowids(aid)
    if nid is not None:
        # Gather the sibling annots of this name, best quality first.
        name_aid_list = ibs.get_name_aids(nid)
        quality_list = ibs.get_annot_qualities(name_aid_list)
        quality_text_list = ibs.get_annot_quality_texts(name_aid_list)
        yaw_text_list = ibs.get_annot_yaw_texts(name_aid_list)
        name_aid_combined_list = list(zip(
            name_aid_list,
            quality_list,
            quality_text_list,
            yaw_text_list,
        ))
        name_aid_combined_list.sort(key=lambda t: t[1], reverse=True)
    return ap.template('turk', 'additional',
                       eid=eid,
                       gid=gid,
                       aid=aid,
                       value_sex=value_sex,
                       value_age=value_age,
                       image_path=gpath,
                       name_aid_combined_list=name_aid_combined_list,
                       image_src=image_src,
                       previous=previous,
                       enctext=enctext,
                       progress=progress,
                       finished=finished,
                       display_instructions=display_instructions,
                       review=review)
@register_route('/submit/detection', methods=['POST'])
def submit_detection():
    """Handle a detection-turk form submission for one image.

    Dispatches on the submit button value:
      * 'delete' - currently a no-op (image deletion is commented out)
      * 'clear'  - removes all annotations from the image and redirects back
      * default  - replaces the image's annotations with the boxes drawn in
        the web UI, then marks the image as reviewed
    """
    ibs = current_app.ibs
    method = request.form.get('detection-submit', '')
    eid = request.args.get('eid', '')
    eid = None if eid == 'None' or eid == '' else int(eid)
    gid = int(request.form['detection-gid'])
    turk_id = request.cookies.get('turk_id', -1)
    if method.lower() == 'delete':
        # ibs.delete_images(gid)
        # print('[web] (DELETED) turk_id: %s, gid: %d' % (turk_id, gid, ))
        pass
    elif method.lower() == 'clear':
        aid_list = ibs.get_image_aids(gid)
        ibs.delete_annots(aid_list)
        # BUGFIX: log message typo corrected (was 'CLEAERED')
        print('[web] (CLEARED) turk_id: %s, gid: %d' % (turk_id, gid, ))
        redirection = request.referrer
        if 'gid' not in redirection:
            # Prevent multiple clears on refresh by pinning gid into the URL
            if '?' in redirection:
                redirection = '%s&gid=%d' % (redirection, gid, )
            else:
                redirection = '%s?gid=%d' % (redirection, gid, )
        return redirect(redirection)
    else:
        current_aid_list = ibs.get_image_aids(gid)
        # Make new annotations
        width, height = ibs.get_image_sizes(gid)
        # The UI draws on a scaled-down image; rescale boxes to image pixels
        scale_factor = float(width) / float(ap.TARGET_WIDTH)
        # Get aids
        annotation_list = json.loads(request.form['detection-annotations'])
        bbox_list = [
            (
                int(scale_factor * annot['left']),
                int(scale_factor * annot['top']),
                int(scale_factor * annot['width']),
                int(scale_factor * annot['height']),
            )
            for annot in annotation_list
        ]
        theta_list = [
            float(annot['angle'])
            for annot in annotation_list
        ]
        # id is None for boxes drawn fresh in the UI, an existing aid otherwise
        survived_aid_list = [
            None if annot['id'] is None else int(annot['id'])
            for annot in annotation_list
        ]
        species_list = [
            annot['label']
            for annot in annotation_list
        ]
        # Delete annotations that didn't survive
        kill_aid_list = list(set(current_aid_list) - set(survived_aid_list))
        ibs.delete_annots(kill_aid_list)
        for aid, bbox, theta, species in zip(survived_aid_list, bbox_list, theta_list, species_list):
            if aid is None:
                ibs.add_annots([gid], [bbox], theta_list=[theta], species_list=[species])
            else:
                ibs.set_annot_bboxes([aid], [bbox])
                ibs.set_annot_thetas([aid], [theta])
                ibs.set_annot_species([aid], [species])
        ibs.set_image_reviewed([gid], [1])
        # NOTE(review): the 'bbox_list' label below actually prints the raw
        # annotation_list -- confirm which was intended before changing.
        print('[web] turk_id: %s, gid: %d, bbox_list: %r, species_list: %r' % (turk_id, gid, annotation_list, species_list))
    # Return HTML
    refer = request.args.get('refer', '')
    if len(refer) > 0:
        return redirect(ap.decode_refer_url(refer))
    else:
        return redirect(url_for('turk_detection', eid=eid, previous=gid))
def movegroup_aid(ibs, aid, src_ag, dst_ag):
    """Move annotation *aid* from annot-group *src_ag* into *dst_ag*.

    Looks up the group-relationship row that ties the annotation to the
    source group, logs the move, and adds the annotation to the target
    group.  (Removal of the old relationship is currently disabled.)
    """
    rel_rowids = ibs.get_annot_gar_rowids(aid)
    group_rowids = ibs.get_gar_annotgroup_rowid(rel_rowids)
    src_rel_rowid = rel_rowids[group_rowids.index(src_ag)]
    print('Moving aid: %s from src_ag: %s (%s) to dst_ag: %s' % (
        aid, src_ag, src_rel_rowid, dst_ag))
    # ibs.delete_gar([src_rel_rowid])
    ibs.add_gar([dst_ag], [aid])
@register_route('/submit/viewpoint', methods=['POST'])
def submit_viewpoint():
    """Handle a viewpoint-turk form submission for one annotation.

    Dispatches on the submit button value: 'delete', 'make junk',
    'rotate left', 'rotate right', or (default) storing the selected
    viewpoint value as the annotation's yaw.

    BUGFIX: the branches are now a single if/elif chain.  Previously each
    method was an independent `if` and only 'rotate right' owned the final
    `else`, so a 'delete' submission fell through into the else branch and
    attempted to set a yaw on the just-deleted annotation (aid=None).
    """
    ibs = current_app.ibs
    method = request.form.get('viewpoint-submit', '')
    eid = request.args.get('eid', '')
    eid = None if eid == 'None' or eid == '' else int(eid)
    src_ag = request.args.get('src_ag', '')
    src_ag = None if src_ag == 'None' or src_ag == '' else int(src_ag)
    dst_ag = request.args.get('dst_ag', '')
    dst_ag = None if dst_ag == 'None' or dst_ag == '' else int(dst_ag)
    aid = int(request.form['viewpoint-aid'])
    turk_id = request.cookies.get('turk_id', -1)
    if method.lower() == 'delete':
        ibs.delete_annots(aid)
        print('[web] (DELETED) turk_id: %s, aid: %d' % (turk_id, aid, ))
        aid = None  # Reset AID to prevent previous
    elif method.lower() == 'make junk':
        ibs.set_annot_quality_texts([aid], [const.QUAL_JUNK])
        print('[web] (SET AS JUNK) turk_id: %s, aid: %d' % (turk_id, aid, ))
        redirection = request.referrer
        if 'aid' not in redirection:
            # Prevent multiple clears
            if '?' in redirection:
                redirection = '%s&aid=%d' % (redirection, aid, )
            else:
                redirection = '%s?aid=%d' % (redirection, aid, )
        return redirect(redirection)
    elif method.lower() == 'rotate left':
        theta = ibs.get_annot_thetas(aid)
        theta = (theta + PI / 2) % TAU
        ibs.set_annot_thetas(aid, theta)
        # A 90-degree rotation swaps width/height; shift the corner so the
        # bbox stays centered on the same point.
        (xtl, ytl, w, h) = ibs.get_annot_bboxes(aid)
        diffx = int(round((w / 2.0) - (h / 2.0)))
        diffy = int(round((h / 2.0) - (w / 2.0)))
        xtl, ytl, w, h = xtl + diffx, ytl + diffy, h, w
        ibs.set_annot_bboxes([aid], [(xtl, ytl, w, h)])
        print('[web] (ROTATED LEFT) turk_id: %s, aid: %d' % (turk_id, aid, ))
        redirection = request.referrer
        if 'aid' not in redirection:
            # Prevent multiple clears
            if '?' in redirection:
                redirection = '%s&aid=%d' % (redirection, aid, )
            else:
                redirection = '%s?aid=%d' % (redirection, aid, )
        return redirect(redirection)
    elif method.lower() == 'rotate right':
        theta = ibs.get_annot_thetas(aid)
        theta = (theta - PI / 2) % TAU
        ibs.set_annot_thetas(aid, theta)
        # Same center-preserving bbox swap as the left rotation
        (xtl, ytl, w, h) = ibs.get_annot_bboxes(aid)
        diffx = int(round((w / 2.0) - (h / 2.0)))
        diffy = int(round((h / 2.0) - (w / 2.0)))
        xtl, ytl, w, h = xtl + diffx, ytl + diffy, h, w
        ibs.set_annot_bboxes([aid], [(xtl, ytl, w, h)])
        print('[web] (ROTATED RIGHT) turk_id: %s, aid: %d' % (turk_id, aid, ))
        redirection = request.referrer
        if 'aid' not in redirection:
            # Prevent multiple clears
            if '?' in redirection:
                redirection = '%s&aid=%d' % (redirection, aid, )
            else:
                redirection = '%s?aid=%d' % (redirection, aid, )
        return redirect(redirection)
    else:
        if src_ag is not None and dst_ag is not None:
            movegroup_aid(ibs, aid, src_ag, dst_ag)
        value = int(request.form['viewpoint-value'])
        yaw = convert_old_viewpoint_to_yaw(value)
        ibs.set_annot_yaws([aid], [yaw], input_is_degrees=False)
        print('[web] turk_id: %s, aid: %d, yaw: %d' % (turk_id, aid, yaw))
    # Return HTML
    refer = request.args.get('refer', '')
    if len(refer) > 0:
        return redirect(ap.decode_refer_url(refer))
    else:
        return redirect(url_for('turk_viewpoint', eid=eid, src_ag=src_ag,
                                dst_ag=dst_ag, previous=aid))
@register_route('/submit/quality', methods=['POST'])
def submit_quality():
    """Record the quality value submitted from the quality-turk form,
    or delete the annotation when the delete button was pressed."""
    ibs = current_app.ibs

    def _opt_int_arg(key):
        # '' or the literal string 'None' both mean "not provided"
        raw = request.args.get(key, '')
        return None if raw in ('None', '') else int(raw)

    method = request.form.get('quality-submit', '')
    eid = _opt_int_arg('eid')
    aid = int(request.form['quality-aid'])
    turk_id = request.cookies.get('turk_id', -1)
    src_ag = _opt_int_arg('src_ag')
    dst_ag = _opt_int_arg('dst_ag')
    if method.lower() == 'delete':
        ibs.delete_annots(aid)
        print('[web] (DELETED) turk_id: %s, aid: %d' % (turk_id, aid, ))
        aid = None  # Reset AID to prevent previous
    else:
        if src_ag is not None and dst_ag is not None:
            movegroup_aid(ibs, aid, src_ag, dst_ag)
        quality = int(request.form['quality-value'])
        ibs.set_annot_qualities([aid], [quality])
        print('[web] turk_id: %s, aid: %d, quality: %d' % (turk_id, aid, quality))
    # Return HTML
    refer = request.args.get('refer', '')
    if len(refer) > 0:
        return redirect(ap.decode_refer_url(refer))
    else:
        return redirect(url_for('turk_quality', eid=eid, src_ag=src_ag,
                                dst_ag=dst_ag, previous=aid))
@register_route('/submit/additional', methods=['POST'])
def submit_additional():
    """Record sex and estimated-age values submitted from the
    additional-info turk form, or delete the annotation."""
    ibs = current_app.ibs
    method = request.form.get('additional-submit', '')
    eid = request.args.get('eid', '')
    eid = None if eid == 'None' or eid == '' else int(eid)
    aid = int(request.form['additional-aid'])
    turk_id = request.cookies.get('turk_id', -1)
    if method.lower() == 'delete':
        ibs.delete_annots(aid)
        print('[web] (DELETED) turk_id: %s, aid: %d' % (turk_id, aid, ))
        aid = None  # Reset AID to prevent previous
    else:
        sex = int(request.form['additional-sex-value'])
        age = int(request.form['additional-age-value'])
        # Web sex codes are offset by 2; anything below 2 means unknown (-1)
        sex = sex - 2 if sex >= 2 else -1
        # Map the web age bucket onto an estimated (min, max) month range;
        # unknown buckets map to (None, None)
        age_bucket_to_range = {
            1: (None, None),
            2: (None, 2),
            3: (3, 5),
            4: (6, 11),
            5: (12, 23),
            6: (24, 35),
            7: (36, None),
        }
        age_min, age_max = age_bucket_to_range.get(age, (None, None))
        ibs.set_annot_sex([aid], [sex])
        nid = ibs.get_annot_name_rowids(aid)
        DAN_SPECIAL_WRITE_AGE_TO_ALL_ANOTATIONS = True
        if nid is not None and DAN_SPECIAL_WRITE_AGE_TO_ALL_ANOTATIONS:
            # Propagate the age estimate to every annotation of this name
            aid_list = ibs.get_name_aids(nid)
            ibs.set_annot_age_months_est_min(aid_list, [age_min] * len(aid_list))
            ibs.set_annot_age_months_est_max(aid_list, [age_max] * len(aid_list))
        else:
            ibs.set_annot_age_months_est_min([aid], [age_min])
            ibs.set_annot_age_months_est_max([aid], [age_max])
        print('[web] turk_id: %s, aid: %d, sex: %r, age: %r' % (turk_id, aid, sex, age))
    # Return HTML
    refer = request.args.get('refer', '')
    if len(refer) > 0:
        return redirect(ap.decode_refer_url(refer))
    else:
        return redirect(url_for('turk_additional', eid=eid, previous=aid))
@register_route('/ajax/cookie')
def set_cookie():
    """Set a browser cookie from the 'name'/'value' query parameters."""
    cookie_name = request.args['name']
    cookie_value = request.args['value']
    response = make_response('true')
    response.set_cookie(cookie_name, cookie_value)
    print('[web] Set Cookie: %r -> %r' % (cookie_name, cookie_value, ))
    return response
@register_route('/ajax/image/src/<gid>')
def image_src(gid=None, fresh=False, **kwargs):
    """Serve the cached thumbnail for image <gid>.

    When 'fresh' is requested (keyword, query arg, or form field), the
    cached thumbnail file is deleted first so it is regenerated before
    being served.
    """
    ibs = current_app.ibs
    gpath = ibs.get_image_thumbpath(gid, ensure_paths=True)
    fresh = fresh or 'fresh' in request.args or 'fresh' in request.form
    if fresh:
        import os
        # Drop the stale thumbnail and let the controller rebuild it
        os.remove(gpath)
        gpath = ibs.get_image_thumbpath(gid, ensure_paths=True)
    return ap.return_src(gpath)
@register_api('/api/image/<gid>/', methods=['GET'])
def image_src_api(gid=None, fresh=False, **kwargs):
    r"""
    Returns the base64 encoded image of image <gid>
    RESTful:
        Method: GET
        URL:    /api/image/<gid>/
    """
    # Thin authenticated-API alias for the /ajax/image/src/<gid> route.
    return image_src(gid, fresh=fresh, **kwargs)
@register_route('/api/image/view/<gid>/', methods=['GET'])
def image_view_api(gid=None, fresh=False, **kwargs):
    r"""
    Returns the base64 encoded image of image <gid>
    RESTful:
        Method: GET
        URL:    /api/image/view/<gid>/
    """
    # Same payload as image_src_api, but wrapped in the 'single' HTML
    # template so it renders in a browser instead of returning raw data.
    encoded = image_src(gid, fresh=fresh, **kwargs)
    return ap.template(None, 'single', encoded=encoded)
@register_api('/api/image/zip', methods=['POST'])
def image_upload_zip(**kwargs):
    r"""
    Returns the gid_list for image files submitted in a ZIP archive.  The image
    archive should be flat (no folders will be scanned for images) and must be smaller
    than 100 MB.  The archive can submit multiple images, ideally in JPEG format to save
    space.  Duplicate image uploads will result in the duplicate images receiving
    the same gid based on the hashed pixel values.

    Args:
        image_zip_archive (binary): the POST variable containing the binary
            (multi-form) image archive data
        **kwargs: Arbitrary keyword arguments; the kwargs are passed down to
            the add_images function

    Returns:
        gid_list (list if rowids): the list of gids corresponding to the images
            submitted.  The gids correspond to the image names sorted in
            lexigraphical order.

    RESTful:
        Method: POST
        URL:    /api/image/zip
    """
    ibs = current_app.ibs
    # Get image archive
    image_archive = request.files.get('image_zip_archive', None)
    if image_archive is None:
        raise IOError('Image archive not given')
    # Find a unique, timestamped extraction directory under the uploads dir
    uploads_path = ibs.get_uploadsdir()
    ut.ensuredir(uploads_path)
    current_time = time.strftime('%Y_%m_%d_%H_%M_%S')
    modifier = 1
    upload_path = '%s' % (current_time)
    # BUGFIX: test existence of the full path under uploads_path; the old
    # code checked the bare directory name relative to the process CWD.
    while exists(join(uploads_path, upload_path)):
        upload_path = '%s_%04d' % (current_time, modifier)
        modifier += 1
    upload_path = join(uploads_path, upload_path)
    ut.ensuredir(upload_path)
    # Extract the content
    try:
        with zipfile.ZipFile(image_archive, 'r') as zfile:
            zfile.extractall(upload_path)
    except Exception:
        # Clean up the partially-extracted directory before failing
        ut.remove_dirs(upload_path)
        # BUGFIX: message typo corrected (was 'extracton')
        raise IOError('Image archive extraction failed')
    """
    test to ensure Directory and utool do the same thing

    from detecttools.directory import Directory
    upload_path = ut.truepath('~/Pictures')
    gpath_list1 = sorted(ut.list_images(upload_path, recursive=False, full=True))
    direct = Directory(upload_path, include_file_extensions='images', recursive=False)
    gpath_list = direct.files()
    gpath_list = sorted(gpath_list)
    assert gpath_list1 == gpath_list
    """
    # Add the extracted images in sorted (lexicographic) order
    gpath_list = sorted(ut.list_images(upload_path, recursive=False, full=True))
    gid_list = ibs.add_images(gpath_list, **kwargs)
    return gid_list
@register_api('/api/image/json/', methods=['POST'])
def add_images_json(ibs, image_uri_list, image_uuid_list, image_width_list,
                    image_height_list, image_orig_name_list=None, image_ext_list=None,
                    image_time_posix_list=None, image_gps_lat_list=None,
                    image_gps_lon_list=None, image_notes_list=None, **kwargs):
    """
    REST:
        Method: POST
        URL: /api/image/json/

    Ignore:
        sudo pip install boto

    Args:
        image_uri_list (list) : list of string image uris, most likely HTTP(S) or S3
            encoded URLs.  Alternatively, this can be a list of dictionaries (JSON
            objects) that specify AWS S3 stored assets.  An example below:

                image_uri_list = [
                    'http://domain.com/example/asset1.png',
                    '/home/example/Desktop/example/asset2.jpg',
                    's3://s3.amazon.com/example-bucket-2/asset1-in-bucket-2.tif',
                    {
                        'bucket'          : 'example-bucket-1',
                        'key'             : 'example/asset1.png',
                        'auth_domain'     : None,  # Uses localhost
                        'auth_access_id'  : None,  # Uses system default
                        'auth_secret_key' : None,  # Uses system default
                    },
                    {
                        'bucket' : 'example-bucket-1',
                        'key'    : 'example/asset2.jpg',
                        # if unspecified, auth uses localhost and system defaults
                    },
                    {
                        'bucket'          : 'example-bucket-2',
                        'key'             : 'example/asset1-in-bucket-2.tif',
                        'auth_domain'     : 's3.amazon.com',
                        'auth_access_id'  : '____________________',
                        'auth_secret_key' : '________________________________________',
                    },
                ]

            Note that you cannot specify AWS authentication access ids or secret keys
            using string uri's.  For specific authentication methods, please use the
            latter list of dictionaries.
        image_uuid_list (list of str) : list of image UUIDs to be used in IBEIS IA
        image_width_list (list of int) : list of image widths
        image_height_list (list of int) : list of image heights
        image_orig_name_list (list of str): list of original image names
        image_ext_list (list of str): list of original image names
        image_time_posix_list (list of int): list of image's POSIX timestamps
        image_gps_lat_list (list of float): list of image's GPS latitude values
        image_gps_lon_list (list of float): list of image's GPS longitude values
        image_notes_list (list of str) : optional list of any related notes with
            the images
        **kwargs : key-value pairs passed to the ibs.add_images() function.

    CommandLine:
        python -m ibeis.web.app --test-add_images_json

    Example:
        >>> # WEB_DOCTEST
        >>> import uuid
        >>> import ibeis
        >>> from ibeis.control.IBEISControl import *  # NOQA
        >>> web_instance = ibeis.opendb(db='testdb1')
        >>> _payload = {
        >>>     'image_uri_list': [
        >>>         'https://upload.wikimedia.org/wikipedia/commons/4/49/Zebra_running_Ngorongoro.jpg',
        >>>         {
        >>>             'bucket' : 'test-asset-store',
        >>>             'key'    : 'caribwhale/20130903-JAC-0002.JPG',
        >>>         },
        >>>     ],
        >>>     'image_uuid_list': [
        >>>         uuid.UUID('7fea8101-7dec-44e3-bf5d-b8287fd231e2'),
        >>>         uuid.UUID('c081119a-e08e-4863-a710-3210171d27d6'),
        >>>     ],
        >>>     'image_width_list': [
        >>>         1992,
        >>>         1194,
        >>>     ],
        >>>     'image_height_list': [
        >>>         1328,
        >>>         401,
        >>>     ],
        >>> }
        >>> gid_list = ibeis.web.app.add_images_json(web_instance, **_payload)
        >>> print(gid_list)
        >>> print(web_instance.get_image_uuids(gid_list))
        >>> print(web_instance.get_image_uris(gid_list))
        >>> print(web_instance.get_image_paths(gid_list))
        >>> print(web_instance.get_image_uris_original(gid_list))
    """
    import uuid
    def _get_standard_ext(gpath):
        # Normalize '.jpeg' to '.jpg'; every other extension passes through
        ext = splitext(gpath)[1].lower()
        return '.jpg' if ext == '.jpeg' else ext
    def _parse_imageinfo(index):
        # Build the add_images parameter tuple for one image index
        def _resolve_uri():
            # Required field: raise when missing; dicts are S3 asset specs
            list_ = image_uri_list
            if list_ is None or index >= len(list_) or list_[index] is None:
                raise ValueError('Must specify all required fields')
            value = list_[index]
            if isinstance(value, dict):
                value = ut.s3_dict_encode_to_str(value)
            return value
        def _resolve(list_, default='', assert_=False):
            # Optional-field accessor: fall back to default, or raise when
            # the field is required (assert_=True)
            if list_ is None or index >= len(list_) or list_[index] is None:
                if assert_:
                    raise ValueError('Must specify all required fields')
                return default
            return list_[index]
        uri = _resolve_uri()
        orig_gname = basename(uri)
        ext = _get_standard_ext(uri)
        uuid_ = _resolve(image_uuid_list, assert_=True)
        print(uuid_, type(uuid_))
        # Accept UUIDs serialized as strings (e.g. from JSON payloads)
        if isinstance(uuid_, (str, unicode)):
            uuid_ = uuid.UUID(uuid_)
        print(uuid_, type(uuid_))
        param_tup = (
            uuid_,
            uri,
            uri,
            _resolve(image_orig_name_list, default=orig_gname),
            _resolve(image_ext_list, default=ext),
            int(_resolve(image_width_list, assert_=True)),
            int(_resolve(image_height_list, assert_=True)),
            int(_resolve(image_time_posix_list, default=-1)),
            float(_resolve(image_gps_lat_list, default=-1.0)),
            float(_resolve(image_gps_lon_list, default=-1.0)),
            _resolve(image_notes_list),
        )
        return param_tup
    # TODO: FIX ME SO THAT WE DON'T HAVE TO LOCALIZE EVERYTHING
    kwargs['auto_localize'] = kwargs.get('auto_localize', True)
    kwargs['sanitize'] = kwargs.get('sanitize', False)
    index_list = range(len(image_uri_list))
    params_gen = ut.generate(_parse_imageinfo, index_list, adjust=True,
                             force_serial=True, **kwargs)
    params_gen = list(params_gen)
    # NOTE(review): param_tup[0] is the uuid, not a filesystem path, so
    # gpath_list here is a list of UUIDs -- presumably add_images only uses
    # it for alignment when params_list is given; confirm before changing.
    gpath_list = [ _[0] for _ in params_gen ]
    gid_list = ibs.add_images(gpath_list, params_list=params_gen, **kwargs)
    return gid_list
@register_api('/api/annot/json/', methods=['POST'])
def add_annots_json(ibs, image_uuid_list, annot_uuid_list, annot_bbox_list,
                    annot_theta_list=None, annot_species_list=None,
                    annot_name_list=None, annot_notes_list=None, **kwargs):
    """
    REST:
        Method: POST
        URL: /api/annot/json/

    Ignore:
        sudo pip install boto

    Args:
        image_uuid_list (list of str) : list of image UUIDs to be used in IBEIS IA
        annot_uuid_list (list of str) : list of annotations UUIDs to be used in IBEIS IA
        annot_bbox_list (list of 4-tuple) : list of bounding box coordinates encoded as
            a 4-tuple of the values (xtl, ytl, width, height) where xtl is the
            'top left corner, x value' and ytl is the 'top left corner, y value'.
        annot_theta_list (list of float) : list of radian rotation around center.
            Defaults to 0.0 (no rotation).
        annot_species_list (list of str) : list of species for the annotation, if known.
            If the list is partially known, use None (null in JSON) for unknown entries.
        annot_name_list (list of str) : list of names for the annotation, if known.
            If the list is partially known, use None (null in JSON) for unknown entries.
        annot_notes_list (list of str) : list of notes to be added to the annotation.
        **kwargs : key-value pairs passed to the ibs.add_annots() function.

    CommandLine:
        python -m ibeis.web.app --test-add_annots_json

    Example:
        >>> import uuid
        >>> import ibeis
        >>> from ibeis.control.IBEISControl import *  # NOQA
        >>> web_instance = ibeis.opendb(db='testdb1')
        >>> _payload = {
        >>>     'image_uuid_list': [
        >>>         uuid.UUID('7fea8101-7dec-44e3-bf5d-b8287fd231e2'),
        >>>         uuid.UUID('c081119a-e08e-4863-a710-3210171d27d6'),
        >>>     ],
        >>>     'annot_uuid_list': [
        >>>         uuid.UUID('fe1547c5-1425-4757-9b8f-b2b4a47f552d'),
        >>>         uuid.UUID('86d3959f-7167-4822-b99f-42d453a50745'),
        >>>     ],
        >>>     'annot_bbox_list': [
        >>>         [0, 0, 1992, 1328],
        >>>         [0, 0, 1194, 401],
        >>>     ],
        >>> }
        >>> aid_list = ibeis.web.app.add_annots_json(web_instance, **_payload)
        >>> print(aid_list)
        >>> print(web_instance.get_annot_image_uuids(aid_list))
        >>> print(web_instance.get_annot_uuids(aid_list))
        >>> print(web_instance.get_annot_bboxes(aid_list))
    """
    import uuid
    # Accept image UUIDs either as UUID objects or their string form
    coerced_uuid_list = []
    for uuid_ in image_uuid_list:
        if isinstance(uuid_, (str, unicode)):
            uuid_ = uuid.UUID(uuid_)
        coerced_uuid_list.append(uuid_)
    gid_list = ibs.get_image_gids_from_uuid(coerced_uuid_list)
    return ibs.add_annots(gid_list, annot_uuid_list=annot_uuid_list,
                          bbox_list=annot_bbox_list, theta_list=annot_theta_list,
                          species_list=annot_species_list, name_list=annot_name_list,
                          notes_list=annot_notes_list, **kwargs)
@register_api('/api/image/', methods=['POST'])
def image_upload(cleanup=True, **kwargs):
    r"""
    Returns the gid for an uploaded image.

    Args:
        image (image binary): the POST variable containing the binary
            (multi-form) image data
        **kwargs: Arbitrary keyword arguments; the kwargs are passed down to
            the add_images function

    Returns:
        gid (rowids): gid corresponding to the image submitted.
            lexigraphical order.

    RESTful:
        Method: POST
        URL:    /api/image/
    """
    ibs = current_app.ibs
    print('request.files = %s' % (request.files,))
    filestore = request.files.get('image', None)
    if filestore is None:
        raise IOError('Image not given')
    uploads_path = ibs.get_uploadsdir()
    ut.ensuredir(uploads_path)
    # Find a collision-free, timestamped filename in the uploads directory
    current_time = time.strftime('%Y_%m_%d_%H_%M_%S')
    modifier = 1
    upload_filename = 'upload_%s.png' % (current_time)
    # BUGFIX: test existence of the full path under uploads_path; the old
    # code checked the bare filename relative to the process CWD.
    while exists(join(uploads_path, upload_filename)):
        upload_filename = 'upload_%s_%04d.png' % (current_time, modifier)
        modifier += 1
    upload_filepath = join(uploads_path, upload_filename)
    filestore.save(upload_filepath)
    gid_list = ibs.add_images([upload_filepath], **kwargs)
    gid = gid_list[0]
    if cleanup:
        # NOTE(review): ut.remove_dirs is invoked on a *file* path here --
        # confirm it removes files too, otherwise uploads accumulate.
        ut.remove_dirs(upload_filepath)
    return gid
@register_api('/api/core/helloworld/', methods=['GET', 'POST', 'DELETE', 'PUT'])
def hello_world(*args, **kwargs):
    """Debug / smoke-test endpoint: echo the incoming request to stdout."""
    print('------------------ HELLO WORLD ------------------')
    print('Args:', args)
    print('Kwargs:', kwargs)
    print('request.args:', request.args)
    print('request.form', request.form)
# (route name, display label) pairs for the review modes selectable on the
# group-review page; the route name is later passed to url_for.
VALID_TURK_MODES = [
    ('turk_viewpoint', 'Viewpoint'),
    ('turk_quality', 'Quality'),
]
@register_route('/group_review/')
def group_review():
    """Render the group-review page.

    The candidate annotation list is chosen from (in priority order): the
    'prefill' request arg (annotations the species/viewpoint CNN flagged as
    bad), an explicit 'aid_list' request arg, or empty.
    """
    prefill = request.args.get('prefill', '')
    if len(prefill) > 0:
        ibs = current_app.ibs
        aid_list = ibs.get_valid_aids()
        bad_species_list, bad_viewpoint_list = ibs.validate_annot_species_viewpoint_cnn(aid_list)
        GROUP_BY_PREDICTION = True
        if GROUP_BY_PREDICTION:
            # Group the bad-viewpoint tuples by column 3 and flatten the
            # groups in ascending size order.
            # NOTE(review): map() is lazy on Python 3 -- confirm ut.sortedby
            # accepts an iterator for its key list.
            grouped_dict = ut.group_items(bad_viewpoint_list, ut.get_list_column(bad_viewpoint_list, 3))
            grouped_list = grouped_dict.values()
            regrouped_items = ut.flatten(ut.sortedby(grouped_list, map(len, grouped_list)))
            candidate_aid_list = ut.get_list_column(regrouped_items, 0)
        else:
            candidate_aid_list = [ bad_viewpoint[0] for bad_viewpoint in bad_viewpoint_list]
    elif request.args.get('aid_list', None) is not None:
        # Parse a bracketed, comma-separated aid list, e.g. "[1, 2, 3]"
        aid_list = request.args.get('aid_list', '')
        if len(aid_list) > 0:
            aid_list = aid_list.replace('[', '')
            aid_list = aid_list.replace(']', '')
            aid_list = aid_list.strip().split(',')
            candidate_aid_list = [ int(aid_.strip()) for aid_ in aid_list ]
        else:
            candidate_aid_list = ''
    else:
        candidate_aid_list = ''
    return ap.template(None, 'group_review', candidate_aid_list=candidate_aid_list, mode_list=VALID_TURK_MODES)
@register_route('/group_review/submit/', methods=['POST'])
def group_review_submit():
    """
    Handle the group-review form: either bounce back with prefill=true, or
    prepare an annotation-group review and redirect into the chosen mode.

    CommandLine:
        python -m ibeis.web.app --exec-group_review_submit

    Example:
        >>> # UNSTABLE_DOCTEST
        >>> from ibeis.web.app import *  # NOQA
        >>> import ibeis
        >>> import ibeis.web
        >>> ibs = ibeis.opendb('testdb1')
        >>> aid_list = ibs.get_valid_aids()[::2]
        >>> ibs.start_web_annot_groupreview(aid_list)
    """
    ibs = current_app.ibs
    method = request.form.get('group-review-submit', '')
    if method.lower() == 'populate':
        # Bounce back to the referrer with prefill=true appended exactly once
        target = request.referrer
        if 'prefill' not in target:
            separator = '&' if '?' in target else '?'
            target = '%s%sprefill=true' % (target, separator)
        return redirect(target)
    # Parse a bracketed, comma-separated aid list, e.g. "[1, 2, 3]"
    raw_aids = request.form.get('aid_list', '')
    if len(raw_aids) > 0:
        raw_aids = raw_aids.replace('[', '').replace(']', '')
        aid_list = [ int(token.strip()) for token in raw_aids.strip().split(',') ]
    else:
        aid_list = []
    src_ag, dst_ag = ibs.prepare_annotgroup_review(aid_list)
    valid_modes = ut.get_list_column(VALID_TURK_MODES, 0)
    mode = request.form.get('group-review-mode', None)
    assert mode in valid_modes
    return redirect(url_for(mode, src_ag=src_ag, dst_ag=dst_ag))
@register_route('/ajax/annotation/src/<aid>')
def annotation_src(aid=None):
    """Serve the chip image file for annotation <aid>."""
    chip_path = current_app.ibs.get_annot_chip_fpath(aid)
    return ap.return_src(chip_path)
@register_api('/api/annot/<aid>/', methods=['GET'])
def annotation_src_api(aid=None):
    r"""
    Returns the base64 encoded image of annotation <aid>
    RESTful:
        Method: GET
        URL:    /api/annot/<aid>/
    """
    # Thin authenticated-API alias for the /ajax/annotation/src/<aid> route.
    return annotation_src(aid)
@register_route('/display/sightings')
def display_sightings(html_encode=True):
    """Render the sightings report; newlines become <br/> when html_encode."""
    ibs = current_app.ibs
    complete = request.args.get('complete', None) is not None
    report = ibs.report_sightings_str(complete=complete, include_images=True)
    if html_encode:
        report = report.replace('\n', '<br/>')
    return report
@register_route('/download/sightings')
def download_sightings():
    """Send the plain-text sightings report as a CSV download."""
    csv_text = display_sightings(html_encode=False)
    return ap.send_file(csv_text, 'sightings.csv')
@register_route('/graph/sightings')
def graph_sightings():
    # Sightings graphs live on the main view page; redirect there.
    return redirect(url_for('view'))
@register_route('/dbinfo')
def dbinfo():
    """Render the database info string, or empty markup when unavailable."""
    try:
        ibs = current_app.ibs
        dbinfo_str = ibs.get_dbinfo_str()
    except Exception:
        # BUGFIX: narrowed from a bare 'except:' so KeyboardInterrupt and
        # SystemExit are no longer swallowed.
        dbinfo_str = ''
    dbinfo_str_formatted = '<pre>%s</pre>' % (dbinfo_str, )
    return dbinfo_str_formatted
@register_route('/api')
def api():
    """Render a page listing all registered /api/ endpoints grouped by HTTP method."""
    rule_dict = {}
    for rule in current_app.url_map.iter_rules():
        url = str(rule)
        if '/api/' not in url:
            continue
        # BUGFIX: copy rule.methods before subtracting; the previous
        # 'methods -= set(...)' mutated the live url_map rule in place.
        methods = set(rule.methods) - set(['HEAD', 'OPTIONS'])
        if len(methods) == 0:
            continue
        if len(methods) > 1:
            print('methods = %r' % (methods,))
        method = list(methods)[0]
        rule_dict.setdefault(method, []).append((method, url, ))
    for method in rule_dict.keys():
        rule_dict[method].sort()
    # Example authorized URL shown on the page
    url = '%s/api/core/dbname/' % (current_app.server_url, )
    app_auth = controller_inject.get_url_authorization(url)
    return ap.template(None, 'api',
                       app_url=url,
                       app_name=controller_inject.GLOBAL_APP_NAME,
                       app_secret=controller_inject.GLOBAL_APP_SECRET,
                       app_auth=app_auth,
                       rule_list=rule_dict)
@register_route('/upload')
def upload():
    """Render the image upload page."""
    return ap.template(None, 'upload')
@register_route('/404')
def error404(exception=None):
    """Render the 404 page, echoing the exception and current traceback."""
    import traceback
    exc_repr = str(exception)
    # NOTE: only meaningful when called while an exception is being handled
    tb_repr = str(traceback.format_exc())
    for text in (exc_repr, tb_repr):
        print('[web] %r' % (text, ))
    return ap.template(None, '404', exception_str=exc_repr,
                       traceback_str=tb_repr)
################################################################################
def start_tornado(ibs, port=None, browser=BROWSER, url_suffix=''):
    """
    Initialize the web server

    Args:
        ibs: controller instance, attached to the Flask app as app.ibs
        port (int): port to listen on; defaults to DEFAULT_WEB_API_PORT
        browser (bool): when True, open a web browser at the server URL
        url_suffix (str): appended to the URL opened in the browser
    """
    def _start_tornado(ibs_, port_):
        # Get Flask app
        app = controller_inject.get_flask_app()
        app.ibs = ibs_
        # Try to ascertain the socket's domain name
        try:
            app.server_domain = socket.gethostbyname(socket.gethostname())
        except socket.gaierror:
            # Hostname does not resolve; fall back to loopback
            app.server_domain = '127.0.0.1'
        app.server_port = port_
        # URL for the web instance
        app.server_url = 'http://%s:%s' % (app.server_domain, app.server_port)
        print('[web] Tornado server starting at %s' % (app.server_url,))
        # Launch the web browser to view the web interface and API
        if browser:
            url = app.server_url + url_suffix
            import webbrowser
            print('[web] opening browser with url = %r' % (url,))
            webbrowser.open(url)
        # Start the tornado web handler
        # WSGI = Web Server Gateway Interface
        # WSGI is Python standard described in detail in PEP 3333
        http_server = tornado.httpserver.HTTPServer(
            tornado.wsgi.WSGIContainer(app))
        http_server.listen(app.server_port)
        # Blocks until the IOLoop is stopped
        tornado.ioloop.IOLoop.instance().start()
    # Set logging level
    logging.getLogger().setLevel(logging.INFO)
    # Get the port if unspecified
    if port is None:
        port = DEFAULT_WEB_API_PORT
    # Launch the web handler
    _start_tornado(ibs, port)
def start_from_ibeis(ibs, port=None, browser=BROWSER, precache=None, url_suffix='', start_job_queue=True):
    """
    Parse command line options and start the server.

    Args:
        ibs: controller instance to serve
        port (int): web server port; defaults inside start_tornado
        browser (bool): open a web browser once the server is up
        precache (bool): pre-compute thumbnails and chips before serving;
            defaults to the --precache command-line flag
        url_suffix (str): appended to the URL opened in the browser
        start_job_queue (bool): also start the background job manager

    CommandLine:
        python -m ibeis --db PZ_MTEST --web
        python -m ibeis --db PZ_MTEST --web --browser
    """
    if precache is None:
        precache = ut.get_argflag('--precache')
    if precache:
        print('[web] Pre-computing all image thumbnails (with annots)...')
        ibs.preprocess_image_thumbs()
        print('[web] Pre-computing all image thumbnails (without annots)...')
        ibs.preprocess_image_thumbs(draw_annots=False)
        print('[web] Pre-computing all annotation chips...')
        ibs.check_chip_existence()
        ibs.compute_all_chips()
    if start_job_queue:
        #from ibeis.web import zmq_task_queue
        #ibs.load_plugin_module(zmq_task_queue)
        #import time
        #time.sleep(1)
        ibs.initialize_job_manager()
        #time.sleep(10)
    # Blocks until the tornado IOLoop stops, then shuts down the job queue
    start_tornado(ibs, port, browser, url_suffix)
    ibs.close_job_manager()
if __name__ == '__main__':
    """
    CommandLine:
        python -m ibeis.web.app
        python -m ibeis.web.app --allexamples
        python -m ibeis.web.app --allexamples --noface --nosrc
    """
    import multiprocessing
    multiprocessing.freeze_support()  # for win32
    import utool as ut  # NOQA
    # Run this module's doctests when executed directly
    ut.doctest_funcs()
Small type cast for unicode literals
# -*- coding: utf-8 -*-
"""
Dependencies: flask, tornado
"""
from __future__ import absolute_import, division, print_function
import random
from os.path import join, exists, splitext, basename
import zipfile
import time
import math
import tornado.wsgi
import tornado.httpserver
from flask import request, redirect, url_for, make_response, current_app
import logging
import socket
import simplejson as json
from ibeis.control import controller_inject
from ibeis.control.SQLDatabaseControl import (SQLDatabaseController, # NOQA
SQLAtomicContext)
import ibeis.constants as const
from ibeis.constants import KEY_DEFAULTS, SPECIES_KEY, Species, PI, TAU
from ibeis.web import appfuncs as ap
from ibeis.web import zmq_task_queue # NOQA
import utool as ut
# Web server port; overridable on the command line via --port
DEFAULT_WEB_API_PORT = ut.get_argval('--port', type_=int, default=5000)
# Decorators registering functions as authenticated API / plain web routes
register_api = controller_inject.get_ibeis_flask_api(__name__)
register_route = controller_inject.get_ibeis_flask_route(__name__)
# When set (--browser), open a web browser on server startup
BROWSER = ut.get_argflag('--browser')
# Number of items shown per page in paginated views
PAGE_SIZE = 500
################################################################################
def default_species(ibs):
    # hack function: pick a sensible default species from the database name
    dbname = ibs.get_dbname()
    exact_map = {
        'CHTA_Master': Species.CHEETAH,
        'ELPH_Master': Species.ELEPHANT_SAV,
        'GIR_Master': Species.GIRAFFE,
        'GZ_Master': Species.ZEB_GREVY,
        'LION_Master': Species.LION,
        'PZ_Master': Species.ZEB_PLAIN,
        'WD_Master': Species.WILDDOG,
        'NNP_MasterGIRM': Species.GIRAFFE_MASAI,
    }
    if dbname in exact_map:
        default_species = exact_map[dbname]
    elif 'NNP_' in dbname or 'GZC' in dbname:
        # Any other NNP_* or GZC* database defaults to plains zebra
        default_species = Species.ZEB_PLAIN
    else:
        default_species = None
    print('[web] DEFAULT SPECIES: %r' % (default_species))
    return default_species
def encounter_image_processed(ibs, gid_list):
    """Return one flag per image: True when its reviewed state equals 1."""
    reviewed_states = ibs.get_image_reviewed(gid_list)
    return [ state == 1 for state in reviewed_states ]
def encounter_annot_viewpoint_processed(ibs, aid_list):
    """Return one flag per annotation: True when a yaw has been set."""
    yaw_list = ibs.get_annot_yaws(aid_list)
    return [ yaw is not None for yaw in yaw_list ]
def encounter_annot_quality_processed(ibs, aid_list):
    """Return one flag per annotation: True when a real quality is set.

    A quality of None or -1 means "not yet reviewed".
    """
    annots_reviewed = [
        # BUGFIX: use '!=' instead of 'is not' -- identity comparison with
        # the int -1 only worked by virtue of CPython's small-int caching.
        reviewed is not None and reviewed != -1
        for reviewed in ibs.get_annot_qualities(aid_list)
    ]
    return annots_reviewed
def encounter_annot_additional_processed(ibs, aid_list, nid_list):
    """Return one flag per annotation: True when its additional info
    (sex and age estimate) has been filled in, or it has no valid name."""
    sex_list = ibs.get_annot_sex(aid_list)
    age_list = ibs.get_annot_age_months_est(aid_list)

    def _is_done(nid, sex, age):
        if nid < 0:
            # Unnamed annotations need no additional review
            return True
        age = list(age)
        return nid > 0 and sex >= 0 and -1 not in age and age.count(None) < 2

    return [
        _is_done(nid, sex, age)
        for nid, sex, age in zip(nid_list, sex_list, age_list)
    ]
def convert_old_viewpoint_to_yaw(view_angle):
    """ we initially had viewpoint coordinates inverted

    Example:
        >>> import math
        >>> TAU = 2 * math.pi
        >>> old_viewpoint_labels = [
        >>>     ('left'       ,   0, 0.000 * TAU,),
        >>>     ('frontleft'  ,  45, 0.125 * TAU,),
        >>>     ('front'      ,  90, 0.250 * TAU,),
        >>>     ('frontright' , 135, 0.375 * TAU,),
        >>>     ('right'      , 180, 0.500 * TAU,),
        >>>     ('backright'  , 225, 0.625 * TAU,),
        >>>     ('back'       , 270, 0.750 * TAU,),
        >>>     ('backleft'   , 315, 0.875 * TAU,),
        >>> ]
        >>> fmtstr = 'old %15r %.2f -> new %15r %.2f'
        >>> for lbl, angle, radians in old_viewpoint_labels:
        >>>     print(fmtstr % (lbl, angle, lbl, convert_old_viewpoint_to_yaw(angle)))
    """
    if view_angle is None:
        return None
    # Reflect the legacy angle about the half-circle to obtain the yaw
    radians = ut.deg_to_rad(view_angle)
    yaw = ((TAU / 2) - radians) % TAU
    return yaw
def convert_yaw_to_old_viewpoint(yaw):
    """Convert a yaw (in radians) back to a legacy viewpoint angle (in degrees).

    Exact inverse of convert_old_viewpoint_to_yaw, since the initial
    viewpoint coordinates were inverted:
    ``view_angle = degrees((TAU / 2 - yaw) % TAU)``.  Returns None when
    *yaw* is None.
    """
    if yaw is None:
        return None
    angle_rad = ((TAU / 2) - yaw) % TAU
    return ut.rad_to_deg(angle_rad)
################################################################################
@register_route('/')
def root():
    # Landing page: render the base template with no specific content page.
    return ap.template(None)
@register_route('/view')
def view():
    """Render the top-level statistics overview page.

    Builds, for the filtered ("count") annotation set:
      * a per-annotation discovery curve (cumulative unique names seen),
      * per-day bar-chart counts (images taken/used, sightings, new and
        resighted names),
      * a Petersen-Lincoln population estimate from the last two days,
      * GPS markers and per-name GPS tracks,
      * a sex/age histogram binned per name,
    and passes all of it to the 'view' template.

    Fix over the original: age sentinel comparisons now use ``== -1``
    instead of ``is -1`` (identity on int literals only works by accident
    of CPython small-int caching).
    """
    def _date_list(gid_list):
        # Map image rowids to date strings (the date half of the datetime
        # string); 'UNKNOWN' when the image has no unixtime.
        unixtime_list = ibs.get_image_unixtime(gid_list)
        datetime_list = [
            ut.unixtime_to_datetimestr(unixtime)
            if unixtime is not None else
            'UNKNOWN'
            for unixtime in unixtime_list
        ]
        datetime_split_list = [ datetime.split(' ') for datetime in datetime_list ]
        date_list = [ datetime_split[0] if len(datetime_split) == 2 else 'UNKNOWN' for datetime_split in datetime_split_list ]
        return date_list
    ibs = current_app.ibs
    aid_list = ibs.filter_aids_count()
    gid_list = ibs.get_annot_gids(aid_list)
    nid_list = ibs.get_annot_name_rowids(aid_list)
    date_list = _date_list(gid_list)
    # Per-date image counts: [0] = images taken that date (all valid gids),
    # [1] = images used that date (gids behind the filtered annotations).
    gid_list_unique = list(set(gid_list))
    date_list_unique = _date_list(gid_list_unique)
    date_taken_dict = {}
    for gid, date in zip(gid_list_unique, date_list_unique):
        if date not in date_taken_dict:
            date_taken_dict[date] = [0, 0]
        date_taken_dict[date][1] += 1
    gid_list_all = ibs.get_valid_gids()
    date_list_all = _date_list(gid_list_all)
    for gid, date in zip(gid_list_all, date_list_all):
        if date in date_taken_dict:
            date_taken_dict[date][0] += 1
    # Discovery curve + per-date sighting counters:
    # date_seen_dict[date] = [sightings, unique names, new names, resighted names]
    value = 0
    label_list = []
    value_list = []
    index_list = []
    seen_set = set()
    current_seen_set = set()
    previous_seen_set = set()
    last_date = None
    date_seen_dict = {}
    for index, (aid, nid, date) in enumerate(zip(aid_list, nid_list, date_list)):
        index_list.append(index + 1)
        # Add to counters
        if date not in date_seen_dict:
            date_seen_dict[date] = [0, 0, 0, 0]
        date_seen_dict[date][0] += 1
        if nid not in current_seen_set:
            current_seen_set.add(nid)
            date_seen_dict[date][1] += 1
            if nid in previous_seen_set:
                date_seen_dict[date][3] += 1
        if nid not in seen_set:
            seen_set.add(nid)
            value += 1
            date_seen_dict[date][2] += 1
        # Add to register
        value_list.append(value)
        # Reset step (per day)
        if date != last_date and date != 'UNKNOWN':
            last_date = date
            previous_seen_set = set(current_seen_set)
            current_seen_set = set()
            label_list.append(date)
        else:
            label_list.append('')
    # NOTE: a curve-fit forecast of the discovery curve (log/sqrt/exp models
    # via scipy.optimize.curve_fit) used to be computed here; it is disabled,
    # so no predictions are rendered.
    prediction_list = []
    date_seen_dict.pop('UNKNOWN', None)
    bar_label_list = sorted(date_seen_dict.keys())
    bar_value_list1 = [ date_taken_dict[date][0] for date in bar_label_list ]
    bar_value_list2 = [ date_taken_dict[date][1] for date in bar_label_list ]
    bar_value_list3 = [ date_seen_dict[date][0] for date in bar_label_list ]
    bar_value_list4 = [ date_seen_dict[date][1] for date in bar_label_list ]
    bar_value_list5 = [ date_seen_dict[date][2] for date in bar_label_list ]
    bar_value_list6 = [ date_seen_dict[date][3] for date in bar_label_list ]
    # Counts
    eid_list = ibs.get_valid_eids()
    gid_list = ibs.get_valid_gids()
    aid_list = ibs.get_valid_aids()
    nid_list = ibs.get_valid_nids()
    contrib_list = ibs.get_valid_contrib_rowids()
    aid_list_count = ibs.filter_aids_count()
    nid_list_count_dup = ibs.get_annot_name_rowids(aid_list_count)
    nid_list_count = list(set(nid_list_count_dup))
    # Calculate the Petersen-Lincoln index from the last two days:
    # c1 = names day 1, c2 = names day 2, c3 = recaptured names day 2.
    try:
        c1 = bar_value_list4[-2]
        c2 = bar_value_list4[-1]
        c3 = bar_value_list6[-1]
        pl_index = int(math.ceil( (c1 * c2) / c3 ))
        pl_error_num = float(c1 * c1 * c2 * (c2 - c3))
        pl_error_dom = float(c3 ** 3)
        pl_error = int(math.ceil( 1.96 * math.sqrt(pl_error_num / pl_error_dom) ))
    except IndexError:
        # Fewer than two days of data
        pl_index = 0
        pl_error = 0
    except ZeroDivisionError:
        # Undefined - zero recaptured (k = 0)
        pl_index = 0
        pl_error = 0
    # Get the markers
    gid_list_markers = ibs.get_annot_gids(aid_list_count)
    gps_list_markers = map(list, ibs.get_image_gps(gid_list_markers))
    gps_list_markers_all = map(list, ibs.get_image_gps(gid_list))
    REMOVE_DUP_CODE = True
    if not REMOVE_DUP_CODE:
        # Get the tracks (legacy in-route implementation, kept for reference)
        nid_track_dict = ut.ddict(list)
        for nid, gps in zip(nid_list_count_dup, gps_list_markers):
            if gps[0] == -1.0 and gps[1] == -1.0:
                continue
            nid_track_dict[nid].append(gps)
        gps_list_tracks = [ nid_track_dict[nid] for nid in sorted(nid_track_dict.keys()) ]
    else:
        __nid_list, gps_track_list, aid_track_list = ibs.get_name_gps_tracks(aid_list=aid_list_count)
        gps_list_tracks = list(map(lambda x: list(map(list, x)), gps_track_list))
    valid_aids = ibs.get_valid_aids()
    valid_gids = ibs.get_valid_gids()
    valid_aids_ = ibs.filter_aids_custom(valid_aids)
    valid_gids_ = ibs.filter_gids_custom(valid_gids)
    used_gids = list(set( ibs.get_annot_gids(valid_aids) ))
    used_contrib_tags = list(set( ibs.get_image_contributor_tag(used_gids) ))
    # Get Age and sex (By Name).  age_list[sex][bucket] where sex is
    # 0/1/2 (2 = other/unknown) and bucket is <12mo, 12-36mo, >=36mo.
    name_sex_list = ibs.get_name_sex(nid_list_count)
    name_age_months_est_mins_list = ibs.get_name_age_months_est_min(nid_list_count)
    name_age_months_est_maxs_list = ibs.get_name_age_months_est_max(nid_list_count)
    age_list = [[0, 0, 0], [0, 0, 0], [0, 0, 0]]
    age_unreviewed = 0
    age_ambiguous = 0
    for nid, sex, min_ages, max_ages in zip(nid_list_count, name_sex_list, name_age_months_est_mins_list, name_age_months_est_maxs_list):
        if len(set(min_ages)) > 1 or len(set(max_ages)) > 1:
            # Invalid name: cannot have more than one age
            age_ambiguous += 1
            continue
        min_age = None
        max_age = None
        if len(min_ages) > 0:
            min_age = min_ages[0]
        if len(max_ages) > 0:
            max_age = max_ages[0]
        # Histogram.  -1 / None both mean "no age specified yet".
        if (min_age is None and max_age is None) or (min_age == -1 and max_age == -1):
            age_unreviewed += 1
            continue
        if sex not in [0, 1]:
            sex = 2
        if (min_age is None or min_age < 12) and max_age < 12:
            age_list[sex][0] += 1
        elif 12 <= min_age and min_age < 36 and 12 <= max_age and max_age < 36:
            age_list[sex][1] += 1
        elif 36 <= min_age and (36 <= max_age or max_age is None):
            age_list[sex][2] += 1
    dbinfo_str = dbinfo()
    return ap.template('view',
                       line_index_list=index_list,
                       line_label_list=label_list,
                       line_value_list=value_list,
                       prediction_list=prediction_list,
                       pl_index=pl_index,
                       pl_error=pl_error,
                       gps_list_markers=gps_list_markers,
                       gps_list_markers_all=gps_list_markers_all,
                       gps_list_tracks=gps_list_tracks,
                       bar_label_list=bar_label_list,
                       bar_value_list1=bar_value_list1,
                       bar_value_list2=bar_value_list2,
                       bar_value_list3=bar_value_list3,
                       bar_value_list4=bar_value_list4,
                       bar_value_list5=bar_value_list5,
                       bar_value_list6=bar_value_list6,
                       age_list=age_list,
                       age_ambiguous=age_ambiguous,
                       age_unreviewed=age_unreviewed,
                       dbinfo_str=dbinfo_str,
                       eid_list=eid_list,
                       eid_list_str=','.join(map(str, eid_list)),
                       num_eids=len(eid_list),
                       gid_list=gid_list,
                       gid_list_str=','.join(map(str, gid_list)),
                       num_gids=len(gid_list),
                       contrib_list=contrib_list,
                       contrib_list_str=','.join(map(str, contrib_list)),
                       num_contribs=len(contrib_list),
                       gid_list_count=valid_gids_,
                       gid_list_count_str=','.join(map(str, valid_gids_)),
                       num_gids_count=len(valid_gids_),
                       aid_list=aid_list,
                       aid_list_str=','.join(map(str, aid_list)),
                       num_aids=len(aid_list),
                       aid_list_count=valid_aids_,
                       aid_list_count_str=','.join(map(str, valid_aids_)),
                       num_aids_count=len(valid_aids_),
                       nid_list=nid_list,
                       nid_list_str=','.join(map(str, nid_list)),
                       num_nids=len(nid_list),
                       nid_list_count=nid_list_count,
                       nid_list_count_str=','.join(map(str, nid_list_count)),
                       num_nids_count=len(nid_list_count),
                       used_gids=used_gids,
                       num_used_gids=len(used_gids),
                       used_contribs=used_contrib_tags,
                       num_used_contribs=len(used_contrib_tags))
@register_route('/view/encounters')
def view_encounters():
    """Render the encounters overview table.

    An optional comma-separated 'eid' query argument filters the listing;
    otherwise all valid encounters are shown.  Each row carries image and
    annotation review progress plus the encounter start time.
    """
    ibs = current_app.ibs
    filtered = True
    eid = request.args.get('eid', '')
    if len(eid) > 0:
        eid_list = eid.strip().split(',')
        eid_list = [ None if eid_ == 'None' or eid_ == '' else int(eid_) for eid_ in eid_list ]
    else:
        eid_list = ibs.get_valid_eids()
        filtered = False
    start_time_posix_list = ibs.get_encounter_start_time_posix(eid_list)
    datetime_list = [
        ut.unixtime_to_datetimestr(start_time_posix)
        if start_time_posix is not None else
        'Unknown'
        for start_time_posix in start_time_posix_list
    ]
    gids_list = [ ibs.get_valid_gids(eid=eid_) for eid_ in eid_list ]
    aids_list = [ ut.flatten(ibs.get_image_aids(gid_list)) for gid_list in gids_list ]
    images_reviewed_list = [ encounter_image_processed(ibs, gid_list) for gid_list in gids_list ]
    annots_reviewed_viewpoint_list = [ encounter_annot_viewpoint_processed(ibs, aid_list) for aid_list in aids_list ]
    annots_reviewed_quality_list = [ encounter_annot_quality_processed(ibs, aid_list) for aid_list in aids_list ]
    image_processed_list = [ images_reviewed.count(True) for images_reviewed in images_reviewed_list ]
    annot_processed_viewpoint_list = [ annots_reviewed.count(True) for annots_reviewed in annots_reviewed_viewpoint_list ]
    annot_processed_quality_list = [ annots_reviewed.count(True) for annots_reviewed in annots_reviewed_quality_list ]
    reviewed_list = [ all(images_reviewed) and all(annots_reviewed_viewpoint) and all(annot_processed_quality) for images_reviewed, annots_reviewed_viewpoint, annot_processed_quality in zip(images_reviewed_list, annots_reviewed_viewpoint_list, annots_reviewed_quality_list) ]
    # Sort by start time (tuple index 7).  sorted(zip(...)) instead of
    # zip(...).sort() so the code also works where zip() is an iterator
    # (Python 3); on Python 2 the behavior is identical.
    encounter_list = sorted(zip(
        eid_list,
        ibs.get_encounter_text(eid_list),
        ibs.get_encounter_num_gids(eid_list),
        image_processed_list,
        ibs.get_encounter_num_aids(eid_list),
        annot_processed_viewpoint_list,
        annot_processed_quality_list,
        start_time_posix_list,
        datetime_list,
        reviewed_list,
    ), key=lambda t: t[7])
    return ap.template('view', 'encounters',
                       filtered=filtered,
                       eid_list=eid_list,
                       eid_list_str=','.join(map(str, eid_list)),
                       num_eids=len(eid_list),
                       encounter_list=encounter_list,
                       num_encounters=len(encounter_list))
@register_route('/view/images')
def view_images():
    """Render a paginated table of images.

    Filters by 'gid' (explicit image ids) or 'eid' (encounter ids) query
    arguments; falls back to all valid images.  'page' selects a
    PAGE_SIZE-sized window.

    Fix over the original: the encounter branch passed the raw request
    string ``eid`` to get_valid_gids instead of the loop variable ``eid_``
    (compare the identical loops in view_encounters / view_annotations).
    """
    ibs = current_app.ibs
    filtered = True
    eid_list = []
    gid = request.args.get('gid', '')
    eid = request.args.get('eid', '')
    page = max(0, int(request.args.get('page', 1)))
    if len(gid) > 0:
        gid_list = gid.strip().split(',')
        gid_list = [ None if gid_ == 'None' or gid_ == '' else int(gid_) for gid_ in gid_list ]
    elif len(eid) > 0:
        eid_list = eid.strip().split(',')
        eid_list = [ None if eid_ == 'None' or eid_ == '' else int(eid_) for eid_ in eid_list ]
        # BUGFIX: was eid=eid (the raw request string); must use the loop
        # variable eid_ to gather images per encounter.
        gid_list = ut.flatten([ ibs.get_valid_gids(eid=eid_) for eid_ in eid_list ])
    else:
        gid_list = ibs.get_valid_gids()
        filtered = False
    # Page
    page_start = min(len(gid_list), (page - 1) * PAGE_SIZE)
    page_end = min(len(gid_list), page * PAGE_SIZE)
    page_total = int(math.ceil(len(gid_list) / PAGE_SIZE))
    page_previous = None if page_start == 0 else page - 1
    page_next = None if page_end == len(gid_list) else page + 1
    gid_list = gid_list[page_start:page_end]
    print('[web] Loading Page [ %d -> %d ] (%d), Prev: %s, Next: %s' % (page_start, page_end, len(gid_list), page_previous, page_next, ))
    image_unixtime_list = ibs.get_image_unixtime(gid_list)
    datetime_list = [
        ut.unixtime_to_datetimestr(image_unixtime)
        if image_unixtime is not None
        else
        'Unknown'
        for image_unixtime in image_unixtime_list
    ]
    # Sort rows by unixtime (tuple index 3); sorted(zip(...)) is
    # Python-2/3 safe, unlike zip(...).sort().
    image_list = sorted(zip(
        gid_list,
        [ ','.join(map(str, eid_list_)) for eid_list_ in ibs.get_image_eids(gid_list) ],
        ibs.get_image_gnames(gid_list),
        image_unixtime_list,
        datetime_list,
        ibs.get_image_gps(gid_list),
        ibs.get_image_party_tag(gid_list),
        ibs.get_image_contributor_tag(gid_list),
        ibs.get_image_notes(gid_list),
        encounter_image_processed(ibs, gid_list),
    ), key=lambda t: t[3])
    return ap.template('view', 'images',
                       filtered=filtered,
                       eid_list=eid_list,
                       eid_list_str=','.join(map(str, eid_list)),
                       num_eids=len(eid_list),
                       gid_list=gid_list,
                       gid_list_str=','.join(map(str, gid_list)),
                       num_gids=len(gid_list),
                       image_list=image_list,
                       num_images=len(image_list),
                       page=page,
                       page_start=page_start,
                       page_end=page_end,
                       page_total=page_total,
                       page_previous=page_previous,
                       page_next=page_next)
@register_route('/view/annotations')
def view_annotations():
    """Render a paginated table of annotations.

    Filters by 'aid', 'gid', or 'eid' query arguments (in that priority);
    falls back to all valid annotations.  'page' selects a PAGE_SIZE-sized
    window.
    """
    ibs = current_app.ibs
    filtered = True
    eid_list = []
    gid_list = []
    aid = request.args.get('aid', '')
    gid = request.args.get('gid', '')
    eid = request.args.get('eid', '')
    page = max(0, int(request.args.get('page', 1)))
    if len(aid) > 0:
        aid_list = aid.strip().split(',')
        aid_list = [ None if aid_ == 'None' or aid_ == '' else int(aid_) for aid_ in aid_list ]
    elif len(gid) > 0:
        gid_list = gid.strip().split(',')
        gid_list = [ None if gid_ == 'None' or gid_ == '' else int(gid_) for gid_ in gid_list ]
        aid_list = ut.flatten(ibs.get_image_aids(gid_list))
    elif len(eid) > 0:
        eid_list = eid.strip().split(',')
        eid_list = [ None if eid_ == 'None' or eid_ == '' else int(eid_) for eid_ in eid_list ]
        gid_list = ut.flatten([ ibs.get_valid_gids(eid=eid_) for eid_ in eid_list ])
        aid_list = ut.flatten(ibs.get_image_aids(gid_list))
    else:
        aid_list = ibs.get_valid_aids()
        filtered = False
    # Page
    page_start = min(len(aid_list), (page - 1) * PAGE_SIZE)
    page_end = min(len(aid_list), page * PAGE_SIZE)
    page_total = int(math.ceil(len(aid_list) / PAGE_SIZE))
    page_previous = None if page_start == 0 else page - 1
    page_next = None if page_end == len(aid_list) else page + 1
    aid_list = aid_list[page_start:page_end]
    print('[web] Loading Page [ %d -> %d ] (%d), Prev: %s, Next: %s' % (page_start, page_end, len(aid_list), page_previous, page_next, ))
    # Sort rows by aid (tuple index 0); sorted(zip(...)) is Python-2/3
    # safe, unlike zip(...).sort().
    annotation_list = sorted(zip(
        aid_list,
        ibs.get_annot_gids(aid_list),
        [ ','.join(map(str, eid_list_)) for eid_list_ in ibs.get_annot_eids(aid_list) ],
        ibs.get_annot_image_names(aid_list),
        ibs.get_annot_names(aid_list),
        ibs.get_annot_exemplar_flags(aid_list),
        ibs.get_annot_species_texts(aid_list),
        ibs.get_annot_yaw_texts(aid_list),
        ibs.get_annot_quality_texts(aid_list),
        ibs.get_annot_sex_texts(aid_list),
        ibs.get_annot_age_months_est(aid_list),
        [ reviewed_viewpoint and reviewed_quality for reviewed_viewpoint, reviewed_quality in zip(encounter_annot_viewpoint_processed(ibs, aid_list), encounter_annot_quality_processed(ibs, aid_list)) ],
    ), key=lambda t: t[0])
    return ap.template('view', 'annotations',
                       filtered=filtered,
                       eid_list=eid_list,
                       eid_list_str=','.join(map(str, eid_list)),
                       num_eids=len(eid_list),
                       gid_list=gid_list,
                       gid_list_str=','.join(map(str, gid_list)),
                       num_gids=len(gid_list),
                       aid_list=aid_list,
                       aid_list_str=','.join(map(str, aid_list)),
                       num_aids=len(aid_list),
                       annotation_list=annotation_list,
                       num_annotations=len(annotation_list),
                       page=page,
                       page_start=page_start,
                       page_end=page_end,
                       page_total=page_total,
                       page_previous=page_previous,
                       page_next=page_next)
@register_route('/view/names')
def view_names():
    """Render a paginated table of names, each with its annotations.

    Filters by 'nid', 'aid', 'gid', or 'eid' query arguments (in that
    priority); falls back to all valid names.  Uses a smaller page size
    (PAGE_SIZE / 5) since each row expands into a per-name annotation
    table.
    """
    ibs = current_app.ibs
    filtered = True
    aid_list = []
    eid_list = []
    gid_list = []
    nid = request.args.get('nid', '')
    aid = request.args.get('aid', '')
    gid = request.args.get('gid', '')
    eid = request.args.get('eid', '')
    page = max(0, int(request.args.get('page', 1)))
    if len(nid) > 0:
        nid_list = nid.strip().split(',')
        nid_list = [ None if nid_ == 'None' or nid_ == '' else int(nid_) for nid_ in nid_list ]
    if len(aid) > 0:
        aid_list = aid.strip().split(',')
        aid_list = [ None if aid_ == 'None' or aid_ == '' else int(aid_) for aid_ in aid_list ]
        nid_list = ibs.get_annot_name_rowids(aid_list)
    elif len(gid) > 0:
        gid_list = gid.strip().split(',')
        gid_list = [ None if gid_ == 'None' or gid_ == '' else int(gid_) for gid_ in gid_list ]
        aid_list = ut.flatten(ibs.get_image_aids(gid_list))
        nid_list = ibs.get_annot_name_rowids(aid_list)
    elif len(eid) > 0:
        eid_list = eid.strip().split(',')
        eid_list = [ None if eid_ == 'None' or eid_ == '' else int(eid_) for eid_ in eid_list ]
        gid_list = ut.flatten([ ibs.get_valid_gids(eid=eid_) for eid_ in eid_list ])
        aid_list = ut.flatten(ibs.get_image_aids(gid_list))
        nid_list = ibs.get_annot_name_rowids(aid_list)
    else:
        nid_list = ibs.get_valid_nids()
        filtered = False
    # Page
    PAGE_SIZE_ = int(PAGE_SIZE / 5)
    page_start = min(len(nid_list), (page - 1) * PAGE_SIZE_)
    page_end = min(len(nid_list), page * PAGE_SIZE_)
    page_total = int(math.ceil(len(nid_list) / PAGE_SIZE_))
    page_previous = None if page_start == 0 else page - 1
    page_next = None if page_end == len(nid_list) else page + 1
    nid_list = nid_list[page_start:page_end]
    print('[web] Loading Page [ %d -> %d ] (%d), Prev: %s, Next: %s' % (page_start, page_end, len(nid_list), page_previous, page_next, ))
    aids_list = ibs.get_name_aids(nid_list)
    # list(zip(...)) so the per-name rows survive repeated iteration in the
    # template on Python 3 (zip() was already a list on Python 2).
    annotations_list = [ list(zip(
        aid_list_,
        ibs.get_annot_gids(aid_list_),
        [ ','.join(map(str, eid_list_)) for eid_list_ in ibs.get_annot_eids(aid_list_) ],
        ibs.get_annot_image_names(aid_list_),
        ibs.get_annot_names(aid_list_),
        ibs.get_annot_exemplar_flags(aid_list_),
        ibs.get_annot_species_texts(aid_list_),
        ibs.get_annot_yaw_texts(aid_list_),
        ibs.get_annot_quality_texts(aid_list_),
        ibs.get_annot_sex_texts(aid_list_),
        ibs.get_annot_age_months_est(aid_list_),
        [ reviewed_viewpoint and reviewed_quality for reviewed_viewpoint, reviewed_quality in zip(encounter_annot_viewpoint_processed(ibs, aid_list_), encounter_annot_quality_processed(ibs, aid_list_)) ],
    )) for aid_list_ in aids_list ]
    # Sort by nid (tuple index 0); sorted(zip(...)) is Python-2/3 safe,
    # unlike zip(...).sort().
    name_list = sorted(zip(
        nid_list,
        annotations_list
    ), key=lambda t: t[0])
    return ap.template('view', 'names',
                       filtered=filtered,
                       eid_list=eid_list,
                       eid_list_str=','.join(map(str, eid_list)),
                       num_eids=len(eid_list),
                       gid_list=gid_list,
                       gid_list_str=','.join(map(str, gid_list)),
                       num_gids=len(gid_list),
                       aid_list=aid_list,
                       aid_list_str=','.join(map(str, aid_list)),
                       num_aids=len(aid_list),
                       nid_list=nid_list,
                       nid_list_str=','.join(map(str, nid_list)),
                       num_nids=len(nid_list),
                       name_list=name_list,
                       num_names=len(name_list),
                       page=page,
                       page_start=page_start,
                       page_end=page_end,
                       page_total=page_total,
                       page_previous=page_previous,
                       page_next=page_next)
@register_route('/turk')
def turk():
    """Render the turk landing page, optionally scoped to one encounter."""
    eid_arg = request.args.get('eid', '')
    eid = None if eid_arg in ('None', '') else int(eid_arg)
    return ap.template('turk', None, eid=eid)
@register_route('/turk/detection')
def turk_detection():
    """Render the detection-review turk page for one image.

    Picks an explicit 'gid' from the query args, or a random unreviewed
    image in the (optional) encounter.  When every image is reviewed the
    page is rendered in its 'finished' state.

    Fix over the original: the progress computation divided by
    ``len(gid_list)`` unguarded and crashed on an empty encounter; it now
    falls back to '0.00' like get_turk_annot_args does.
    """
    ibs = current_app.ibs
    refer_aid = request.args.get('refer_aid', None)
    eid = request.args.get('eid', '')
    eid = None if eid == 'None' or eid == '' else int(eid)
    gid_list = ibs.get_valid_gids(eid=eid)
    reviewed_list = encounter_image_processed(ibs, gid_list)
    try:
        progress = '%0.2f' % (100.0 * reviewed_list.count(True) / len(gid_list), )
    except ZeroDivisionError:
        # No images in this encounter
        progress = '0.00'
    enctext = None if eid is None else ibs.get_encounter_text(eid)
    gid = request.args.get('gid', '')
    if len(gid) > 0:
        gid = int(gid)
    else:
        # Pick a random still-unreviewed image; None when all are done.
        gid_list_ = ut.filterfalse_items(gid_list, reviewed_list)
        if len(gid_list_) == 0:
            gid = None
        else:
            gid = random.choice(gid_list_)
    previous = request.args.get('previous', None)
    finished = gid is None
    review = 'review' in request.args.keys()
    display_instructions = request.cookies.get('detection_instructions_seen', 1) == 0
    display_species_examples = False  # request.cookies.get('detection_example_species_seen', 0) == 0
    if not finished:
        gpath = ibs.get_image_thumbpath(gid, ensure_paths=True, draw_annots=False)
        image = ap.open_oriented_image(gpath)
        image_src = ap.embed_image_html(image, filter_width=False)
        # Get annotations, scaled to the display width
        width, height = ibs.get_image_sizes(gid)
        scale_factor = float(ap.TARGET_WIDTH) / float(width)
        aid_list = ibs.get_image_aids(gid)
        annot_bbox_list = ibs.get_annot_bboxes(aid_list)
        annot_thetas_list = ibs.get_annot_thetas(aid_list)
        species_list = ibs.get_annot_species_texts(aid_list)
        # Get annotation bounding boxes
        annotation_list = []
        for aid, annot_bbox, annot_theta, species in zip(aid_list, annot_bbox_list, annot_thetas_list, species_list):
            temp = {}
            temp['left'] = int(scale_factor * annot_bbox[0])
            temp['top'] = int(scale_factor * annot_bbox[1])
            temp['width'] = int(scale_factor * (annot_bbox[2]))
            temp['height'] = int(scale_factor * (annot_bbox[3]))
            temp['label'] = species
            temp['id'] = aid
            temp['angle'] = float(annot_theta)
            annotation_list.append(temp)
        if len(species_list) > 0:
            species = max(set(species_list), key=species_list.count)  # Get most common species
        elif default_species(ibs) is not None:
            species = default_species(ibs)
        else:
            species = KEY_DEFAULTS[SPECIES_KEY]
    else:
        gpath = None
        species = None
        image_src = None
        annotation_list = []
    return ap.template('turk', 'detection',
                       eid=eid,
                       gid=gid,
                       refer_aid=refer_aid,
                       species=species,
                       image_path=gpath,
                       image_src=image_src,
                       previous=previous,
                       enctext=enctext,
                       progress=progress,
                       finished=finished,
                       annotation_list=annotation_list,
                       display_instructions=display_instructions,
                       display_species_examples=display_species_examples,
                       review=review)
def get_turk_annot_args(is_reviewed_func):
    """
    Helper to return aids in an encounter or a group review.

    Args:
        is_reviewed_func: callable ``(ibs, aid_list) -> list of bool``
            marking which annotations are already reviewed
            (e.g. encounter_annot_viewpoint_processed).

    Returns:
        tuple: (aid_list, reviewed_list, eid, src_ag, dst_ag, progress,
        aid, previous) where ``aid`` is the next annotation to review
        (None when all are reviewed) and ``progress`` is a percentage
        string.
    """
    ibs = current_app.ibs
    def _ensureid(_id):
        # Normalize a request arg: '' / 'None' -> None, otherwise int.
        return None if _id == 'None' or _id == '' else int(_id)
    eid = request.args.get('eid', '')
    src_ag = request.args.get('src_ag', '')
    dst_ag = request.args.get('dst_ag', '')
    eid = _ensureid(eid)
    src_ag = _ensureid(src_ag)
    dst_ag = _ensureid(dst_ag)
    # Group review mode: both a source and destination annot-group given.
    group_review_flag = src_ag is not None and dst_ag is not None
    if not group_review_flag:
        gid_list = ibs.get_valid_gids(eid=eid)
        aid_list = ut.flatten(ibs.get_image_aids(gid_list))
        reviewed_list = is_reviewed_func(ibs, aid_list)
    else:
        # In group review, "reviewed" means the aid already moved to dst_ag.
        src_gar_rowid_list = ibs.get_annotgroup_gar_rowids(src_ag)
        dst_gar_rowid_list = ibs.get_annotgroup_gar_rowids(dst_ag)
        src_aid_list = ibs.get_gar_aid(src_gar_rowid_list)
        dst_aid_list = ibs.get_gar_aid(dst_gar_rowid_list)
        aid_list = src_aid_list
        reviewed_list = [ src_aid in dst_aid_list for src_aid in src_aid_list ]
    try:
        progress = '%0.2f' % (100.0 * reviewed_list.count(True) / len(aid_list), )
    except ZeroDivisionError:
        progress = '0.00'
    aid = request.args.get('aid', '')
    if len(aid) > 0:
        aid = int(aid)
    else:
        aid_list_ = ut.filterfalse_items(aid_list, reviewed_list)
        if len(aid_list_) == 0:
            aid = None
        else:
            # Group review proceeds in order; normal review picks at random.
            if group_review_flag:
                aid = aid_list_[0]
            else:
                aid = random.choice(aid_list_)
    previous = request.args.get('previous', None)
    print('aid = %r' % (aid,))
    #print(ut.dict_str(ibs.get_annot_info(aid)))
    print(ut.obj_str(ibs.get_annot_info(aid, default=True, nl=True)))
    return aid_list, reviewed_list, eid, src_ag, dst_ag, progress, aid, previous
@register_route('/turk/viewpoint')
def turk_viewpoint():
    """
    Render the viewpoint-review turk page for one annotation.

    CommandLine:
        python -m ibeis.web.app --exec-turk_viewpoint --db PZ_Master1
    Example:
        >>> # SCRIPT
        >>> from ibeis.ibsfuncs import * # NOQA
        >>> import ibeis
        >>> ibs = ibeis.opendb(defaultdb='PZ_Master1')
        >>> aid_list_ = ibs.find_unlabeled_name_members(suspect_yaws=True)
        >>> aid_list = ibs.filter_aids_to_quality(aid_list_, 'good', unknown_ok=False)
        >>> ibs.start_web_annot_groupreview(aid_list)
    """
    ibs = current_app.ibs
    tup = get_turk_annot_args(encounter_annot_viewpoint_processed)
    (aid_list, reviewed_list, eid, src_ag, dst_ag, progress, aid, previous) = tup
    # Pre-fill the form with the annotation's yaw in legacy degree form.
    value = convert_yaw_to_old_viewpoint(ibs.get_annot_yaws(aid))
    review = 'review' in request.args.keys()
    # aid is None when every annotation has already been reviewed.
    finished = aid is None
    display_instructions = request.cookies.get('viewpoint_instructions_seen', 1) == 0
    if not finished:
        gid = ibs.get_annot_gids(aid)
        gpath = ibs.get_annot_chip_fpath(aid)
        image = ap.open_oriented_image(gpath)
        image_src = ap.embed_image_html(image)
    else:
        gid = None
        gpath = None
        image_src = None
    enctext = ibs.get_encounter_text(eid)
    return ap.template('turk', 'viewpoint',
                       eid=eid,
                       src_ag=src_ag,
                       dst_ag=dst_ag,
                       gid=gid,
                       aid=aid,
                       value=value,
                       image_path=gpath,
                       image_src=image_src,
                       previous=previous,
                       enctext=enctext,
                       progress=progress,
                       finished=finished,
                       display_instructions=display_instructions,
                       review=review)
@register_route('/turk/quality')
def turk_quality():
    """
    Render the quality-review turk page for one annotation.

    PZ Needs Tags:
        17242
        14468
        14427
        15946
        14771
        14084
        4102
        6074
        3409
    GZ Needs Tags;
        1302
    CommandLine:
        python -m ibeis.web.app --exec-turk_quality --db PZ_Master1
        python -m ibeis.web.app --exec-turk_quality --db GZ_Master1
        python -m ibeis.web.app --exec-turk_quality --db GIRM_Master1
    Example:
        >>> # SCRIPT
        >>> from ibeis.ibsfuncs import * # NOQA
        >>> import ibeis
        >>> ibs = ibeis.opendb(defaultdb='testdb1')
        >>> aid_list_ = ibs.find_unlabeled_name_members(qual=True)
        >>> valid_views = ['primary', 'primary1', 'primary-1']
        >>> aid_list = ibs.filter_aids_to_viewpoint(aid_list_, valid_views, unknown_ok=False)
        >>> ibs.start_web_annot_groupreview(aid_list)
    """
    ibs = current_app.ibs
    tup = get_turk_annot_args(encounter_annot_quality_processed)
    (aid_list, reviewed_list, eid, src_ag, dst_ag, progress, aid, previous) = tup
    # Normalize the stored quality for the form: -1 (unknown) shows as
    # unset, 0 is bumped to the lowest selectable value 1.
    value = ibs.get_annot_qualities(aid)
    if value == -1:
        value = None
    if value == 0:
        value = 1
    review = 'review' in request.args.keys()
    # aid is None when every annotation has already been reviewed.
    finished = aid is None
    display_instructions = request.cookies.get('quality_instructions_seen', 1) == 0
    if not finished:
        gid = ibs.get_annot_gids(aid)
        gpath = ibs.get_annot_chip_fpath(aid)
        image = ap.open_oriented_image(gpath)
        image_src = ap.embed_image_html(image)
    else:
        gid = None
        gpath = None
        image_src = None
    enctext = ibs.get_encounter_text(eid)
    return ap.template('turk', 'quality',
                       eid=eid,
                       src_ag=src_ag,
                       dst_ag=dst_ag,
                       gid=gid,
                       aid=aid,
                       value=value,
                       image_path=gpath,
                       image_src=image_src,
                       previous=previous,
                       enctext=enctext,
                       progress=progress,
                       finished=finished,
                       display_instructions=display_instructions,
                       review=review)
##@register_route('/turk/viewpoint')
#def old_turk_viewpoint():
# #ibs = current_app.ibs
# #eid = request.args.get('eid', '')
# #eid = None if eid == 'None' or eid == '' else int(eid)
# #enctext = None if eid is None else ibs.get_encounter_text(eid)
# #src_ag = request.args.get('src_ag', '')
# #src_ag = None if src_ag == 'None' or src_ag == '' else int(src_ag)
# #dst_ag = request.args.get('dst_ag', '')
# #dst_ag = None if dst_ag == 'None' or dst_ag == '' else int(dst_ag)
# #group_review_flag = src_ag is not None and dst_ag is not None
# #if not group_review_flag:
# # gid_list = ibs.get_valid_gids(eid=eid)
# # aid_list = ut.flatten(ibs.get_image_aids(gid_list))
# # reviewed_list = encounter_annot_viewpoint_processed(ibs, aid_list)
# #else:
# # src_gar_rowid_list = ibs.get_annotgroup_gar_rowids(src_ag)
# # dst_gar_rowid_list = ibs.get_annotgroup_gar_rowids(dst_ag)
# # src_aid_list = ibs.get_gar_aid(src_gar_rowid_list)
# # dst_aid_list = ibs.get_gar_aid(dst_gar_rowid_list)
# # aid_list = src_aid_list
# # reviewed_list = [ src_aid in dst_aid_list for src_aid in src_aid_list ]
# #previous = request.args.get('previous', None)
# ibs = current_app.ibs
# tup = get_turk_annot_args(encounter_annot_viewpoint_processed)
# (aid_list, reviewed_list, eid, src_ag, dst_ag, progress, aid, previous) = tup
# value = convert_yaw_to_old_viewpoint(ibs.get_annot_yaws(aid))
# review = 'review' in request.args.keys()
# finished = aid is None
# display_instructions = request.cookies.get('viewpoint_instructions_seen', 1) == 0
# if not finished:
# gid = ibs.get_annot_gids(aid)
# gpath = ibs.get_annot_chip_fpath(aid)
# image = ap.open_oriented_image(gpath)
# image_src = ap.embed_image_html(image)
# else:
# gid = None
# gpath = None
# image_src = None
# enctext = ibs.get_encounter_text(eid)
# return ap.template('turk', 'viewpoint',
# eid=eid,
# src_ag=src_ag,
# dst_ag=dst_ag,
# gid=gid,
# aid=aid,
# value=value,
# image_path=gpath,
# image_src=image_src,
# previous=previous,
# enctext=enctext,
# progress=progress,
# finished=finished,
# display_instructions=display_instructions,
# review=review)
#@register_route('/turk/quality')
#def old_turk_quality():
# #ibs = current_app.ibs
# #eid = request.args.get('eid', '')
# #eid = None if eid == 'None' or eid == '' else int(eid)
# #gid_list = ibs.get_valid_gids(eid=eid)
# #aid_list = ut.flatten(ibs.get_image_aids(gid_list))
# #reviewed_list = encounter_annot_quality_processed(ibs, aid_list)
# #try:
# # progress = '%0.2f' % (100.0 * reviewed_list.count(True) / len(aid_list), )
# #except ZeroDivisionError:
# # progress = '0.00'
# #aid = request.args.get('aid', '')
# #if len(aid) > 0:
# # aid = int(aid)
# #else:
# # aid_list_ = ut.filterfalse_items(aid_list, reviewed_list)
# # if len(aid_list_) == 0:
# # aid = None
# # else:
# # # aid = aid_list_[0]
# # aid = random.choice(aid_list_)
# #previous = request.args.get('previous', None)
# ibs = current_app.ibs
# tup = get_turk_annot_args(encounter_annot_quality_processed)
# (aid_list, reviewed_list, eid, src_ag, dst_ag, progress, aid, previous) = tup
# value = ibs.get_annot_qualities(aid)
# if value == -1:
# value = None
# if value == 0:
# value = 1
# review = 'review' in request.args.keys()
# finished = aid is None
# display_instructions = request.cookies.get('quality_instructions_seen', 1) == 0
# if not finished:
# gid = ibs.get_annot_gids(aid)
# gpath = ibs.get_annot_chip_fpath(aid)
# image = ap.open_oriented_image(gpath)
# image_src = ap.embed_image_html(image)
# else:
# gid = None
# gpath = None
# image_src = None
# enctext = ibs.get_encounter_text(eid)
# return ap.template('turk', 'quality',
# eid=eid,
# src_ag=src_ag,
# dst_ag=dst_ag,
# gid=gid,
# aid=aid,
# value=value,
# image_path=gpath,
# image_src=image_src,
# previous=previous,
# enctext=enctext,
# progress=progress,
# finished=finished,
# display_instructions=display_instructions,
# review=review)
@register_route('/turk/additional')
def turk_additional():
    """Serve the 'additional metadata' turking page (sex / age review).

    Picks a random not-yet-reviewed annotation (or the one given by the
    ``aid`` query arg), translates its stored sex/age values into the form's
    selector codes, and renders the 'turk/additional' template.
    """
    ibs = current_app.ibs
    eid = request.args.get('eid', '')
    eid = None if eid == 'None' or eid == '' else int(eid)
    gid_list = ibs.get_valid_gids(eid=eid)
    aid_list = ut.flatten(ibs.get_image_aids(gid_list))
    nid_list = ibs.get_annot_nids(aid_list)
    reviewed_list = encounter_annot_additional_processed(ibs, aid_list, nid_list)
    try:
        progress = '%0.2f' % (100.0 * reviewed_list.count(True) / len(aid_list), )
    except ZeroDivisionError:
        progress = '0.00'
    enctext = None if eid is None else ibs.get_encounter_text(eid)
    aid = request.args.get('aid', '')
    if len(aid) > 0:
        aid = int(aid)
    else:
        # No explicit aid requested: pick a random unreviewed annotation
        aid_list_ = ut.filterfalse_items(aid_list, reviewed_list)
        if len(aid_list_) == 0:
            aid = None
        else:
            # aid = aid_list_[0]
            aid = random.choice(aid_list_)
    previous = request.args.get('previous', None)
    # Sex selector code: stored value >= 0 maps to (value + 2); unknown -> None
    value_sex = ibs.get_annot_sex([aid])[0]
    if value_sex >= 0:
        value_sex += 2
    else:
        value_sex = None
    value_age_min, value_age_max = ibs.get_annot_age_months_est([aid])[0]
    value_age = None
    # BUG FIX: the original compared against int literals with ``is``
    # (e.g. ``value_age_min is -1``), which only works by accident of
    # CPython's small-int caching; use ``==`` for value equality.
    if (value_age_min == -1 or value_age_min is None) and (value_age_max == -1 or value_age_max is None):
        value_age = 1
    if (value_age_min == 0 or value_age_min is None) and value_age_max == 2:
        value_age = 2
    elif value_age_min == 3 and value_age_max == 5:
        value_age = 3
    elif value_age_min == 6 and value_age_max == 11:
        value_age = 4
    elif value_age_min == 12 and value_age_max == 23:
        value_age = 5
    elif value_age_min == 24 and value_age_max == 35:
        value_age = 6
    # BUG FIX: test ``is None`` before comparing with ``>`` so a missing
    # maximum cannot hit an unorderable comparison.
    elif value_age_min == 36 and (value_age_max is None or value_age_max > 36):
        value_age = 7
    review = 'review' in request.args.keys()
    finished = aid is None
    display_instructions = request.cookies.get('additional_instructions_seen', 1) == 0
    if not finished:
        gid = ibs.get_annot_gids(aid)
        gpath = ibs.get_annot_chip_fpath(aid)
        image = ap.open_oriented_image(gpath)
        image_src = ap.embed_image_html(image)
    else:
        gid = None
        gpath = None
        image_src = None
    name_aid_list = None
    # BUG FIX: initialize so the template call below cannot hit a NameError
    # when the annotation has no name (nid is None).
    name_aid_combined_list = None
    nid = ibs.get_annot_name_rowids(aid)
    if nid is not None:
        name_aid_list = ibs.get_name_aids(nid)
        quality_list = ibs.get_annot_qualities(name_aid_list)
        quality_text_list = ibs.get_annot_quality_texts(name_aid_list)
        yaw_text_list = ibs.get_annot_yaw_texts(name_aid_list)
        name_aid_combined_list = list(zip(
            name_aid_list,
            quality_list,
            quality_text_list,
            yaw_text_list,
        ))
        # Show the highest-quality sightings of this name first
        name_aid_combined_list.sort(key=lambda t: t[1], reverse=True)
    return ap.template('turk', 'additional',
                       eid=eid,
                       gid=gid,
                       aid=aid,
                       value_sex=value_sex,
                       value_age=value_age,
                       image_path=gpath,
                       name_aid_combined_list=name_aid_combined_list,
                       image_src=image_src,
                       previous=previous,
                       enctext=enctext,
                       progress=progress,
                       finished=finished,
                       display_instructions=display_instructions,
                       review=review)
@register_route('/submit/detection', methods=['POST'])
def submit_detection():
    """Handle the detection-turking form.

    Depending on the submit button pressed: 'delete' is currently a no-op,
    'clear' removes all annotations from the image, otherwise the drawn
    boxes replace/update the image's annotations.  Redirects back to the
    turking flow afterwards.
    """
    ibs = current_app.ibs
    method = request.form.get('detection-submit', '')
    eid = request.args.get('eid', '')
    eid = None if eid == 'None' or eid == '' else int(eid)
    gid = int(request.form['detection-gid'])
    turk_id = request.cookies.get('turk_id', -1)
    if method.lower() == 'delete':
        # ibs.delete_images(gid)
        # print('[web] (DELETED) turk_id: %s, gid: %d' % (turk_id, gid, ))
        pass
    elif method.lower() == 'clear':
        aid_list = ibs.get_image_aids(gid)
        ibs.delete_annots(aid_list)
        # BUG FIX: log message typo ('CLEAERED' -> 'CLEARED')
        print('[web] (CLEARED) turk_id: %s, gid: %d' % (turk_id, gid, ))
        redirection = request.referrer
        if 'gid' not in redirection:
            # Prevent multiple clears
            if '?' in redirection:
                redirection = '%s&gid=%d' % (redirection, gid, )
            else:
                redirection = '%s?gid=%d' % (redirection, gid, )
        return redirect(redirection)
    else:
        current_aid_list = ibs.get_image_aids(gid)
        # Make new annotations
        width, height = ibs.get_image_sizes(gid)
        # Boxes were drawn on a scaled-down canvas; scale back to image coords
        scale_factor = float(width) / float(ap.TARGET_WIDTH)
        # Get aids
        annotation_list = json.loads(request.form['detection-annotations'])
        bbox_list = [
            (
                int(scale_factor * annot['left']),
                int(scale_factor * annot['top']),
                int(scale_factor * annot['width']),
                int(scale_factor * annot['height']),
            )
            for annot in annotation_list
        ]
        theta_list = [
            float(annot['angle'])
            for annot in annotation_list
        ]
        # id is None for boxes the turker drew fresh this round
        survived_aid_list = [
            None if annot['id'] is None else int(annot['id'])
            for annot in annotation_list
        ]
        species_list = [
            annot['label']
            for annot in annotation_list
        ]
        # Delete annotations that didn't survive
        kill_aid_list = list(set(current_aid_list) - set(survived_aid_list))
        ibs.delete_annots(kill_aid_list)
        for aid, bbox, theta, species in zip(survived_aid_list, bbox_list, theta_list, species_list):
            if aid is None:
                ibs.add_annots([gid], [bbox], theta_list=[theta], species_list=[species])
            else:
                ibs.set_annot_bboxes([aid], [bbox])
                ibs.set_annot_thetas([aid], [theta])
                ibs.set_annot_species([aid], [species])
        ibs.set_image_reviewed([gid], [1])
        print('[web] turk_id: %s, gid: %d, bbox_list: %r, species_list: %r' % (turk_id, gid, annotation_list, species_list))
    # Return HTML
    refer = request.args.get('refer', '')
    if len(refer) > 0:
        return redirect(ap.decode_refer_url(refer))
    else:
        return redirect(url_for('turk_detection', eid=eid, previous=gid))
def movegroup_aid(ibs, aid, src_ag, dst_ag):
    """Move annotation ``aid`` from annot-group ``src_ag`` into ``dst_ag``.

    Looks up the group-relationship row linking the annotation to the source
    group, then adds the annotation to the destination group.  (The removal
    of the source relationship is currently disabled.)
    """
    gar_rowids = ibs.get_annot_gar_rowids(aid)
    group_rowids = ibs.get_gar_annotgroup_rowid(gar_rowids)
    src_gar_rowid = gar_rowids[group_rowids.index(src_ag)]
    print('Moving aid: %s from src_ag: %s (%s) to dst_ag: %s' % (aid, src_ag, src_gar_rowid, dst_ag))
    # ibs.delete_gar([src_gar_rowid])
    ibs.add_gar([dst_ag], [aid])
@register_route('/submit/viewpoint', methods=['POST'])
def submit_viewpoint():
    """Handle the viewpoint-turking form.

    The submit button selects one of several mutually exclusive actions:
    delete the annotation, mark it junk, rotate it left/right, or (default)
    store the selected viewpoint as the annotation's yaw.
    """
    ibs = current_app.ibs
    method = request.form.get('viewpoint-submit', '')
    eid = request.args.get('eid', '')
    eid = None if eid == 'None' or eid == '' else int(eid)
    src_ag = request.args.get('src_ag', '')
    src_ag = None if src_ag == 'None' or src_ag == '' else int(src_ag)
    dst_ag = request.args.get('dst_ag', '')
    dst_ag = None if dst_ag == 'None' or dst_ag == '' else int(dst_ag)
    aid = int(request.form['viewpoint-aid'])
    turk_id = request.cookies.get('turk_id', -1)
    # BUG FIX: these actions were independent ``if`` statements with the
    # default path hanging off only the last one, so a 'delete' submission
    # (aid reset to None) fell through into the default branch and tried to
    # set a yaw on annotation None.  Chain them with ``elif`` instead.
    if method.lower() == 'delete':
        ibs.delete_annots(aid)
        print('[web] (DELETED) turk_id: %s, aid: %d' % (turk_id, aid, ))
        aid = None  # Reset AID to prevent previous
    elif method.lower() == 'make junk':
        ibs.set_annot_quality_texts([aid], [const.QUAL_JUNK])
        print('[web] (SET AS JUNK) turk_id: %s, aid: %d' % (turk_id, aid, ))
        redirection = request.referrer
        if 'aid' not in redirection:
            # Prevent multiple clears
            if '?' in redirection:
                redirection = '%s&aid=%d' % (redirection, aid, )
            else:
                redirection = '%s?aid=%d' % (redirection, aid, )
        return redirect(redirection)
    elif method.lower() == 'rotate left':
        theta = ibs.get_annot_thetas(aid)
        theta = (theta + PI / 2) % TAU
        ibs.set_annot_thetas(aid, theta)
        # Swap width/height and recenter the bbox on the same midpoint
        (xtl, ytl, w, h) = ibs.get_annot_bboxes(aid)
        diffx = int(round((w / 2.0) - (h / 2.0)))
        diffy = int(round((h / 2.0) - (w / 2.0)))
        xtl, ytl, w, h = xtl + diffx, ytl + diffy, h, w
        ibs.set_annot_bboxes([aid], [(xtl, ytl, w, h)])
        print('[web] (ROTATED LEFT) turk_id: %s, aid: %d' % (turk_id, aid, ))
        redirection = request.referrer
        if 'aid' not in redirection:
            # Prevent multiple clears
            if '?' in redirection:
                redirection = '%s&aid=%d' % (redirection, aid, )
            else:
                redirection = '%s?aid=%d' % (redirection, aid, )
        return redirect(redirection)
    elif method.lower() == 'rotate right':
        theta = ibs.get_annot_thetas(aid)
        theta = (theta - PI / 2) % TAU
        ibs.set_annot_thetas(aid, theta)
        # Swap width/height and recenter the bbox on the same midpoint
        (xtl, ytl, w, h) = ibs.get_annot_bboxes(aid)
        diffx = int(round((w / 2.0) - (h / 2.0)))
        diffy = int(round((h / 2.0) - (w / 2.0)))
        xtl, ytl, w, h = xtl + diffx, ytl + diffy, h, w
        ibs.set_annot_bboxes([aid], [(xtl, ytl, w, h)])
        print('[web] (ROTATED RIGHT) turk_id: %s, aid: %d' % (turk_id, aid, ))
        redirection = request.referrer
        if 'aid' not in redirection:
            # Prevent multiple clears
            if '?' in redirection:
                redirection = '%s&aid=%d' % (redirection, aid, )
            else:
                redirection = '%s?aid=%d' % (redirection, aid, )
        return redirect(redirection)
    else:
        if src_ag is not None and dst_ag is not None:
            movegroup_aid(ibs, aid, src_ag, dst_ag)
        value = int(request.form['viewpoint-value'])
        yaw = convert_old_viewpoint_to_yaw(value)
        ibs.set_annot_yaws([aid], [yaw], input_is_degrees=False)
        print('[web] turk_id: %s, aid: %d, yaw: %d' % (turk_id, aid, yaw))
    # Return HTML
    refer = request.args.get('refer', '')
    if len(refer) > 0:
        return redirect(ap.decode_refer_url(refer))
    else:
        return redirect(url_for('turk_viewpoint', eid=eid, src_ag=src_ag,
                                dst_ag=dst_ag, previous=aid))
@register_route('/submit/quality', methods=['POST'])
def submit_quality():
    """Persist the quality value chosen on the quality-turking page.

    'delete' removes the annotation; otherwise the annotation is optionally
    moved between review groups and its quality value is stored.  Either way
    the turker is redirected to the next item.
    """
    ibs = current_app.ibs
    method = request.form.get('quality-submit', '')
    eid = request.args.get('eid', '')
    eid = int(eid) if eid not in ('None', '') else None
    aid = int(request.form['quality-aid'])
    turk_id = request.cookies.get('turk_id', -1)
    src_ag = request.args.get('src_ag', '')
    src_ag = int(src_ag) if src_ag not in ('None', '') else None
    dst_ag = request.args.get('dst_ag', '')
    dst_ag = int(dst_ag) if dst_ag not in ('None', '') else None
    if method.lower() == 'delete':
        ibs.delete_annots(aid)
        print('[web] (DELETED) turk_id: %s, aid: %d' % (turk_id, aid, ))
        aid = None  # Reset AID to prevent previous
    else:
        if src_ag is not None and dst_ag is not None:
            movegroup_aid(ibs, aid, src_ag, dst_ag)
        quality = int(request.form['quality-value'])
        ibs.set_annot_qualities([aid], [quality])
        print('[web] turk_id: %s, aid: %d, quality: %d' % (turk_id, aid, quality))
    # Return HTML
    refer = request.args.get('refer', '')
    if len(refer) > 0:
        return redirect(ap.decode_refer_url(refer))
    else:
        return redirect(url_for('turk_quality', eid=eid, src_ag=src_ag,
                                dst_ag=dst_ag, previous=aid))
@register_route('/submit/additional', methods=['POST'])
def submit_additional():
    """Persist the sex/age metadata chosen on the additional-turking page.

    'delete' removes the annotation; otherwise the selector codes are mapped
    back to stored sex (-1 = unknown) and an (age_min, age_max) month range,
    then written to the annotation (and, when it has a name, to all of that
    name's annotations).
    """
    ibs = current_app.ibs
    method = request.form.get('additional-submit', '')
    eid = request.args.get('eid', '')
    eid = int(eid) if eid not in ('None', '') else None
    aid = int(request.form['additional-aid'])
    turk_id = request.cookies.get('turk_id', -1)
    if method.lower() == 'delete':
        ibs.delete_annots(aid)
        print('[web] (DELETED) turk_id: %s, aid: %d' % (turk_id, aid, ))
        aid = None  # Reset AID to prevent previous
    else:
        sex = int(request.form['additional-sex-value'])
        age = int(request.form['additional-age-value'])
        # Selector codes >= 2 are offset by 2; anything lower means unknown
        sex = sex - 2 if sex >= 2 else -1
        # Map the age selector code to an (age_min, age_max) month range
        age_ranges = {
            1: (None, None),
            2: (None, 2),
            3: (3, 5),
            4: (6, 11),
            5: (12, 23),
            6: (24, 35),
            7: (36, None),
        }
        age_min, age_max = age_ranges.get(age, (None, None))
        ibs.set_annot_sex([aid], [sex])
        nid = ibs.get_annot_name_rowids(aid)
        DAN_SPECIAL_WRITE_AGE_TO_ALL_ANOTATIONS = True
        if nid is not None and DAN_SPECIAL_WRITE_AGE_TO_ALL_ANOTATIONS:
            # Propagate the age range to every annotation sharing this name
            aid_list = ibs.get_name_aids(nid)
            ibs.set_annot_age_months_est_min(aid_list, [age_min] * len(aid_list))
            ibs.set_annot_age_months_est_max(aid_list, [age_max] * len(aid_list))
        else:
            ibs.set_annot_age_months_est_min([aid], [age_min])
            ibs.set_annot_age_months_est_max([aid], [age_max])
        print('[web] turk_id: %s, aid: %d, sex: %r, age: %r' % (turk_id, aid, sex, age))
    # Return HTML
    refer = request.args.get('refer', '')
    if len(refer) > 0:
        return redirect(ap.decode_refer_url(refer))
    else:
        return redirect(url_for('turk_additional', eid=eid, previous=aid))
@register_route('/ajax/cookie')
def set_cookie():
    """Persist a single name/value pair as a browser cookie via AJAX."""
    cookie_name = request.args['name']
    cookie_value = request.args['value']
    response = make_response('true')
    response.set_cookie(cookie_name, cookie_value)
    print('[web] Set Cookie: %r -> %r' % (cookie_name, cookie_value, ))
    return response
@register_route('/ajax/image/src/<gid>')
def image_src(gid=None, fresh=False, **kwargs):
    """Serve the thumbnail image source for image ``gid``.

    When ``fresh`` is requested (argument, query arg, or form field) the
    cached thumbnail file is deleted and regenerated first.
    """
    import os
    ibs = current_app.ibs
    gpath = ibs.get_image_thumbpath(gid, ensure_paths=True)
    if fresh or 'fresh' in request.args or 'fresh' in request.form:
        # Drop the cached thumbnail and let the controller rebuild it
        os.remove(gpath)
        gpath = ibs.get_image_thumbpath(gid, ensure_paths=True)
    return ap.return_src(gpath)
@register_api('/api/image/<gid>/', methods=['GET'])
def image_src_api(gid=None, fresh=False, **kwargs):
    r"""
    Returns the base64 encoded image of image <gid>

    RESTful:
        Method: GET
        URL:    /api/image/<gid>/
    """
    # Thin REST wrapper around the AJAX image_src handler
    return image_src(gid, fresh=fresh, **kwargs)
@register_route('/api/image/view/<gid>/', methods=['GET'])
def image_view_api(gid=None, fresh=False, **kwargs):
    r"""
    Returns the base64 encoded image of image <gid>

    RESTful:
        Method: GET
        URL:    /api/image/view/<gid>/
    """
    # Like image_src_api, but wraps the encoded image in the 'single'
    # template so it renders as an HTML page instead of raw data.
    encoded = image_src(gid, fresh=fresh, **kwargs)
    return ap.template(None, 'single', encoded=encoded)
@register_api('/api/image/zip', methods=['POST'])
def image_upload_zip(**kwargs):
    r"""
    Returns the gid_list for image files submitted in a ZIP archive. The image
    archive should be flat (no folders will be scanned for images) and must be smaller
    than 100 MB. The archive can submit multiple images, ideally in JPEG format to save
    space. Duplicate image uploads will result in the duplicate images receiving
    the same gid based on the hashed pixel values.

    Args:
        image_zip_archive (binary): the POST variable containing the binary
            (multi-form) image archive data
        **kwargs: Arbitrary keyword arguments; the kwargs are passed down to
            the add_images function

    Returns:
        gid_list (list if rowids): the list of gids corresponding to the images
            submitted.  The gids correspond to the image names sorted in
            lexigraphical order.

    RESTful:
        Method: POST
        URL:    /api/image/zip
    """
    ibs = current_app.ibs
    # Get image archive
    image_archive = request.files.get('image_zip_archive', None)
    if image_archive is None:
        raise IOError('Image archive not given')
    uploads_path = ibs.get_uploadsdir()
    ut.ensuredir(uploads_path)
    current_time = time.strftime('%Y_%m_%d_%H_%M_%S')
    # Find an extraction directory (inside uploads_path) that is not taken.
    # BUG FIX: the original tested ``exists(upload_path)`` on the bare
    # timestamp name, i.e. relative to the process CWD rather than the
    # uploads directory, so collisions were never actually detected.
    modifier = 1
    upload_path = join(uploads_path, current_time)
    while exists(upload_path):
        upload_path = join(uploads_path, '%s_%04d' % (current_time, modifier))
        modifier += 1
    ut.ensuredir(upload_path)
    # Extract the content; clean up the directory on failure
    try:
        with zipfile.ZipFile(image_archive, 'r') as zfile:
            zfile.extractall(upload_path)
    except Exception:
        ut.remove_dirs(upload_path)
        raise IOError('Image archive extracton failed')
    """
    test to ensure Directory and utool do the same thing

    from detecttools.directory import Directory
    upload_path = ut.truepath('~/Pictures')
    gpath_list1 = sorted(ut.list_images(upload_path, recursive=False, full=True))
    direct = Directory(upload_path, include_file_extensions='images', recursive=False)
    gpath_list = direct.files()
    gpath_list = sorted(gpath_list)
    assert gpath_list1 == gpath_list
    """
    # Sorted so returned gids line up with lexicographic image-name order
    gpath_list = sorted(ut.list_images(upload_path, recursive=False, full=True))
    #direct = Directory(upload_path, include_file_extensions='images', recursive=False)
    #gpath_list = direct.files()
    #gpath_list = sorted(gpath_list)
    gid_list = ibs.add_images(gpath_list, **kwargs)
    return gid_list
@register_api('/api/image/json/', methods=['POST'])
def add_images_json(ibs, image_uri_list, image_uuid_list, image_width_list,
                    image_height_list, image_orig_name_list=None, image_ext_list=None,
                    image_time_posix_list=None, image_gps_lat_list=None,
                    image_gps_lon_list=None, image_notes_list=None, **kwargs):
    """
    REST:
        Method: POST
        URL: /api/image/json/

    Ignore:
        sudo pip install boto

    Args:
        image_uri_list (list) : list of string image uris, most likely HTTP(S) or S3
            encoded URLs. Alternatively, this can be a list of dictionaries (JSON
            objects) that specify AWS S3 stored assets. An example below:

                image_uri_list = [
                    'http://domain.com/example/asset1.png',
                    '/home/example/Desktop/example/asset2.jpg',
                    's3://s3.amazon.com/example-bucket-2/asset1-in-bucket-2.tif',
                    {
                        'bucket'          : 'example-bucket-1',
                        'key'             : 'example/asset1.png',
                        'auth_domain'     : None,  # Uses localhost
                        'auth_access_id'  : None,  # Uses system default
                        'auth_secret_key' : None,  # Uses system default
                    },
                    {
                        'bucket' : 'example-bucket-1',
                        'key'    : 'example/asset2.jpg',
                        # if unspecified, auth uses localhost and system defaults
                    },
                    {
                        'bucket'          : 'example-bucket-2',
                        'key'             : 'example/asset1-in-bucket-2.tif',
                        'auth_domain'     : 's3.amazon.com',
                        'auth_access_id'  : '____________________',
                        'auth_secret_key' : '________________________________________',
                    },
                ]

            Note that you cannot specify AWS authentication access ids or secret keys
            using string uri's. For specific authentication methods, please use the
            latter list of dictionaries.

        image_uuid_list (list of str) : list of image UUIDs to be used in IBEIS IA
        image_width_list (list of int) : list of image widths
        image_height_list (list of int) : list of image heights
        image_orig_name_list (list of str): list of original image names
        image_ext_list (list of str): list of original image names
        image_time_posix_list (list of int): list of image's POSIX timestamps
        image_gps_lat_list (list of float): list of image's GPS latitude values
        image_gps_lon_list (list of float): list of image's GPS longitude values
        image_notes_list (list of str) : optional list of any related notes with
            the images
        **kwargs : key-value pairs passed to the ibs.add_images() function.

    CommandLine:
        python -m ibeis.web.app --test-add_images_json

    Example:
        >>> # WEB_DOCTEST
        >>> import uuid
        >>> import ibeis
        >>> from ibeis.control.IBEISControl import *  # NOQA
        >>> web_instance = ibeis.opendb(db='testdb1')
        >>> _payload = {
        >>>     'image_uri_list': [
        >>>         'https://upload.wikimedia.org/wikipedia/commons/4/49/Zebra_running_Ngorongoro.jpg',
        >>>         {
        >>>             'bucket'          : 'test-asset-store',
        >>>             'key'             : 'caribwhale/20130903-JAC-0002.JPG',
        >>>         },
        >>>     ],
        >>>     'image_uuid_list': [
        >>>         uuid.UUID('7fea8101-7dec-44e3-bf5d-b8287fd231e2'),
        >>>         uuid.UUID('c081119a-e08e-4863-a710-3210171d27d6'),
        >>>     ],
        >>>     'image_width_list': [
        >>>         1992,
        >>>         1194,
        >>>     ],
        >>>     'image_height_list': [
        >>>         1328,
        >>>         401,
        >>>     ],
        >>> }
        >>> gid_list = ibeis.web.app.add_images_json(web_instance, **_payload)
        >>> print(gid_list)
        >>> print(web_instance.get_image_uuids(gid_list))
        >>> print(web_instance.get_image_uris(gid_list))
        >>> print(web_instance.get_image_paths(gid_list))
        >>> print(web_instance.get_image_uris_original(gid_list))
    """
    import uuid
    def _get_standard_ext(gpath):
        # Normalize the file extension ('.jpeg' is canonicalized to '.jpg')
        ext = splitext(gpath)[1].lower()
        return '.jpg' if ext == '.jpeg' else ext
    def _parse_imageinfo(index):
        # Build one parameter tuple (in ibs.add_images params_list order)
        # for the image at position ``index`` across the parallel lists.
        def _resolve_uri():
            list_ = image_uri_list
            if list_ is None or index >= len(list_) or list_[index] is None:
                raise ValueError('Must specify all required fields')
            value = list_[index]
            print(value, type(value))
            if isinstance(value, dict):
                # Dict entries are AWS S3 asset specs; encode to a uri string
                value = ut.s3_dict_encode_to_str(value)
            return value
        def _resolve(list_, default='', assert_=False):
            # Fetch list_[index], falling back to ``default`` when the list
            # is missing/short/None at that slot (or raising if required)
            if list_ is None or index >= len(list_) or list_[index] is None:
                if assert_:
                    raise ValueError('Must specify all required fields')
                return default
            return list_[index]
        uri = _resolve_uri()
        orig_gname = basename(uri)
        ext = _get_standard_ext(uri)
        uuid_ = _resolve(image_uuid_list, assert_=True)
        # NOTE: ``unicode`` here means this path still assumes Python 2
        if isinstance(uuid_, (str, unicode)):
            uuid_ = uuid.UUID(uuid_)
        param_tup = (
            uuid_,
            uri,
            uri,
            _resolve(image_orig_name_list, default=orig_gname),
            _resolve(image_ext_list, default=ext),
            int(_resolve(image_width_list, assert_=True)),
            int(_resolve(image_height_list, assert_=True)),
            int(_resolve(image_time_posix_list, default=-1)),
            float(_resolve(image_gps_lat_list, default=-1.0)),
            float(_resolve(image_gps_lon_list, default=-1.0)),
            _resolve(image_notes_list),
        )
        return param_tup
    # TODO: FIX ME SO THAT WE DON'T HAVE TO LOCALIZE EVERYTHING
    kwargs['auto_localize'] = kwargs.get('auto_localize', True)
    kwargs['sanitize'] = kwargs.get('sanitize', False)
    index_list = range(len(image_uri_list))
    # NOTE(review): the same **kwargs are forwarded both to ut.generate and
    # to ibs.add_images below — presumably each ignores the other's keys;
    # verify against utool/IBEISControl.
    params_gen = ut.generate(_parse_imageinfo, index_list, adjust=True,
                             force_serial=True, **kwargs)
    params_gen = list(params_gen)
    gpath_list = [ _[0] for _ in params_gen ]
    gid_list = ibs.add_images(gpath_list, params_list=params_gen, **kwargs)
    return gid_list
@register_api('/api/annot/json/', methods=['POST'])
def add_annots_json(ibs, image_uuid_list, annot_uuid_list, annot_bbox_list,
                    annot_theta_list=None, annot_species_list=None,
                    annot_name_list=None, annot_notes_list=None, **kwargs):
    """
    REST:
        Method: POST
        URL: /api/annot/json/

    Ignore:
        sudo pip install boto

    Args:
        image_uuid_list (list of str) : list of image UUIDs to be used in IBEIS IA
        annot_uuid_list (list of str) : list of annotations UUIDs to be used in IBEIS IA
        annot_bbox_list (list of 4-tuple) : list of bounding box coordinates encoded as
            a 4-tuple of the values (xtl, ytl, width, height) where xtl is the
            'top left corner, x value' and ytl is the 'top left corner, y value'.
        annot_theta_list (list of float) : list of radian rotation around center.
            Defaults to 0.0 (no rotation).
        annot_species_list (list of str) : list of species for the annotation, if known.
            If the list is partially known, use None (null in JSON) for unknown entries.
        annot_name_list (list of str) : list of names for the annotation, if known.
            If the list is partially known, use None (null in JSON) for unknown entries.
        annot_notes_list (list of str) : list of notes to be added to the annotation.
        **kwargs : key-value pairs passed to the ibs.add_annots() function.

    CommandLine:
        python -m ibeis.web.app --test-add_annots_json

    Example:
        >>> import uuid
        >>> import ibeis
        >>> from ibeis.control.IBEISControl import *  # NOQA
        >>> web_instance = ibeis.opendb(db='testdb1')
        >>> _payload = {
        >>>     'image_uuid_list': [
        >>>         uuid.UUID('7fea8101-7dec-44e3-bf5d-b8287fd231e2'),
        >>>         uuid.UUID('c081119a-e08e-4863-a710-3210171d27d6'),
        >>>     ],
        >>>     'annot_uuid_list': [
        >>>         uuid.UUID('fe1547c5-1425-4757-9b8f-b2b4a47f552d'),
        >>>         uuid.UUID('86d3959f-7167-4822-b99f-42d453a50745'),
        >>>     ],
        >>>     'annot_bbox_list': [
        >>>         [0, 0, 1992, 1328],
        >>>         [0, 0, 1194, 401],
        >>>     ],
        >>> }
        >>> aid_list = ibeis.web.app.add_annots_json(web_instance, **_payload)
        >>> print(aid_list)
        >>> print(web_instance.get_annot_image_uuids(aid_list))
        >>> print(web_instance.get_annot_uuids(aid_list))
        >>> print(web_instance.get_annot_bboxes(aid_list))
    """
    import uuid
    def _to_uuid(value):
        # Accept UUIDs given either as objects or as strings (py2 unicode too)
        return uuid.UUID(value) if isinstance(value, (str, unicode)) else value
    image_uuid_list = [_to_uuid(uuid_) for uuid_ in image_uuid_list]
    gid_list = ibs.get_image_gids_from_uuid(image_uuid_list)
    return ibs.add_annots(gid_list, annot_uuid_list=annot_uuid_list,
                          bbox_list=annot_bbox_list, theta_list=annot_theta_list,
                          species_list=annot_species_list, name_list=annot_name_list,
                          notes_list=annot_notes_list, **kwargs)
@register_api('/api/image/', methods=['POST'])
def image_upload(cleanup=True, **kwargs):
    r"""
    Returns the gid for an uploaded image.

    Args:
        image (image binary): the POST variable containing the binary
            (multi-form) image data
        **kwargs: Arbitrary keyword arguments; the kwargs are passed down to
            the add_images function

    Returns:
        gid (rowids): gid corresponding to the image submitted.
            lexigraphical order.

    RESTful:
        Method: POST
        URL:    /api/image/
    """
    ibs = current_app.ibs
    print('request.files = %s' % (request.files,))
    filestore = request.files.get('image', None)
    if filestore is None:
        raise IOError('Image not given')
    uploads_path = ibs.get_uploadsdir()
    ut.ensuredir(uploads_path)
    current_time = time.strftime('%Y_%m_%d_%H_%M_%S')
    # Find a filename inside the uploads directory that is not already taken.
    # BUG FIX: the original tested ``exists(upload_filename)`` on the bare
    # filename, i.e. relative to the process CWD rather than the uploads
    # directory, so collisions were never actually detected.
    modifier = 1
    upload_filename = 'upload_%s.png' % (current_time)
    while exists(join(uploads_path, upload_filename)):
        upload_filename = 'upload_%s_%04d.png' % (current_time, modifier)
        modifier += 1
    upload_filepath = join(uploads_path, upload_filename)
    filestore.save(upload_filepath)
    gid_list = ibs.add_images([upload_filepath], **kwargs)
    gid = gid_list[0]
    if cleanup:
        # NOTE(review): remove_dirs() is called on a *file* path here —
        # presumably utool handles plain files as well; verify.
        ut.remove_dirs(upload_filepath)
    return gid
@register_api('/api/core/helloworld/', methods=['GET', 'POST', 'DELETE', 'PUT'])
def hello_world(*args, **kwargs):
    """Debug endpoint: print the received args/kwargs and request data.

    Accepts every common HTTP verb and returns nothing (implicit None).
    """
    print('------------------ HELLO WORLD ------------------')
    print('Args:', args)
    print('Kwargs:', kwargs)
    print('request.args:', request.args)
    print('request.form', request.form)
# (route-function name, human-readable label) pairs offered by the
# group-review mode selector; the first element is passed to url_for().
VALID_TURK_MODES = [
    ('turk_viewpoint', 'Viewpoint'),
    ('turk_quality', 'Quality'),
]
@register_route('/group_review/')
def group_review():
    """Render the group-review page.

    The candidate annotation list is seeded either from the CNN
    species/viewpoint validator (``prefill`` query arg) or parsed from an
    explicit ``aid_list`` query argument; otherwise it is empty.
    """
    prefill = request.args.get('prefill', '')
    if len(prefill) > 0:
        ibs = current_app.ibs
        aid_list = ibs.get_valid_aids()
        bad_species_list, bad_viewpoint_list = ibs.validate_annot_species_viewpoint_cnn(aid_list)
        GROUP_BY_PREDICTION = True
        if GROUP_BY_PREDICTION:
            # Group flagged annotations by predicted viewpoint (column 3) and
            # present the smallest groups first
            grouped_dict = ut.group_items(bad_viewpoint_list, ut.get_list_column(bad_viewpoint_list, 3))
            grouped_list = grouped_dict.values()
            regrouped_items = ut.flatten(ut.sortedby(grouped_list, [len(group) for group in grouped_list]))
            candidate_aid_list = ut.get_list_column(regrouped_items, 0)
        else:
            candidate_aid_list = [flagged[0] for flagged in bad_viewpoint_list]
    elif request.args.get('aid_list', None) is not None:
        aid_text = request.args.get('aid_list', '')
        if len(aid_text) > 0:
            # Parse a '[1, 2, 3]'-style list of annotation ids
            aid_text = aid_text.replace('[', '').replace(']', '')
            candidate_aid_list = [int(part.strip()) for part in aid_text.strip().split(',')]
        else:
            candidate_aid_list = ''
    else:
        candidate_aid_list = ''
    return ap.template(None, 'group_review', candidate_aid_list=candidate_aid_list, mode_list=VALID_TURK_MODES)
@register_route('/group_review/submit/', methods=['POST'])
def group_review_submit():
    """
    CommandLine:
        python -m ibeis.web.app --exec-group_review_submit

    Example:
        >>> # UNSTABLE_DOCTEST
        >>> from ibeis.web.app import *  # NOQA
        >>> import ibeis
        >>> import ibeis.web
        >>> ibs = ibeis.opendb('testdb1')
        >>> aid_list = ibs.get_valid_aids()[::2]
        >>> ibs.start_web_annot_groupreview(aid_list)
    """
    ibs = current_app.ibs
    method = request.form.get('group-review-submit', '')
    if method.lower() == 'populate':
        # Bounce back to the referrer with prefill=true appended exactly once
        redirection = request.referrer
        if 'prefill' not in redirection:
            separator = '&' if '?' in redirection else '?'
            redirection = '%s%sprefill=true' % (redirection, separator)
        return redirect(redirection)
    # Parse a '[1, 2, 3]'-style list of annotation ids from the form
    aid_text = request.form.get('aid_list', '')
    if len(aid_text) > 0:
        aid_text = aid_text.replace('[', '').replace(']', '')
        aid_list = [int(part.strip()) for part in aid_text.strip().split(',')]
    else:
        aid_list = []
    src_ag, dst_ag = ibs.prepare_annotgroup_review(aid_list)
    valid_modes = ut.get_list_column(VALID_TURK_MODES, 0)
    mode = request.form.get('group-review-mode', None)
    assert mode in valid_modes
    return redirect(url_for(mode, src_ag=src_ag, dst_ag=dst_ag))
@register_route('/ajax/annotation/src/<aid>')
def annotation_src(aid=None):
    """Serve the chip image for annotation ``aid``."""
    chip_path = current_app.ibs.get_annot_chip_fpath(aid)
    return ap.return_src(chip_path)
@register_api('/api/annot/<aid>/', methods=['GET'])
def annotation_src_api(aid=None):
    r"""
    Returns the base64 encoded image of annotation <aid>

    RESTful:
        Method: GET
        URL:    /api/annot/<aid>/
    """
    # Thin REST wrapper around the AJAX annotation_src handler
    return annotation_src(aid)
@register_route('/display/sightings')
def display_sightings(html_encode=True):
    """Render the sightings report; with ``html_encode`` newlines become <br/>.

    The ``complete`` query argument (any value) requests the complete report.
    """
    ibs = current_app.ibs
    complete = request.args.get('complete', None) is not None
    report = ibs.report_sightings_str(complete=complete, include_images=True)
    return report.replace('\n', '<br/>') if html_encode else report
@register_route('/download/sightings')
def download_sightings():
    """Send the raw (non-HTML) sightings report as a CSV file attachment."""
    csv_text = display_sightings(html_encode=False)
    return ap.send_file(csv_text, 'sightings.csv')
@register_route('/graph/sightings')
def graph_sightings():
    # No dedicated sightings graph page; fall back to the main 'view' route.
    return redirect(url_for('view'))
@register_route('/dbinfo')
def dbinfo():
    """Render the controller's database-info string wrapped in <pre> tags.

    Falls back to an empty string when the controller cannot produce it.
    """
    try:
        ibs = current_app.ibs
        dbinfo_str = ibs.get_dbinfo_str()
    except Exception:
        # BUG FIX: was a bare ``except:``, which also swallows SystemExit and
        # KeyboardInterrupt; catch Exception instead.
        dbinfo_str = ''
    dbinfo_str_formatted = '<pre>%s</pre>' % (dbinfo_str, )
    return dbinfo_str_formatted
@register_route('/api')
def api():
    """Render the API index page: every /api/ route grouped by HTTP method,
    plus an example authorization header for the dbname endpoint.
    """
    rules = current_app.url_map.iter_rules()
    rule_dict = {}
    for rule in rules:
        methods = rule.methods
        url = str(rule)
        if '/api/' in url:
            # BUG FIX: this used ``methods -= set([...])``, which (when
            # rule.methods is a plain set) mutates the routing rule's method
            # set in place and corrupts the live URL map; build a new set.
            methods = methods - set(['HEAD', 'OPTIONS'])
            if len(methods) == 0:
                continue
            if len(methods) > 1:
                print('methods = %r' % (methods,))
            # Index the page by a single representative method per rule
            method = list(methods)[0]
            if method not in rule_dict.keys():
                rule_dict[method] = []
            rule_dict[method].append((method, url, ))
    for method in rule_dict.keys():
        rule_dict[method].sort()
    url = '%s/api/core/dbname/' % (current_app.server_url, )
    app_auth = controller_inject.get_url_authorization(url)
    return ap.template(None, 'api',
                       app_url=url,
                       app_name=controller_inject.GLOBAL_APP_NAME,
                       app_secret=controller_inject.GLOBAL_APP_SECRET,
                       app_auth=app_auth,
                       rule_list=rule_dict)
@register_route('/upload')
def upload():
    # Render the static image-upload page.
    return ap.template(None, 'upload')
@register_route('/404')
def error404(exception=None):
    """Render the 404 page showing the exception text and current traceback."""
    import traceback
    exception_str = str(exception)
    # NOTE(review): format_exc() only yields a useful trace when called from
    # inside an active exception handler; via the bare /404 route it is empty.
    traceback_str = str(traceback.format_exc())
    print('[web] %r' % (exception_str, ))
    print('[web] %r' % (traceback_str, ))
    return ap.template(None, '404', exception_str=exception_str,
                       traceback_str=traceback_str)
################################################################################
def start_tornado(ibs, port=None, browser=BROWSER, url_suffix=''):
    """
    Initialize the web server.

    Wraps the Flask app in a Tornado WSGI container, optionally opens a
    browser at the server URL, and blocks in the Tornado IOLoop until the
    server is stopped.
    """
    def _start_tornado(ibs_, port_):
        # Get Flask app
        app = controller_inject.get_flask_app()
        # Attach the controller so route handlers can reach it via current_app.ibs
        app.ibs = ibs_
        # Try to ascertain the socket's domain name
        try:
            app.server_domain = socket.gethostbyname(socket.gethostname())
        except socket.gaierror:
            # Hostname could not be resolved; fall back to loopback
            app.server_domain = '127.0.0.1'
        app.server_port = port_
        # URL for the web instance
        app.server_url = 'http://%s:%s' % (app.server_domain, app.server_port)
        print('[web] Tornado server starting at %s' % (app.server_url,))
        # Launch the web browser to view the web interface and API
        if browser:
            url = app.server_url + url_suffix
            import webbrowser
            print('[web] opening browser with url = %r' % (url,))
            webbrowser.open(url)
        # Start the tornado web handler
        # WSGI = Web Server Gateway Interface
        # WSGI is Python standard described in detail in PEP 3333
        http_server = tornado.httpserver.HTTPServer(
            tornado.wsgi.WSGIContainer(app))
        http_server.listen(app.server_port)
        # Blocks here until the IOLoop is stopped
        tornado.ioloop.IOLoop.instance().start()
    # Set logging level
    logging.getLogger().setLevel(logging.INFO)
    # Get the port if unspecified
    if port is None:
        port = DEFAULT_WEB_API_PORT
    # Launch the web handler
    _start_tornado(ibs, port)
def start_from_ibeis(ibs, port=None, browser=BROWSER, precache=None, url_suffix='', start_job_queue=True):
    """
    Parse command line options and start the server.

    Optionally pre-computes thumbnails/chips, starts the background job
    manager, then blocks in start_tornado(); the job manager is shut down
    once the server loop returns.

    CommandLine:
        python -m ibeis --db PZ_MTEST --web
        python -m ibeis --db PZ_MTEST --web --browser
    """
    if precache is None:
        precache = ut.get_argflag('--precache')
    if precache:
        print('[web] Pre-computing all image thumbnails (with annots)...')
        ibs.preprocess_image_thumbs()
        print('[web] Pre-computing all image thumbnails (without annots)...')
        ibs.preprocess_image_thumbs(draw_annots=False)
        print('[web] Pre-computing all annotation chips...')
        ibs.check_chip_existence()
        ibs.compute_all_chips()
    if start_job_queue:
        #from ibeis.web import zmq_task_queue
        #ibs.load_plugin_module(zmq_task_queue)
        #import time
        #time.sleep(1)
        ibs.initialize_job_manager()
        #time.sleep(10)
    # Blocks until the web server is stopped
    start_tornado(ibs, port, browser, url_suffix)
    ibs.close_job_manager()
if __name__ == '__main__':
    """
    CommandLine:
        python -m ibeis.web.app
        python -m ibeis.web.app --allexamples
        python -m ibeis.web.app --allexamples --noface --nosrc
    """
    import multiprocessing
    multiprocessing.freeze_support()  # for win32
    import utool as ut  # NOQA
    # Run this module's doctests when executed directly
    ut.doctest_funcs()
|
# -*- coding: iso8859-1 -*-
#
# Copyright (C) 2003, 2004, 2005 Edgewall Software
# Copyright (C) 2003, 2004, 2005 Jonas Borgstrm <jonas@edgewall.com>
# Copyright (C) 2005 Christopher Lenz <cmlenz@gmx.de>
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.com/license.html.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://projects.edgewall.com/trac/.
#
# Author: Jonas Borgstrm <jonas@edgewall.com>
# Christopher Lenz <cmlenz@gmx.de>
from __future__ import generators
import time
from trac.core import TracError
from trac.ticket import TicketSystem
__all__ = ['Ticket', 'Type', 'Status', 'Resolution', 'Priority', 'Severity',
'Component', 'Version']
class Ticket(object):
def __init__(self, env, tkt_id=None, db=None):
self.env = env
self.fields = TicketSystem(self.env).get_ticket_fields()
self.values = {}
if tkt_id:
self._fetch_ticket(tkt_id, db)
else:
self._init_defaults(db)
self.id = self.time_created = self.time_changed = None
self._old = {}
exists = property(fget=lambda self: self.id is not None)
def _init_defaults(self, db=None):
for field in self.fields:
default = None
if not field.get('custom'):
default = self.env.config.get('ticket',
'default_' + field['name'])
else:
default = field.get('value')
options = field.get('options')
if default and options and default not in options:
try:
default_idx = int(default)
if default_idx > len(options):
raise ValueError
default = options[default_idx]
except ValueError:
self.env.log.warning('Invalid default value for '
'custom field "%s"'
% field['name'])
if default:
self.values.setdefault(field['name'], default)
def _fetch_ticket(self, tkt_id, db=None):
if not db:
db = self.env.get_db_cnx()
# Fetch the standard ticket fields
std_fields = [f['name'] for f in self.fields if not f.get('custom')]
cursor = db.cursor()
cursor.execute("SELECT %s,time,changetime FROM ticket WHERE id=%%s"
% ','.join(std_fields), (tkt_id,))
row = cursor.fetchone()
if not row:
raise TracError('Ticket %d does not exist.' % tkt_id,
'Invalid Ticket Number')
self.id = tkt_id
for i in range(len(std_fields)):
self.values[std_fields[i]] = row[i] or ''
self.time_created = row[len(std_fields)]
self.time_changed = row[len(std_fields) + 1]
# Fetch custom fields if available
custom_fields = [f['name'] for f in self.fields if f.get('custom')]
cursor.execute("SELECT name,value FROM ticket_custom WHERE ticket=%s",
(tkt_id,))
for name, value in cursor:
if name in custom_fields:
self.values[name] = value
def __getitem__(self, name):
return self.values[name]
def __setitem__(self, name, value):
"""Log ticket modifications so the table ticket_change can be updated"""
if self.values.has_key(name) and self.values[name] == value:
return
if not self._old.has_key(name): # Changed field
self._old[name] = self.values.get(name)
elif self._old[name] == value: # Change of field reverted
del self._old[name]
self.values[name] = value
def populate(self, values):
"""Populate the ticket with 'suitable' values from a dictionary"""
field_names = [f['name'] for f in self.fields]
for name in [name for name in values.keys() if name in field_names]:
self[name] = values.get(name, '')
# We have to do an extra trick to catch unchecked checkboxes
for name in [name for name in values.keys() if name[9:] in field_names
and name.startswith('checkbox_')]:
if not values.has_key(name[9:]):
self[name[9:]] = '0'
def insert(self, when=0, db=None):
"""Add ticket to database"""
assert not self.exists, 'Cannot insert an existing ticket'
if not db:
db = self.env.get_db_cnx()
handle_ta = True
else:
handle_ta = False
# Add a timestamp
if not when:
when = int(time.time())
self.time_created = self.time_changed = when
cursor = db.cursor()
# The owner field defaults to the component owner
if self.values.get('component') and not self.values.get('owner'):
try:
component = Component(self.env, self['component'], db=db)
if component.owner:
self['owner'] = component.owner
except TracError, e:
# Assume that no such component exists
pass
# Insert ticket record
std_fields = [f['name'] for f in self.fields if not f.get('custom')
and self.values.has_key(f['name'])]
cursor.execute("INSERT INTO ticket (%s,time,changetime) VALUES (%s)"
% (','.join(std_fields),
','.join(['%s'] * (len(std_fields) + 2))),
[self[name] for name in std_fields] +
[self.time_created, self.time_changed])
tkt_id = db.get_last_id(cursor, 'ticket')
# Insert custom fields
custom_fields = [f['name'] for f in self.fields if f.get('custom')
and self.values.has_key(f['name'])]
if custom_fields:
cursor.executemany("INSERT INTO ticket_custom (ticket,name,value) "
"VALUES (%s,%s,%s)", [(tkt_id, name, self[name])
for name in custom_fields])
if handle_ta:
db.commit()
self.id = tkt_id
self._old = {}
return self.id
def save_changes(self, author, comment, when=0, db=None):
"""
Store ticket changes in the database. The ticket must already exist in
the database.
"""
assert self.exists, 'Cannot update a new ticket'
if not self._old and not comment:
return # Not modified
if not db:
db = self.env.get_db_cnx()
handle_ta = True
else:
handle_ta = False
cursor = db.cursor()
if not when:
when = int(time.time())
if self.values.has_key('component'):
# If the component is changed on a 'new' ticket then owner field
# is updated accordingly. (#623).
if self.values.get('status') == 'new' \
and self._old.has_key('component') \
and not self._old.has_key('owner'):
try:
old_comp = Component(self.env, self._old['component'], db)
if old_comp.owner == self.values.get('owner'):
new_comp = Component(self.env, self['component'],
db)
self['owner'] = new_comp.owner
except TracError, e:
# If the old component has been removed from the database we
# just leave the owner as is.
pass
custom_fields = [f['name'] for f in self.fields if f.get('custom')]
for name in self._old.keys():
if name in custom_fields:
cursor.execute("SELECT * FROM ticket_custom "
"WHERE ticket=%s and name=%s", (self.id, name))
if cursor.fetchone():
cursor.execute("UPDATE ticket_custom SET value=%s "
"WHERE ticket=%s AND name=%s",
(self[name], self.id, name))
else:
cursor.execute("INSERT INTO ticket_custom (ticket,name,"
"value) VALUES(%s,%s,%s)",
(self.id, name, self[name]))
else:
cursor.execute("UPDATE ticket SET %s=%%s WHERE id=%%s" % name,
(self[name], self.id))
cursor.execute("INSERT INTO ticket_change "
"(ticket,time,author,field,oldvalue,newvalue) "
"VALUES (%s, %s, %s, %s, %s, %s)",
(self.id, when, author, name, self._old[name],
self[name]))
if comment:
cursor.execute("INSERT INTO ticket_change "
"(ticket,time,author,field,oldvalue,newvalue) "
"VALUES (%s,%s,%s,'comment','',%s)",
(self.id, when, author, comment))
cursor.execute("UPDATE ticket SET changetime=%s WHERE id=%s",
(when, self.id))
if handle_ta:
db.commit()
self._old = {}
self.time_changed = when
def get_changelog(self, when=0, db=None):
"""Return the changelog as a list of tuples of the form
(time, author, field, oldvalue, newvalue).
"""
if not db:
db = self.env.get_db_cnx()
cursor = db.cursor()
if when:
cursor.execute("SELECT time,author,field,oldvalue,newvalue "
"FROM ticket_change WHERE ticket=%s AND time=%s "
"UNION "
"SELECT time,author,'attachment',null,filename "
"FROM attachment WHERE id=%s AND time=%s "
"UNION "
"SELECT time,author,'comment',null,description "
"FROM attachment WHERE id=%s AND time=%s "
"ORDER BY time",
(self.id, when, self.id, when, self.id, when))
else:
cursor.execute("SELECT time,author,field,oldvalue,newvalue "
"FROM ticket_change WHERE ticket=%s "
"UNION "
"SELECT time,author,'attachment',null,filename "
"FROM attachment WHERE id=%s "
"UNION "
"SELECT time,author,'comment',null,description "
"FROM attachment WHERE id=%s "
"ORDER BY time", (self.id, self.id, self.id))
log = []
for t, author, field, oldvalue, newvalue in cursor:
log.append((int(t), author, field, oldvalue or '', newvalue or ''))
return log
class AbstractEnum(object):
type = None
def __init__(self, env, name=None, db=None):
self.env = env
if name:
if not db:
db = self.env.get_db_cnx()
cursor = db.cursor()
cursor.execute("SELECT value FROM enum WHERE type=%s AND name=%s",
(self.type, name))
row = cursor.fetchone()
if not row:
raise TracError, '%s %s does not exist.' % (self.type, name)
self.value = self._old_value = row[0]
self.name = self._old_name = name
else:
self.value = self._old_value = None
self.name = self._old_name = None
exists = property(fget=lambda self: self._old_value is not None)
def delete(self, db=None):
assert self.exists, 'Cannot deleting non-existent %s' % self.type
if not db:
db = self.env.get_db_cnx()
handle_ta = True
else:
handle_ta = False
cursor = db.cursor()
self.env.log.info('Deleting %s %s' % (self.type, self.name))
cursor.execute("DELETE FROM enum WHERE type=%s AND value=%s",
(self.type, self._old_value))
self.value = self._old_value = None
self.name = None
if handle_ta:
db.commit()
def insert(self, db=None):
assert self.name, 'Cannot create %s with no name' % self.type
if not db:
db = self.env.get_db_cnx()
handle_ta = True
else:
handle_ta = False
cursor = db.cursor()
self.env.log.debug("Creating new %s '%s'" % (self.type, self.name))
value = self.value
if not value:
cursor.execute(("SELECT COALESCE(MAX(%s),0) FROM enum "
"WHERE type=%%s") % db.cast('value', 'int'), (self.type,))
value = str(int(cursor.fetchone()[0]) + 1)
cursor.execute("INSERT INTO enum (type,name,value) VALUES (%s,%s,%s)",
(self.type, self.name, value))
if handle_ta:
db.commit()
def update(self, db=None):
assert self.exists, 'Cannot update non-existent %s' % self.type
assert self.name, 'Cannot update %s with no name' % self.type
if not db:
db = self.env.get_db_cnx()
handle_ta = True
else:
handle_ta = False
cursor = db.cursor()
self.env.log.info('Updating %s "%s"' % (self.type, self.name))
cursor.execute("UPDATE enum SET name=%s,value=%s "
"WHERE type=%s AND name=%s",
(self.name, self.value, self.type, self._old_name))
if self.name != self._old_name:
# Update tickets
cursor.execute("UPDATE ticket SET %s=%%s WHERE %s=%%s" %
(self.type, self.type), (self.name, self._old_name))
self._old_name = self.name
self._old_value = self.value
if handle_ta:
db.commit()
def select(cls, env, db=None):
if not db:
db = env.get_db_cnx()
cursor = db.cursor()
cursor.execute("SELECT name,value FROM enum WHERE type=%s "
"ORDER BY value", (cls.type,))
for name, value in cursor:
obj = cls(env)
obj.name = obj._old_name = name
obj.value = obj._old_value = value
yield obj
select = classmethod(select)
# Concrete enum kinds: `type` selects which rows of the `enum` table each
# class manages (see AbstractEnum).
class Type(AbstractEnum):
    type = 'ticket_type'
class Status(AbstractEnum):
    type = 'status'
class Resolution(AbstractEnum):
    type = 'resolution'
class Priority(AbstractEnum):
    type = 'priority'
class Severity(AbstractEnum):
    type = 'severity'
class Component(object):
def __init__(self, env, name=None, db=None):
self.env = env
if name:
if not db:
db = self.env.get_db_cnx()
cursor = db.cursor()
cursor.execute("SELECT owner,description FROM component "
"WHERE name=%s", (name,))
row = cursor.fetchone()
if not row:
raise TracError, 'Component %s does not exist.' % name
self.name = self._old_name = name
self.owner = row[0] or None
self.description = row[1] or ''
else:
self.name = self._old_name = None
self.owner = None
self.description = None
exists = property(fget=lambda self: self._old_name is not None)
def delete(self, db=None):
assert self.exists, 'Cannot deleting non-existent component'
if not db:
db = self.env.get_db_cnx()
handle_ta = True
else:
handle_ta = False
cursor = db.cursor()
self.env.log.info('Deleting component %s' % self.name)
cursor.execute("DELETE FROM component WHERE name=%s", (self.name,))
self.name = self._old_name = None
if handle_ta:
db.commit()
def insert(self, db=None):
assert self.name, 'Cannot create component with no name'
if not db:
db = self.env.get_db_cnx()
handle_ta = True
else:
handle_ta = False
cursor = db.cursor()
self.env.log.debug("Creating new component '%s'" % self.name)
cursor.execute("INSERT INTO component (name,owner,description) "
"VALUES (%s,%s,%s)",
(self.name, self.owner, self.description))
if handle_ta:
db.commit()
def update(self, db=None):
assert self.exists, 'Cannot update non-existent component'
assert self.name, 'Cannot update component with no name'
if not db:
db = self.env.get_db_cnx()
handle_ta = True
else:
handle_ta = False
cursor = db.cursor()
self.env.log.info('Updating component "%s"' % self.name)
cursor.execute("UPDATE component SET name=%s,owner=%s,description=%s "
"WHERE name=%s",
(self.name, self.owner, self.description,
self._old_name))
if self.name != self._old_name:
# Update tickets
cursor.execute("UPDATE ticket SET component=%s WHERE component=%s",
(self.name, self._old_name))
self._old_name = self.name
if handle_ta:
db.commit()
def select(cls, env, db=None):
if not db:
db = env.get_db_cnx()
cursor = db.cursor()
cursor.execute("SELECT name,owner,description FROM component "
"ORDER BY name")
for name, owner, description in cursor:
component = cls(env)
component.name = name
component.owner = owner or None
component.description = description or ''
yield component
select = classmethod(select)
class Version(object):
def __init__(self, env, name=None, db=None):
self.env = env
if name:
if not db:
db = self.env.get_db_cnx()
cursor = db.cursor()
cursor.execute("SELECT time,description FROM version "
"WHERE name=%s", (name,))
row = cursor.fetchone()
if not row:
raise TracError, 'Version %s does not exist.' % name
self.name = self._old_name = name
self.time = row[0] and int(row[0]) or None
self.description = row[1] or ''
else:
self.name = self._old_name = None
self.time = None
self.description = None
exists = property(fget=lambda self: self._old_name is not None)
def delete(self, db=None):
assert self.exists, 'Cannot deleting non-existent version'
if not db:
db = self.env.get_db_cnx()
handle_ta = True
else:
handle_ta = False
cursor = db.cursor()
self.env.log.info('Deleting version %s' % self.name)
cursor.execute("DELETE FROM version WHERE name=%s", (self.name,))
self.name = self._old_name = None
if handle_ta:
db.commit()
def insert(self, db=None):
assert self.name, 'Cannot create version with no name'
if not db:
db = self.env.get_db_cnx()
handle_ta = True
else:
handle_ta = False
cursor = db.cursor()
self.env.log.debug("Creating new version '%s'" % self.name)
cursor.execute("INSERT INTO version (name,time,description) "
"VALUES (%s,%s,%s)",
(self.name, self.time, self.description))
if handle_ta:
db.commit()
def update(self, db=None):
assert self.exists, 'Cannot update non-existent version'
assert self.name, 'Cannot update version with no name'
if not db:
db = self.env.get_db_cnx()
handle_ta = True
else:
handle_ta = False
cursor = db.cursor()
self.env.log.info('Updating version "%s"' % self.name)
cursor.execute("UPDATE version SET name=%s,time=%s,description=%s "
"WHERE name=%s",
(self.name, self.time, self.description,
self._old_name))
if self.name != self._old_name:
# Update tickets
cursor.execute("UPDATE ticket SET version=%s WHERE version=%s",
(self.name, self._old_name))
self._old_name = self.name
if handle_ta:
db.commit()
def select(cls, env, db=None):
if not db:
db = env.get_db_cnx()
cursor = db.cursor()
cursor.execute("SELECT name,time,description FROM version "
"ORDER BY COALESCE(time,0),name")
for name, time, description in cursor:
component = cls(env)
component.name = name
component.time = time and int(time) or None
component.description = description or ''
yield component
select = classmethod(select)
Fixed another postgresql type casting bug. Closes #2116 (again).
git-svn-id: eda3d06fcef731589ace1b284159cead3416df9b@2302 af82e41b-90c4-0310-8c96-b1721e28e2e2
# -*- coding: iso8859-1 -*-
#
# Copyright (C) 2003, 2004, 2005 Edgewall Software
# Copyright (C) 2003, 2004, 2005 Jonas Borgstrm <jonas@edgewall.com>
# Copyright (C) 2005 Christopher Lenz <cmlenz@gmx.de>
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.com/license.html.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://projects.edgewall.com/trac/.
#
# Author: Jonas Borgstrm <jonas@edgewall.com>
# Christopher Lenz <cmlenz@gmx.de>
from __future__ import generators
import time
from trac.core import TracError
from trac.ticket import TicketSystem
__all__ = ['Ticket', 'Type', 'Status', 'Resolution', 'Priority', 'Severity',
'Component', 'Version']
class Ticket(object):
    # A ticket row plus its custom fields.  Assignments made through
    # __setitem__ are logged in self._old so save_changes() can write the
    # corresponding `ticket_change` history entries.
    def __init__(self, env, tkt_id=None, db=None):
        # Load an existing ticket when tkt_id is given, otherwise start a
        # fresh one populated with the configured field defaults.
        self.env = env
        self.fields = TicketSystem(self.env).get_ticket_fields()
        self.values = {}
        if tkt_id:
            self._fetch_ticket(tkt_id, db)
        else:
            self._init_defaults(db)
            self.id = self.time_created = self.time_changed = None
        self._old = {}
    # True once the ticket has a database id (after fetch or insert()).
    exists = property(fget=lambda self: self.id is not None)
    def _init_defaults(self, db=None):
        # Seed self.values with the configured default of every field.
        for field in self.fields:
            default = None
            if not field.get('custom'):
                default = self.env.config.get('ticket',
                                              'default_' + field['name'])
            else:
                default = field.get('value')
                options = field.get('options')
                if default and options and default not in options:
                    # A select-type custom field may give its default as an
                    # index into the options list.
                    try:
                        default_idx = int(default)
                        if default_idx > len(options):
                            raise ValueError
                        default = options[default_idx]
                    except ValueError:
                        self.env.log.warning('Invalid default value for '
                                             'custom field "%s"'
                                             % field['name'])
            if default:
                self.values.setdefault(field['name'], default)
    def _fetch_ticket(self, tkt_id, db=None):
        # Load the ticket row and its custom fields; raises TracError when
        # the id is unknown.
        if not db:
            db = self.env.get_db_cnx()
        # Fetch the standard ticket fields
        std_fields = [f['name'] for f in self.fields if not f.get('custom')]
        cursor = db.cursor()
        cursor.execute("SELECT %s,time,changetime FROM ticket WHERE id=%%s"
                       % ','.join(std_fields), (tkt_id,))
        row = cursor.fetchone()
        if not row:
            raise TracError('Ticket %d does not exist.' % tkt_id,
                            'Invalid Ticket Number')
        self.id = tkt_id
        for i in range(len(std_fields)):
            self.values[std_fields[i]] = row[i] or ''
        self.time_created = row[len(std_fields)]
        self.time_changed = row[len(std_fields) + 1]
        # Fetch custom fields if available
        custom_fields = [f['name'] for f in self.fields if f.get('custom')]
        cursor.execute("SELECT name,value FROM ticket_custom WHERE ticket=%s",
                       (tkt_id,))
        for name, value in cursor:
            if name in custom_fields:
                self.values[name] = value
    def __getitem__(self, name):
        return self.values[name]
    def __setitem__(self, name, value):
        """Log ticket modifications so the table ticket_change can be updated"""
        if self.values.has_key(name) and self.values[name] == value:
            return
        if not self._old.has_key(name): # Changed field
            self._old[name] = self.values.get(name)
        elif self._old[name] == value: # Change of field reverted
            del self._old[name]
        self.values[name] = value
    def populate(self, values):
        """Populate the ticket with 'suitable' values from a dictionary"""
        field_names = [f['name'] for f in self.fields]
        for name in [name for name in values.keys() if name in field_names]:
            self[name] = values.get(name, '')
        # We have to do an extra trick to catch unchecked checkboxes
        for name in [name for name in values.keys() if name[9:] in field_names
                     and name.startswith('checkbox_')]:
            if not values.has_key(name[9:]):
                self[name[9:]] = '0'
    def insert(self, when=0, db=None):
        """Add ticket to database"""
        # Returns the new ticket id; commits only when this method opened
        # the connection itself (handle_ta).
        assert not self.exists, 'Cannot insert an existing ticket'
        if not db:
            db = self.env.get_db_cnx()
            handle_ta = True
        else:
            handle_ta = False
        # Add a timestamp
        if not when:
            when = int(time.time())
        self.time_created = self.time_changed = when
        cursor = db.cursor()
        # The owner field defaults to the component owner
        if self.values.get('component') and not self.values.get('owner'):
            try:
                component = Component(self.env, self['component'], db=db)
                if component.owner:
                    self['owner'] = component.owner
            except TracError, e:
                # Assume that no such component exists
                pass
        # Insert ticket record
        std_fields = [f['name'] for f in self.fields if not f.get('custom')
                      and self.values.has_key(f['name'])]
        cursor.execute("INSERT INTO ticket (%s,time,changetime) VALUES (%s)"
                       % (','.join(std_fields),
                          ','.join(['%s'] * (len(std_fields) + 2))),
                       [self[name] for name in std_fields] +
                       [self.time_created, self.time_changed])
        tkt_id = db.get_last_id(cursor, 'ticket')
        # Insert custom fields
        custom_fields = [f['name'] for f in self.fields if f.get('custom')
                         and self.values.has_key(f['name'])]
        if custom_fields:
            cursor.executemany("INSERT INTO ticket_custom (ticket,name,value) "
                               "VALUES (%s,%s,%s)", [(tkt_id, name, self[name])
                               for name in custom_fields])
        if handle_ta:
            db.commit()
        self.id = tkt_id
        self._old = {}
        return self.id
    def save_changes(self, author, comment, when=0, db=None):
        """
        Store ticket changes in the database. The ticket must already exist in
        the database.
        """
        assert self.exists, 'Cannot update a new ticket'
        if not self._old and not comment:
            return # Not modified
        if not db:
            db = self.env.get_db_cnx()
            handle_ta = True
        else:
            handle_ta = False
        cursor = db.cursor()
        if not when:
            when = int(time.time())
        if self.values.has_key('component'):
            # If the component is changed on a 'new' ticket then owner field
            # is updated accordingly. (#623).
            if self.values.get('status') == 'new' \
                    and self._old.has_key('component') \
                    and not self._old.has_key('owner'):
                try:
                    old_comp = Component(self.env, self._old['component'], db)
                    if old_comp.owner == self.values.get('owner'):
                        new_comp = Component(self.env, self['component'],
                                             db)
                        self['owner'] = new_comp.owner
                except TracError, e:
                    # If the old component has been removed from the database we
                    # just leave the owner as is.
                    pass
        custom_fields = [f['name'] for f in self.fields if f.get('custom')]
        for name in self._old.keys():
            if name in custom_fields:
                # Custom fields live in ticket_custom: update the row if it
                # exists, insert it otherwise.
                cursor.execute("SELECT * FROM ticket_custom "
                               "WHERE ticket=%s and name=%s", (self.id, name))
                if cursor.fetchone():
                    cursor.execute("UPDATE ticket_custom SET value=%s "
                                   "WHERE ticket=%s AND name=%s",
                                   (self[name], self.id, name))
                else:
                    cursor.execute("INSERT INTO ticket_custom (ticket,name,"
                                   "value) VALUES(%s,%s,%s)",
                                   (self.id, name, self[name]))
            else:
                cursor.execute("UPDATE ticket SET %s=%%s WHERE id=%%s" % name,
                               (self[name], self.id))
            cursor.execute("INSERT INTO ticket_change "
                           "(ticket,time,author,field,oldvalue,newvalue) "
                           "VALUES (%s, %s, %s, %s, %s, %s)",
                           (self.id, when, author, name, self._old[name],
                            self[name]))
        if comment:
            cursor.execute("INSERT INTO ticket_change "
                           "(ticket,time,author,field,oldvalue,newvalue) "
                           "VALUES (%s,%s,%s,'comment','',%s)",
                           (self.id, when, author, comment))
        cursor.execute("UPDATE ticket SET changetime=%s WHERE id=%s",
                       (when, self.id))
        if handle_ta:
            db.commit()
        self._old = {}
        self.time_changed = when
    def get_changelog(self, when=0, db=None):
        """Return the changelog as a list of tuples of the form
        (time, author, field, oldvalue, newvalue).
        """
        if not db:
            db = self.env.get_db_cnx()
        cursor = db.cursor()
        # str(self.id): the attachment subqueries compare against the text
        # column attachment.id, which PostgreSQL will not compare to an int.
        # NOTE(review): only the first attachment subquery casts the id to
        # str; the second still binds a plain int -- confirm this is
        # intentional (looks like it needs str() as well).
        if when:
            cursor.execute("SELECT time,author,field,oldvalue,newvalue "
                           "FROM ticket_change WHERE ticket=%s AND time=%s "
                           "UNION "
                           "SELECT time,author,'attachment',null,filename "
                           "FROM attachment WHERE id=%s AND time=%s "
                           "UNION "
                           "SELECT time,author,'comment',null,description "
                           "FROM attachment WHERE id=%s AND time=%s "
                           "ORDER BY time",
                           (self.id, when, str(self.id), when, self.id, when))
        else:
            cursor.execute("SELECT time,author,field,oldvalue,newvalue "
                           "FROM ticket_change WHERE ticket=%s "
                           "UNION "
                           "SELECT time,author,'attachment',null,filename "
                           "FROM attachment WHERE id=%s "
                           "UNION "
                           "SELECT time,author,'comment',null,description "
                           "FROM attachment WHERE id=%s "
                           "ORDER BY time", (self.id, str(self.id), self.id))
        log = []
        for t, author, field, oldvalue, newvalue in cursor:
            log.append((int(t), author, field, oldvalue or '', newvalue or ''))
        return log
class AbstractEnum(object):
type = None
def __init__(self, env, name=None, db=None):
self.env = env
if name:
if not db:
db = self.env.get_db_cnx()
cursor = db.cursor()
cursor.execute("SELECT value FROM enum WHERE type=%s AND name=%s",
(self.type, name))
row = cursor.fetchone()
if not row:
raise TracError, '%s %s does not exist.' % (self.type, name)
self.value = self._old_value = row[0]
self.name = self._old_name = name
else:
self.value = self._old_value = None
self.name = self._old_name = None
exists = property(fget=lambda self: self._old_value is not None)
def delete(self, db=None):
assert self.exists, 'Cannot deleting non-existent %s' % self.type
if not db:
db = self.env.get_db_cnx()
handle_ta = True
else:
handle_ta = False
cursor = db.cursor()
self.env.log.info('Deleting %s %s' % (self.type, self.name))
cursor.execute("DELETE FROM enum WHERE type=%s AND value=%s",
(self.type, self._old_value))
self.value = self._old_value = None
self.name = None
if handle_ta:
db.commit()
def insert(self, db=None):
assert self.name, 'Cannot create %s with no name' % self.type
if not db:
db = self.env.get_db_cnx()
handle_ta = True
else:
handle_ta = False
cursor = db.cursor()
self.env.log.debug("Creating new %s '%s'" % (self.type, self.name))
value = self.value
if not value:
cursor.execute(("SELECT COALESCE(MAX(%s),0) FROM enum "
"WHERE type=%%s") % db.cast('value', 'int'), (self.type,))
value = str(int(cursor.fetchone()[0]) + 1)
cursor.execute("INSERT INTO enum (type,name,value) VALUES (%s,%s,%s)",
(self.type, self.name, value))
if handle_ta:
db.commit()
def update(self, db=None):
assert self.exists, 'Cannot update non-existent %s' % self.type
assert self.name, 'Cannot update %s with no name' % self.type
if not db:
db = self.env.get_db_cnx()
handle_ta = True
else:
handle_ta = False
cursor = db.cursor()
self.env.log.info('Updating %s "%s"' % (self.type, self.name))
cursor.execute("UPDATE enum SET name=%s,value=%s "
"WHERE type=%s AND name=%s",
(self.name, self.value, self.type, self._old_name))
if self.name != self._old_name:
# Update tickets
cursor.execute("UPDATE ticket SET %s=%%s WHERE %s=%%s" %
(self.type, self.type), (self.name, self._old_name))
self._old_name = self.name
self._old_value = self.value
if handle_ta:
db.commit()
def select(cls, env, db=None):
if not db:
db = env.get_db_cnx()
cursor = db.cursor()
cursor.execute("SELECT name,value FROM enum WHERE type=%s "
"ORDER BY value", (cls.type,))
for name, value in cursor:
obj = cls(env)
obj.name = obj._old_name = name
obj.value = obj._old_value = value
yield obj
select = classmethod(select)
# Concrete enum kinds: `type` selects which rows of the `enum` table each
# class manages (see AbstractEnum).
class Type(AbstractEnum):
    type = 'ticket_type'
class Status(AbstractEnum):
    type = 'status'
class Resolution(AbstractEnum):
    type = 'resolution'
class Priority(AbstractEnum):
    type = 'priority'
class Severity(AbstractEnum):
    type = 'severity'
class Component(object):
def __init__(self, env, name=None, db=None):
self.env = env
if name:
if not db:
db = self.env.get_db_cnx()
cursor = db.cursor()
cursor.execute("SELECT owner,description FROM component "
"WHERE name=%s", (name,))
row = cursor.fetchone()
if not row:
raise TracError, 'Component %s does not exist.' % name
self.name = self._old_name = name
self.owner = row[0] or None
self.description = row[1] or ''
else:
self.name = self._old_name = None
self.owner = None
self.description = None
exists = property(fget=lambda self: self._old_name is not None)
def delete(self, db=None):
assert self.exists, 'Cannot deleting non-existent component'
if not db:
db = self.env.get_db_cnx()
handle_ta = True
else:
handle_ta = False
cursor = db.cursor()
self.env.log.info('Deleting component %s' % self.name)
cursor.execute("DELETE FROM component WHERE name=%s", (self.name,))
self.name = self._old_name = None
if handle_ta:
db.commit()
def insert(self, db=None):
assert self.name, 'Cannot create component with no name'
if not db:
db = self.env.get_db_cnx()
handle_ta = True
else:
handle_ta = False
cursor = db.cursor()
self.env.log.debug("Creating new component '%s'" % self.name)
cursor.execute("INSERT INTO component (name,owner,description) "
"VALUES (%s,%s,%s)",
(self.name, self.owner, self.description))
if handle_ta:
db.commit()
def update(self, db=None):
assert self.exists, 'Cannot update non-existent component'
assert self.name, 'Cannot update component with no name'
if not db:
db = self.env.get_db_cnx()
handle_ta = True
else:
handle_ta = False
cursor = db.cursor()
self.env.log.info('Updating component "%s"' % self.name)
cursor.execute("UPDATE component SET name=%s,owner=%s,description=%s "
"WHERE name=%s",
(self.name, self.owner, self.description,
self._old_name))
if self.name != self._old_name:
# Update tickets
cursor.execute("UPDATE ticket SET component=%s WHERE component=%s",
(self.name, self._old_name))
self._old_name = self.name
if handle_ta:
db.commit()
def select(cls, env, db=None):
if not db:
db = env.get_db_cnx()
cursor = db.cursor()
cursor.execute("SELECT name,owner,description FROM component "
"ORDER BY name")
for name, owner, description in cursor:
component = cls(env)
component.name = name
component.owner = owner or None
component.description = description or ''
yield component
select = classmethod(select)
class Version(object):
def __init__(self, env, name=None, db=None):
self.env = env
if name:
if not db:
db = self.env.get_db_cnx()
cursor = db.cursor()
cursor.execute("SELECT time,description FROM version "
"WHERE name=%s", (name,))
row = cursor.fetchone()
if not row:
raise TracError, 'Version %s does not exist.' % name
self.name = self._old_name = name
self.time = row[0] and int(row[0]) or None
self.description = row[1] or ''
else:
self.name = self._old_name = None
self.time = None
self.description = None
exists = property(fget=lambda self: self._old_name is not None)
def delete(self, db=None):
assert self.exists, 'Cannot deleting non-existent version'
if not db:
db = self.env.get_db_cnx()
handle_ta = True
else:
handle_ta = False
cursor = db.cursor()
self.env.log.info('Deleting version %s' % self.name)
cursor.execute("DELETE FROM version WHERE name=%s", (self.name,))
self.name = self._old_name = None
if handle_ta:
db.commit()
def insert(self, db=None):
assert self.name, 'Cannot create version with no name'
if not db:
db = self.env.get_db_cnx()
handle_ta = True
else:
handle_ta = False
cursor = db.cursor()
self.env.log.debug("Creating new version '%s'" % self.name)
cursor.execute("INSERT INTO version (name,time,description) "
"VALUES (%s,%s,%s)",
(self.name, self.time, self.description))
if handle_ta:
db.commit()
def update(self, db=None):
assert self.exists, 'Cannot update non-existent version'
assert self.name, 'Cannot update version with no name'
if not db:
db = self.env.get_db_cnx()
handle_ta = True
else:
handle_ta = False
cursor = db.cursor()
self.env.log.info('Updating version "%s"' % self.name)
cursor.execute("UPDATE version SET name=%s,time=%s,description=%s "
"WHERE name=%s",
(self.name, self.time, self.description,
self._old_name))
if self.name != self._old_name:
# Update tickets
cursor.execute("UPDATE ticket SET version=%s WHERE version=%s",
(self.name, self._old_name))
self._old_name = self.name
if handle_ta:
db.commit()
def select(cls, env, db=None):
if not db:
db = env.get_db_cnx()
cursor = db.cursor()
cursor.execute("SELECT name,time,description FROM version "
"ORDER BY COALESCE(time,0),name")
for name, time, description in cursor:
component = cls(env)
component.name = name
component.time = time and int(time) or None
component.description = description or ''
yield component
select = classmethod(select)
|
# Python 2 Tkinter stub: creates the root window only.  No mainloop() call
# is present here, so no event loop is started.
from Tkinter import *
window = Tk()
Delete foobar.py
|
# -*- coding: utf-8 -*-
# FOGLAMP_BEGIN
# See: http://foglamp.readthedocs.io/
# FOGLAMP_END
""" Test end to end flow with:
Playback south plugin
FFT Filter on playback south plugin and Threshold on PI north
PI Server (C) plugin
"""
import http.client
import os
import json
import time
import pytest
import utils
import subprocess
__author__ = "Vaibhav Singhal"
__copyright__ = "Copyright (c) 2019 Dianomic Systems Inc."
__license__ = "Apache 2.0"
__version__ = "${VERSION}"
SVC_NAME = "playfilter"
CSV_NAME = "wind-data.csv"
CSV_HEADERS = "10 Min Std Dev,10 Min Sampled Avg"
NORTH_TASK_NAME = "NorthReadingsTo_PI"
ASSET = "e2e_fft_threshold"
class TestE2eFilterFFTThreshold:
    """End-to-end test: CSV playback south service with an FFT filter,
    sending readings through a threshold filter to a PI (C) north task."""

    def get_ping_status(self, foglamp_url):
        """Return the parsed JSON body of GET /foglamp/ping."""
        _connection = http.client.HTTPConnection(foglamp_url)
        _connection.request("GET", '/foglamp/ping')
        r = _connection.getresponse()
        assert 200 == r.status
        r = r.read().decode()
        jdoc = json.loads(r)
        return jdoc

    def get_statistics_map(self, foglamp_url):
        """Return FogLAMP statistics serialized to a flat name -> value map."""
        _connection = http.client.HTTPConnection(foglamp_url)
        _connection.request("GET", '/foglamp/statistics')
        r = _connection.getresponse()
        assert 200 == r.status
        r = r.read().decode()
        jdoc = json.loads(r)
        return utils.serialize_stats_map(jdoc)

    def get_asset_tracking_details(self, foglamp_url, event=None):
        """Return asset tracker records, optionally filtered by event name."""
        _connection = http.client.HTTPConnection(foglamp_url)
        uri = '/foglamp/track'
        if event:
            uri += '?event={}'.format(event)
        _connection.request("GET", uri)
        r = _connection.getresponse()
        assert 200 == r.status
        r = r.read().decode()
        jdoc = json.loads(r)
        return jdoc

    @pytest.fixture
    def start_south_north(self, reset_and_start_foglamp, add_south, enable_schedule, remove_directories,
                          remove_data_file, south_branch, foglamp_url, add_filter, filter_branch,
                          start_north_pi_server_c, pi_host, pi_port, pi_token, asset_name=ASSET):
        """ This fixture clone a south and north repo and starts both south and north instance
            reset_and_start_foglamp: Fixture that resets and starts foglamp, no explicit invocation, called at start
            add_south: Fixture that adds a south service with given configuration with enabled or disabled mode
            remove_directories: Fixture that remove directories created during the tests
            remove_data_file: Fixture that remove data file created during the tests
        """
        # Define configuration of FogLAMP playback service
        south_config = {"assetName": {"value": "{}".format(asset_name)},
                        "csvFilename": {"value": "{}".format(CSV_NAME)},
                        "fieldNames": {"value": "10 Min Std Dev"},
                        "ingestMode": {"value": "batch"}}

        # Copy the sample CSV into FogLAMP's data directory for the playback plugin.
        csv_dest = os.path.join(os.path.expandvars('${FOGLAMP_ROOT}'), 'data/{}'.format(CSV_NAME))
        csv_src_file = os.path.join(os.path.expandvars('${FOGLAMP_ROOT}'), 'tests/system/python/data/{}'.format(CSV_NAME))
        cmd = 'cp {} {}'.format(csv_src_file, csv_dest)
        status = subprocess.call(cmd, shell=True)
        if status != 0:
            if status < 0:
                print("Killed by signal", status)
            else:
                print("copy command failed with return code - ", status)

        south_plugin = "playback"
        add_south(south_plugin, south_branch, foglamp_url, service_name=SVC_NAME,
                  config=south_config, start_service=False)

        filter_cfg_fft = {"asset": ASSET, "lowPass": "10", "highPass": "30", "enable": "true"}
        add_filter("fft", filter_branch, "FFT Filter", filter_cfg_fft, foglamp_url, SVC_NAME)

        # Since playback plugin reads all csv data at once, we cant keep it in enable mode before filter add
        # enable service when all filters all applied
        enable_schedule(foglamp_url, SVC_NAME)

        start_north_pi_server_c(foglamp_url, pi_host, pi_port, pi_token, taskname=NORTH_TASK_NAME,
                                start_task=False)
        # Add threshold filter at north side
        filter_cfg_threshold = {"expression": "Band00 > 30", "enable": "true"}
        # TODO: Apply a better expression with AND / OR with data points e.g. OR Band01 > 19
        add_filter("threshold", filter_branch, "fltr_threshold", filter_cfg_threshold, foglamp_url, NORTH_TASK_NAME)
        enable_schedule(foglamp_url, NORTH_TASK_NAME)

        yield self.start_south_north

        # Teardown: remove the cloned plugin repos and the copied CSV file.
        remove_directories("/tmp/foglamp-south-{}".format(south_plugin))
        filters = ["fft", "threshold"]
        for fltr in filters:
            remove_directories("/tmp/foglamp-filter-{}".format(fltr))
        remove_data_file(csv_dest)

    def test_end_to_end(self, start_south_north, disable_schedule, foglamp_url, read_data_from_pi, pi_host, pi_admin,
                        pi_passwd, pi_db, wait_time, retries, skip_verify_north_interface):
        """ Test that data is inserted in FogLAMP using playback south plugin &
            FFT filter, and sent to PI after passing through threshold filter
            start_south_north: Fixture that starts FogLAMP with south service, add filter and north instance
            skip_verify_north_interface: Flag for assertion of data from Pi web API
            Assertions:
                on endpoint GET /foglamp/asset
                on endpoint GET /foglamp/asset/<asset_name> with applied data processing filter value
                data received from PI is same as data sent"""
        time.sleep(wait_time)
        conn = http.client.HTTPConnection(foglamp_url)
        self._verify_ingest(conn)

        # disable schedule to stop the service and sending data
        disable_schedule(foglamp_url, SVC_NAME)

        ping_response = self.get_ping_status(foglamp_url)
        assert 6 == ping_response["dataRead"]
        if not skip_verify_north_interface:
            assert 1 == ping_response["dataSent"]

        actual_stats_map = self.get_statistics_map(foglamp_url)
        assert 6 == actual_stats_map[ASSET.upper() + " FFT"]
        assert 6 == actual_stats_map['READINGS']
        if not skip_verify_north_interface:
            assert 1 == actual_stats_map['Readings Sent']
            assert 1 == actual_stats_map[NORTH_TASK_NAME]

        if not skip_verify_north_interface:
            self._verify_egress(read_data_from_pi, pi_host, pi_admin, pi_passwd, pi_db, wait_time, retries)

        # Asset tracker should record the ingest by the playback plugin...
        tracking_details = self.get_asset_tracking_details(foglamp_url, "Ingest")
        assert len(tracking_details["track"]), "Failed to track Ingest event"
        tracked_item = tracking_details["track"][0]
        assert "playfilter" == tracked_item["service"]
        assert "e2e_fft_threshold FFT" == tracked_item["asset"]
        assert "playback" == tracked_item["plugin"]

        # ...and the FFT filter application on the same asset.
        tracking_details = self.get_asset_tracking_details(foglamp_url, "Filter")
        assert len(tracking_details["track"]), "Failed to track Ingest event"
        tracked_item = tracking_details["track"][0]
        assert "playfilter" == tracked_item["service"]
        assert "e2e_fft_threshold FFT" == tracked_item["asset"]
        assert "FFT Filter" == tracked_item["plugin"]

        if not skip_verify_north_interface:
            egress_tracking_details = self.get_asset_tracking_details(foglamp_url, "Egress")
            assert len(egress_tracking_details["track"]), "Failed to track Egress event"
            tracked_item = egress_tracking_details["track"][0]
            assert "NorthReadingsTo_PI" == tracked_item["service"]
            assert "e2e_fft_threshold FFT" == tracked_item["asset"]
            assert "PI_Server_V2" == tracked_item["plugin"]

    def _verify_ingest(self, conn):
        """Assert that the FFT asset exists and carries Band00..Band02 readings."""
        conn.request("GET", '/foglamp/asset')
        r = conn.getresponse()
        assert 200 == r.status
        r = r.read().decode()
        jdoc = json.loads(r)
        assert len(jdoc)
        assert ASSET + " FFT" == jdoc[0]["assetCode"]
        assert 0 < jdoc[0]["count"]

        conn.request("GET", '/foglamp/asset/{}'.format(ASSET + "%20FFT"))
        r = conn.getresponse()
        assert 200 == r.status
        r = r.read().decode()
        jdoc = json.loads(r)
        assert 0 < len(jdoc)
        # print(jdoc)
        read = jdoc[0]["reading"]
        assert read["Band00"]
        assert read["Band01"]
        assert read["Band02"]

    def _verify_egress(self, read_data_from_pi, pi_host, pi_admin, pi_passwd, pi_db, wait_time, retries):
        """Poll PI until data for the FFT asset appears, then assert the
        threshold filter's contract (only Band00 > 30 passes through)."""
        retry_count = 0
        data_from_pi = None
        while (data_from_pi is None or data_from_pi == []) and retry_count < retries:
            data_from_pi = read_data_from_pi(pi_host, pi_admin, pi_passwd, pi_db,
                                             ASSET + " FFT", {"Band00"})
            retry_count += 1
            time.sleep(wait_time * 2)

        # BUGFIX: only fail when no data was ever retrieved. The previous
        # check (`retry_count == retries`) also failed when valid data
        # arrived on the final retry attempt.
        if data_from_pi is None or data_from_pi == []:
            assert False, "Failed to read data from PI"
        assert 30 < data_from_pi["Band00"][-1]
Commented out the code and added it as a TODO
# -*- coding: utf-8 -*-
# FOGLAMP_BEGIN
# See: http://foglamp.readthedocs.io/
# FOGLAMP_END
""" Test end to end flow with:
Playback south plugin
FFT Filter on playback south plugin and Threshold on PI north
PI Server (C) plugin
"""
import http.client
import os
import json
import time
import pytest
import utils
import subprocess
__author__ = "Vaibhav Singhal"
__copyright__ = "Copyright (c) 2019 Dianomic Systems Inc."
__license__ = "Apache 2.0"
__version__ = "${VERSION}"
SVC_NAME = "playfilter"
CSV_NAME = "wind-data.csv"
CSV_HEADERS = "10 Min Std Dev,10 Min Sampled Avg"
NORTH_TASK_NAME = "NorthReadingsTo_PI"
ASSET = "e2e_fft_threshold"
class TestE2eFilterFFTThreshold:
    """End-to-end test: CSV playback south service with an FFT filter,
    sending readings through a threshold filter to a PI (C) north task."""

    def get_ping_status(self, foglamp_url):
        """Return the parsed JSON body of GET /foglamp/ping."""
        _connection = http.client.HTTPConnection(foglamp_url)
        _connection.request("GET", '/foglamp/ping')
        r = _connection.getresponse()
        assert 200 == r.status
        r = r.read().decode()
        jdoc = json.loads(r)
        return jdoc

    def get_statistics_map(self, foglamp_url):
        """Return FogLAMP statistics serialized to a flat name -> value map."""
        _connection = http.client.HTTPConnection(foglamp_url)
        _connection.request("GET", '/foglamp/statistics')
        r = _connection.getresponse()
        assert 200 == r.status
        r = r.read().decode()
        jdoc = json.loads(r)
        return utils.serialize_stats_map(jdoc)

    def get_asset_tracking_details(self, foglamp_url, event=None):
        """Return asset tracker records, optionally filtered by event name."""
        _connection = http.client.HTTPConnection(foglamp_url)
        uri = '/foglamp/track'
        if event:
            uri += '?event={}'.format(event)
        _connection.request("GET", uri)
        r = _connection.getresponse()
        assert 200 == r.status
        r = r.read().decode()
        jdoc = json.loads(r)
        return jdoc

    @pytest.fixture
    def start_south_north(self, reset_and_start_foglamp, add_south, enable_schedule, remove_directories,
                          remove_data_file, south_branch, foglamp_url, add_filter, filter_branch,
                          start_north_pi_server_c, pi_host, pi_port, pi_token, asset_name=ASSET):
        """ This fixture clone a south and north repo and starts both south and north instance
            reset_and_start_foglamp: Fixture that resets and starts foglamp, no explicit invocation, called at start
            add_south: Fixture that adds a south service with given configuration with enabled or disabled mode
            remove_directories: Fixture that remove directories created during the tests
            remove_data_file: Fixture that remove data file created during the tests
        """
        # Define configuration of FogLAMP playback service
        south_config = {"assetName": {"value": "{}".format(asset_name)},
                        "csvFilename": {"value": "{}".format(CSV_NAME)},
                        "fieldNames": {"value": "10 Min Std Dev"},
                        "ingestMode": {"value": "batch"}}

        # Copy the sample CSV into FogLAMP's data directory for the playback plugin.
        csv_dest = os.path.join(os.path.expandvars('${FOGLAMP_ROOT}'), 'data/{}'.format(CSV_NAME))
        csv_src_file = os.path.join(os.path.expandvars('${FOGLAMP_ROOT}'), 'tests/system/python/data/{}'.format(CSV_NAME))
        cmd = 'cp {} {}'.format(csv_src_file, csv_dest)
        status = subprocess.call(cmd, shell=True)
        if status != 0:
            if status < 0:
                print("Killed by signal", status)
            else:
                print("copy command failed with return code - ", status)

        south_plugin = "playback"
        add_south(south_plugin, south_branch, foglamp_url, service_name=SVC_NAME,
                  config=south_config, start_service=False)

        filter_cfg_fft = {"asset": ASSET, "lowPass": "10", "highPass": "30", "enable": "true"}
        add_filter("fft", filter_branch, "FFT Filter", filter_cfg_fft, foglamp_url, SVC_NAME)

        # Since playback plugin reads all csv data at once, we cant keep it in enable mode before filter add
        # enable service when all filters all applied
        enable_schedule(foglamp_url, SVC_NAME)

        start_north_pi_server_c(foglamp_url, pi_host, pi_port, pi_token, taskname=NORTH_TASK_NAME,
                                start_task=False)
        # Add threshold filter at north side
        filter_cfg_threshold = {"expression": "Band00 > 30", "enable": "true"}
        # TODO: Apply a better expression with AND / OR with data points e.g. OR Band01 > 19
        add_filter("threshold", filter_branch, "fltr_threshold", filter_cfg_threshold, foglamp_url, NORTH_TASK_NAME)
        enable_schedule(foglamp_url, NORTH_TASK_NAME)

        yield self.start_south_north

        # Teardown: remove the cloned plugin repos and the copied CSV file.
        remove_directories("/tmp/foglamp-south-{}".format(south_plugin))
        filters = ["fft", "threshold"]
        for fltr in filters:
            remove_directories("/tmp/foglamp-filter-{}".format(fltr))
        remove_data_file(csv_dest)

    def test_end_to_end(self, start_south_north, disable_schedule, foglamp_url, read_data_from_pi, pi_host, pi_admin,
                        pi_passwd, pi_db, wait_time, retries, skip_verify_north_interface):
        """ Test that data is inserted in FogLAMP using playback south plugin &
            FFT filter, and sent to PI after passing through threshold filter
            start_south_north: Fixture that starts FogLAMP with south service, add filter and north instance
            skip_verify_north_interface: Flag for assertion of data from Pi web API
            Assertions:
                on endpoint GET /foglamp/asset
                on endpoint GET /foglamp/asset/<asset_name> with applied data processing filter value
                data received from PI is same as data sent"""
        time.sleep(wait_time)
        conn = http.client.HTTPConnection(foglamp_url)
        self._verify_ingest(conn)

        # disable schedule to stop the service and sending data
        disable_schedule(foglamp_url, SVC_NAME)

        ping_response = self.get_ping_status(foglamp_url)
        assert 6 == ping_response["dataRead"]
        if not skip_verify_north_interface:
            assert 1 == ping_response["dataSent"]

        actual_stats_map = self.get_statistics_map(foglamp_url)
        assert 6 == actual_stats_map[ASSET.upper() + " FFT"]
        assert 6 == actual_stats_map['READINGS']
        if not skip_verify_north_interface:
            assert 1 == actual_stats_map['Readings Sent']
            assert 1 == actual_stats_map[NORTH_TASK_NAME]

        if not skip_verify_north_interface:
            self._verify_egress(read_data_from_pi, pi_host, pi_admin, pi_passwd, pi_db, wait_time, retries)

        tracking_details = self.get_asset_tracking_details(foglamp_url, "Ingest")
        assert len(tracking_details["track"]), "Failed to track Ingest event"
        tracked_item = tracking_details["track"][0]
        assert "playfilter" == tracked_item["service"]
        assert "e2e_fft_threshold FFT" == tracked_item["asset"]
        assert "playback" == tracked_item["plugin"]

        # TODO: Add asset tracker entry for fft and threshold filters
        # tracking_details = self.get_asset_tracking_details(foglamp_url, "Filter")
        # assert len(tracking_details["track"]), "Failed to track Ingest event"
        # tracked_item = tracking_details["track"][0]
        # assert "playfilter" == tracked_item["service"]
        # assert "e2e_fft_threshold FFT" == tracked_item["asset"]
        # assert "FFT Filter" == tracked_item["plugin"]

        if not skip_verify_north_interface:
            egress_tracking_details = self.get_asset_tracking_details(foglamp_url, "Egress")
            assert len(egress_tracking_details["track"]), "Failed to track Egress event"
            tracked_item = egress_tracking_details["track"][0]
            assert "NorthReadingsTo_PI" == tracked_item["service"]
            assert "e2e_fft_threshold FFT" == tracked_item["asset"]
            assert "PI_Server_V2" == tracked_item["plugin"]

    def _verify_ingest(self, conn):
        """Assert that the FFT asset exists and carries Band00..Band02 readings."""
        conn.request("GET", '/foglamp/asset')
        r = conn.getresponse()
        assert 200 == r.status
        r = r.read().decode()
        jdoc = json.loads(r)
        assert len(jdoc)
        assert ASSET + " FFT" == jdoc[0]["assetCode"]
        assert 0 < jdoc[0]["count"]

        conn.request("GET", '/foglamp/asset/{}'.format(ASSET + "%20FFT"))
        r = conn.getresponse()
        assert 200 == r.status
        r = r.read().decode()
        jdoc = json.loads(r)
        assert 0 < len(jdoc)
        # print(jdoc)
        read = jdoc[0]["reading"]
        assert read["Band00"]
        assert read["Band01"]
        assert read["Band02"]

    def _verify_egress(self, read_data_from_pi, pi_host, pi_admin, pi_passwd, pi_db, wait_time, retries):
        """Poll PI until data for the FFT asset appears, then assert the
        threshold filter's contract (only Band00 > 30 passes through)."""
        retry_count = 0
        data_from_pi = None
        while (data_from_pi is None or data_from_pi == []) and retry_count < retries:
            data_from_pi = read_data_from_pi(pi_host, pi_admin, pi_passwd, pi_db,
                                             ASSET + " FFT", {"Band00"})
            retry_count += 1
            time.sleep(wait_time * 2)

        # BUGFIX: only fail when no data was ever retrieved. The previous
        # check (`retry_count == retries`) also failed when valid data
        # arrived on the final retry attempt.
        if data_from_pi is None or data_from_pi == []:
            assert False, "Failed to read data from PI"
        assert 30 < data_from_pi["Band00"][-1]
|
"""
File Manager module to take care of data dir
"""
import cv2 as cv
import os
from os.path import *
from os import listdir
import glob
import numpy as np
class Sample_Manager():
    """
    Class for managing files taken from kinect either for tr or ts
    """
    """Path to the multiple data being managed"""
    #Data path
    data_path = os.path.abspath(os.path.join(os.getcwd(), os.pardir)) +"/data/"
    #Detector path
    detect_path = data_path + "detector/"
    #Images path
    img_path = data_path + "img/"
    #Training path
    tr_path = img_path + "tr/"
    #Test path
    ts_path = img_path + "ts/"
    #.xml file with cascade detector for frontal faces
    faceCascade = cv.CascadeClassifier(detect_path + "haarcascade_frontalface.xml")
    """Internal dir counters"""
    # img_ptr: index of the last image written in the current sample dir;
    # tr_ptr / ts_ptr: numeric name of the currently selected tr / ts dir.
    img_ptr = 0; tr_ptr = 0; ts_ptr = 0
    def __init__(self,mode="tr"):
        """
        Initializes counter for training and test files by reading
        the data through the state file.
        """
        # Scan the existing numeric sub-directories to find the highest index,
        # then create a fresh directory past it and make it the current one.
        if mode=="tr": #Training mode
            self.tr_counter = [x[0].split("/")[-1] for x in os.walk(self.tr_path)]
            # NOTE(review): this local name shadows the builtin max() within
            # this method; builtin is still usable in other methods.
            max = 0
            for i in list(self.tr_counter):
                try:
                    if int(i)>max:
                        max = int(i)
                except:
                    # Non-numeric directory names are skipped.
                    continue
            self.tr_counter=max
            self.new_sampling("tr")
            self.tr_ptr=self.tr_counter
            self.to_dir(self.tr_ptr, "tr")
        else:
            self.ts_counter = [x[0].split("/")[-1] for x in os.walk(self.ts_path)]
            max = 0
            for i in list(self.ts_counter):
                try:
                    if int(i)>max:
                        max = int(i)
                except:
                    continue
            self.ts_counter=max
            self.new_sampling("ts")
            self.ts_ptr=self.ts_counter
            self.to_dir(self.ts_ptr, "ts")
    """def __del__(self):"""
    def new_sampling(self,mode="tr"):
        """Creates new directory under the dir specified by 'mode'"""
        if mode == "tr": #Creates new folder for training
            self.tr_counter += 1
            print "Path: ",self.tr_path+str(self.tr_counter)
            os.makedirs(self.tr_path+str(self.tr_counter))
            self.tr_ptr=self.tr_counter
        else: #Creates new folder for test
            self.ts_counter += 1
            print "Path: ",self.ts_path+str(self.ts_counter)
            os.makedirs(self.ts_path+str(self.ts_counter))
            self.ts_ptr=self.ts_counter
        # Restart the per-image numbering for the new directory.
        self.img_ptr = 0
    def store_samples(self,samples,mode="tr"):
        """
        Gets the image samples (BW,RGB and Depth) and stores them in corresponding
        dir indicated by ptr. Images are named according to img_ptr value
        """
        print "tr pointer: ",self.tr_ptr
        print "ts pointer: ",self.ts_ptr
        if mode=="tr": path=self.tr_path+str(self.tr_ptr)+"/"
        else: path=self.ts_path+str(self.ts_ptr)+"/"
        try:
            self.img_ptr += 1
            print "Path: ",path
            print "img_pointer: ",self.img_ptr
            # samples appears to be (bw, depth, rgb); the depth matrix is also
            # saved raw as .npy -- TODO confirm ordering against the caller.
            cv.imwrite(path+str(self.img_ptr)+"_bw.png",samples[0])
            cv.imwrite(path+str(self.img_ptr)+"_depth.png",samples[1])
            cv.imwrite(path+str(self.img_ptr)+"_rgb.png",samples[2])
            np.save(path+str(self.img_ptr)+'_mtx.npy',samples[1])
        except:
            print "Images couldn't be saved"
    def to_dir(self,dir_ptr,mode="tr"):
        """Selects directory dir_ptr as current and resumes img_ptr from the
        highest image index already stored in it."""
        if mode == "tr":
            if dir_ptr>self.tr_counter:
                # NOTE(review): bare raise with no active exception -- this
                # does not produce a meaningful error; confirm intent.
                raise
            else:
                self.tr_ptr=dir_ptr
                # Collect the numeric prefixes of the files in the dir.
                self.img_ptr = [ int(f.split("_")[0]) for f in listdir(self.tr_path+str(dir_ptr)+"/") if isfile(join(self.tr_path+str(dir_ptr)+"/",f)) ]
                if len(self.img_ptr)==0:
                    self.img_ptr=0
                else:
                    self.img_ptr = max(self.img_ptr)
                print "Tr pointer: ",self.tr_ptr
                print "Img pointer: ",self.img_ptr
        else:
            if dir_ptr>self.ts_counter:
                raise
            else:
                self.ts_ptr=dir_ptr
                self.img_ptr = [ int(f.split("_")[0]) for f in listdir(self.ts_path+str(dir_ptr)+"/") if isfile(join(self.ts_path+str(dir_ptr)+"/",f)) ]
                if len(self.img_ptr)==0:
                    self.img_ptr=0
                else:
                    self.img_ptr = max(self.img_ptr)
                print "Ts pointer: ",self.ts_ptr
                print "Img pointer: ",self.img_ptr
class Picture_Manager():
    """
    Class for images management for preprocessing and recognition
    """
    #Data path
    data_path = os.path.abspath(os.path.join(os.getcwd(), os.pardir)) +"/data/"
    #Images path
    img_path = data_path + "img/"
    #Training path
    tr_path = img_path + "tr/"
    #Test path
    ts_path = img_path + "ts/"
    #Recognition path
    rec_path = data_path + "recognition/"
    def get_samples(self,mode="tr",type="bw"):
        """Load every sample of the given type ("bw", "mtx" or "depth") from
        the tr/ts tree. Returns (samples_matrix, samples_labels): one
        flattened image per row, labels taken from the numeric dir names."""
        if mode=="tr":
            path = self.tr_path
        else:
            path = self.ts_path
        if type == "bw":
            pattern = '*_bw.png'
        elif type=="mtx":
            pattern = '*_mtx.npy'
        else:
            pattern = '*_depth.png'
        samples_matrix = None
        samples_labels = None
        """Building up the matrixes"""
        for label in os.listdir(path):
            for img in glob.glob1(path+str(label),pattern):
                if type=="bw" or type=="depth":
                    # Load as greyscale (flag 0) and flatten to a row vector.
                    bw_img = cv.imread(path+str(label)+"/"+str(img),0)
                    img_vector = bw_img.reshape(bw_img.shape[0]*bw_img.shape[1])
                else: #Depth matrix loading
                    # assumes depth matrices hold 192*256 values -- TODO confirm
                    img_vector = np.load(path+str(label)+"/"+str(img)).reshape(192*256)
                try:
                    samples_matrix = np.vstack((samples_matrix,img_vector))
                    samples_labels = np.vstack((samples_labels,int(label)))
                except:
                    # First sample: stacking onto None fails, so seed directly.
                    samples_matrix = img_vector
                    samples_labels = int(label)
        return samples_matrix, samples_labels
    def save_samples(self,mode,type,data,labels):
        """Write normalized depth matrices (type "mtx") back to disk,
        numbering files from 1 within each label directory."""
        if mode == "tr":
            path = self.tr_path
        else:
            path = self.ts_path
        if type == "mtx":
            c_label = -1; counter = 1
            for i in xrange(data.shape[0]):
                # Reset the per-directory counter whenever the label changes;
                # assumes rows are grouped by label -- TODO confirm.
                if labels[i][0]!= c_label:
                    c_label = labels[i][0]
                    counter=1
                mtx = data[i].reshape(256,192)
                np.savetxt(path+str(labels[i][0])+"/"+str(counter)+"_nmtx.npy",mtx)
                counter+=1
    def load_model(self,mode,num_components,threshold):
        """Load a face recognizer: mode 1 -> Eigenfaces, otherwise
        Fisherfaces. Returns the fresh (untrained) model if no file exists."""
        if mode==1:
            name = "eigenfaces.yml"
            model = cv.createEigenFaceRecognizer(num_components,threshold)
            try:
                model.load(self.rec_path+name)
            except:
                print "There was no model"
        else:
            name = "fisherfaces.yml"
            model = cv.createFisherFaceRecognizer()
            try:
                model.load(self.rec_path+name)
            except:
                print "There was no model"
        return model
    def save_model(self,mode,model):
        """Persist the recognizer under the recognition directory."""
        if mode==1:
            name = "eigenfaces.yml"
        else:
            name = "fisherfaces.yml"
        model.save(self.rec_path+name)
#test = Picture_Manager()
#test.get_samples("tr")
Sample Manager class modifications
"""
File Manager module to take care of data dir
"""
import cv2 as cv
import os
from os.path import *
from os import listdir
import glob
import numpy as np
class Sample_Manager():
    """
    Class for managing files taken from kinect either for tr or ts
    """
    """Path to the multiple data being managed"""
    #Data path
    data_path = "%s/data" % (os.path.abspath(os.path.join(os.getcwd(), os.pardir)))
    #Detector path
    detect_path = "%s/detector/" % (data_path)
    #Images path
    img_path = "%s/img" % (data_path)
    #Training path
    tr_path = "%s/tr/" % (img_path)
    #Test path
    ts_path = "%s/ts/" %(img_path)
    #.xml file with cascade detector for frontal faces
    faceCascade = cv.CascadeClassifier(detect_path + "haarcascade_frontalface.xml")
    def __init__(self):
        """Resume the tr/ts counters from the highest existing directory name."""
        """Training set"""
        # NOTE(review): sorted() compares directory names lexicographically,
        # so "9" sorts after "10"; confirm behaviour with >9 directories.
        self.tr_counter = sorted([x[0].split("/")[-1] for x in os.walk(self.tr_path)])
        # os.walk's first entry is tr_path itself; its trailing "/" yields ""
        if not self.tr_counter[-1]=="":
            self.tr_counter = int(self.tr_counter[-1])
        else:
            self.tr_counter = 0
        """Testing set"""
        self.ts_counter = sorted([x[0].split("/")[-1] for x in os.walk(self.ts_path)])
        if not self.ts_counter[-1]=="":
            self.ts_counter = int(self.ts_counter[-1])
        else:
            self.ts_counter = 0
    def __del__(self):
        """Deletes directories if empty"""
        # A directory whose walk yields a single entry (itself) holds nothing.
        try:
            files = sorted([x[0].split("/")[-1] for x in os.walk(self.tr_path+str(self.tr_counter)+"/")])
            if len(files)==1:
                os.removedirs(self.tr_path+str(self.tr_counter))
        except:pass
        ###
        try:
            files = sorted([x[0].split("/")[-1] for x in os.walk(self.ts_path+str(self.ts_counter)+"/")])
            if len(files)==1:
                os.removedirs(self.ts_path+str(self.ts_counter))
        except: pass
    def new_sampling(self,mode="tr"):
        """Creates new directory under the dir specified by 'mode'"""
        if mode == "tr": #Creates new folder for training
            # NOTE(review): the path is printed before the counter is
            # incremented, so the log shows the previous directory.
            print "Path: ",self.tr_path+str(self.tr_counter)
            self.tr_counter+=1
            os.makedirs(self.tr_path+str(self.tr_counter))
        else: #Creates new folder for test
            print "Path: ",self.ts_path+str(self.ts_counter)
            self.ts_counter += 1
            os.makedirs(self.ts_path+str(self.ts_counter))
        # Restart the per-image numbering for the new directory.
        self.img_ptr = 0
    def store_samples(self,samples,mode="tr"):
        """
        Gets the image samples (BW,RGB and Depth) and stores them in corresponding
        dir indicated by ptr. Images are named according to img_ptr value
        """
        if mode=="tr": path=self.tr_path+str(self.tr_counter)+"/"
        else: path=self.ts_path+str(self.ts_counter)+"/"
        try:
            # img_ptr is only initialised by new_sampling(); calling this
            # first falls through to the except branch -- TODO confirm.
            self.img_ptr += 1
            print "Path: ",path
            print "img_pointer: ",self.img_ptr
            cv.imwrite(path+str(self.img_ptr)+"_bw.png",samples[0])
            cv.imwrite(path+str(self.img_ptr)+"_depth.png",samples[1])
            cv.imwrite(path+str(self.img_ptr)+"_rgb.png",samples[2])
            np.save(path+str(self.img_ptr)+'_mtx.npy',samples[1])
        except:
            print "Images couldn't be saved"
class Picture_Manager():
    """
    Class for image management for preprocessing and recognition
    """
    #Data path
    data_path = "%s/data/"%(os.path.abspath(os.path.join(os.getcwd(), os.pardir)))
    #Images path
    img_path = "%simg/" % (data_path)
    #Training path
    tr_path = "%str/" % (img_path)
    #Test path
    ts_path = "%sts/" % (img_path)
    #Recognition path
    rec_path = "%srecognition/"%(data_path)
    def get_samples(self,mode="tr",type="bw"):
        """Load every sample of the given type ("bw", "mtx" or "depth") from
        the tr/ts tree. Returns (samples_matrix, samples_labels): one
        flattened image per row, labels taken from the numeric dir names."""
        if mode=="tr":
            path = self.tr_path
        else:
            path = self.ts_path
        if type == "bw":
            pattern = '*_bw.png'
        elif type=="mtx":
            pattern = '*_mtx.npy'
        else:
            pattern = '*_depth.png'
        samples_matrix = None
        samples_labels = None
        """Building up the matrixes"""
        for label in os.listdir(path):
            for img in glob.glob1(path+str(label),pattern):
                if type=="bw" or type=="depth":
                    # Load as greyscale (flag 0) and flatten to a row vector.
                    bw_img = cv.imread(path+str(label)+"/"+str(img),0)
                    img_vector = bw_img.reshape(bw_img.shape[0]*bw_img.shape[1])
                else: #Depth matrix loading
                    # assumes depth matrices hold 192*256 values -- TODO confirm
                    img_vector = np.load(path+str(label)+"/"+str(img)).reshape(192*256)
                try:
                    samples_matrix = np.vstack((samples_matrix,img_vector))
                    samples_labels = np.vstack((samples_labels,int(label)))
                except:
                    # First sample: stacking onto None fails, so seed directly.
                    samples_matrix = img_vector
                    samples_labels = int(label)
        return samples_matrix, samples_labels
    def save_samples(self,mode,type,data,labels):
        """Write normalized depth matrices (type "mtx") back to disk,
        numbering files from 1 within each label directory."""
        if mode == "tr":
            path = self.tr_path
        else:
            path = self.ts_path
        if type == "mtx":
            c_label = -1; counter = 1
            for i in xrange(data.shape[0]):
                # Reset the per-directory counter whenever the label changes;
                # assumes rows are grouped by label -- TODO confirm.
                if labels[i][0]!= c_label:
                    c_label = labels[i][0]
                    counter=1
                mtx = data[i].reshape(256,192)
                np.savetxt(path+str(labels[i][0])+"/"+str(counter)+"_nmtx.npy",mtx)
                counter+=1
    def load_model(self,mode,num_components,threshold):
        """Load a face recognizer: mode 1 -> Eigenfaces, otherwise
        Fisherfaces. Returns the fresh (untrained) model if no file exists."""
        if mode==1:
            name = "eigenfaces.yaml"
            model = cv.createEigenFaceRecognizer(num_components,threshold)
        else:
            name = "fisherfaces.yaml"
            model = cv.createFisherFaceRecognizer()
        try:
            model.load(self.rec_path+name)
        except:
            print "There was no model"
        return model
    def save_model(self,mode,model):
        """Persist the recognizer under the recognition directory."""
        if mode==1:
            name = "eigenfaces.yaml"
        else:
            name = "fisherfaces.yaml"
        model.save(self.rec_path+name)
#test = Picture_Manager()
#test.get_samples("tr")
|
#!/usr/bin/env python
#
#3> <> rdfs:seeAlso <https://github.com/timrdf/csv2rdf4lod-automation/wiki/tic-turtle-in-comments> .
#3>
#3> <#> a doap:Project;
#3> dcterms:description "Download a URL and compute Functional Requirements for Bibliographic Resources (FRBR) stacks using cryptograhic digests for the resulting content.";
#3> doap:developer <http://tw.rpi.edu/instances/JamesMcCusker>;
#3> doap:helper <http://purl.org/twc/id/person/TimLebo>;
#3> rdfs:seeAlso <https://github.com/timrdf/csv2rdf4lod-automation/wiki/Script:-pcurl.py>;
#3> .
from rdflib import *
from surf import *
from fstack import *
import re, os
import rdflib
import hashlib
import httplib
from urlparse import urlparse, urlunparse
import dateutil.parser
import subprocess
import platform
from serializer import *
from StringIO import StringIO
# These are the namespaces we are using. They need to be added in
# order for the Object RDF Mapping tool to work.
ns.register(frbr="http://purl.org/vocab/frbr/core#")
ns.register(frir="http://purl.org/twc/ontology/frir.owl#")
ns.register(pexp="tag:tw.rpi.edu,2011:expression:")
ns.register(pmanif="tag:tw.rpi.edu,2011:manifestation:")
ns.register(pitem="tag:tw.rpi.edu,2011:item:")
ns.register(nfo="http://www.semanticdesktop.org/ontologies/2007/03/22/nfo#")
ns.register(irw='http://www.ontologydesignpatterns.org/ont/web/irw.owl#')
ns.register(hash="di:")
ns.register(prov="http://dvcs.w3.org/hg/prov/raw-file/tip/ontology/ProvenanceOntology.owl#")
ns.register(http="http://www.w3.org/2011/http#")
ns.register(header="http://www.w3.org/2011/http-headers#")
ns.register(method="http://www.w3.org/2011/http-methods#")
ns.register(status="http://www.w3.org/2011/http-statusCodes#")
def call(command):
    """Run *command* through the shell and return its (stdout, stderr) pair."""
    proc = subprocess.Popen(command, shell=True,
                            stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE)
    return proc.communicate()
def getController(Agent):
    """Build a PROV Agent for the current user.

    Shells out to the csv2rdf4lod user-account helper; the [1:-2] slice
    strips the surrounding '<' and '>\\n' from the emitted URI (assumes
    that exact output format -- TODO confirm).
    """
    return Agent(call('$CSV2RDF4LOD_HOME/bin/util/user-account.sh --cite')[0][1:-2])
# Map URL scheme -> httplib connection class; only http/https are supported.
connections = {'http':httplib.HTTPConnection,
               'https':httplib.HTTPSConnection}
def getResponse(url):
    """Issue a GET for *url* and return the raw httplib response.

    The connection class is chosen by URL scheme via `connections`;
    an unsupported scheme raises KeyError.
    """
    o = urlparse(str(url))
    #print o
    connection = connections[o.scheme](o.netloc)
    # Rebuild only the path+query portion for the request line.
    fullPath = urlunparse([None,None,o.path,o.params,o.query,o.fragment])
    connection.request('GET',fullPath)
    return connection.getresponse()
def pcurl(url):
    """Download *url* and build its FRBR stack (Work/Expression/
    Manifestation/Item) with HTTP provenance, writing <file>.prov.ttl.

    Redirects are followed (each hop recorded via irw:redirectsTo); a
    redirect loop or a non-200 final status raises Exception.
    """
    ns.register(workurl=url+'#')
    pStore = Store(reader="rdflib", writer="rdflib",
                   rdflib_store='IOMemory')
    pSession = Session(pStore)
    Work = pSession.get_class(ns.FRBR['Work'])
    Agent = pSession.get_class(ns.PROV['Agent'])
    Entity = pSession.get_class(ns.PROV['Entity'])
    controller = getController(Agent)
    work = Work(url)
    works = set([url])
    response = getResponse(url)
    content = response.read()
    originalWork = work
    # Follow the redirect chain, persisting each hop as a Work; `works`
    # guards against redirect loops.
    while response.status >= 300 and response.status < 400:
        newURL = response.msg.dict['location']
        if newURL in works:
            raise Exception("Redirect loop")
        works.add(newURL)
        newWork = Work(newURL)
        newWork.save()
        work.irw_redirectsTo.append(newWork)
        work.save()
        work = newWork
        response = getResponse(work.subject)
        content = response.read()
    if response.status != 200:
        raise Exception(response.reason)
    pSession.commit()
    #work = originalWork
    workURI = str(work.subject)
    # ORM classes for the FRBR / NFO / HTTP vocabulary terms used below.
    FileHash = work.session.get_class(ns.NFO['FileHash'])
    ContentDigest = work.session.get_class(ns.FRIR['ContentDigest'])
    Item = work.session.get_class(ns.FRBR['Item'])
    Request = work.session.get_class(ns.HTTP['Request'])
    RequestHeader = work.session.get_class(ns.HTTP['RequestHeader'])
    Response = work.session.get_class(ns.HTTP['Response'])
    ResponseHeader = work.session.get_class(ns.HTTP['ResponseHeader'])
    Method = work.session.get_class(ns.HTTP["Method"])
    GET = Method(ns.METHOD["GET"])
    Manifestation = work.session.get_class(ns.FRBR['Manifestation'])
    Expression = work.session.get_class(ns.FRBR['Expression'])
    ProcessExecution = work.session.get_class(ns.PROV['ProcessExecution'])
    # Save the payload locally, named after the last non-empty path segment.
    o = urlparse(str(workURI))
    filename = [f for f in o.path.split("/") if len(f) > 0][-1]
    #print filename
    f = open(filename,"wb+")
    f.write(content)
    f.close()
    mimetype = response.msg.dict['content-type']
    # fstack computes the digest-based FRBR stack for the downloaded file.
    pStore, localItem = fstack(open(filename,'rb+'),filename,workURI,pStore,mimetype)
    #localItem = Item(localItem.subject)
    itemHashValue = createItemHash(url, response, content)
    item = Response(ns.PITEM['-'.join(itemHashValue[:2])])
    item.http_httpVersion = '1.1'
    # Record every response header on the HTTP Response resource.
    for field in response.msg.dict.keys():
        header = ResponseHeader()
        header.http_fieldName = field
        header.http_fieldValue = response.msg.dict[field]
        header.http_hdrName = ns.HEADER[field.lower()]
        header.save()
        item.http_headers.append(header)
    item.nfo_hasHash.append(createHashInstance(itemHashValue,FileHash))
    item.dcterms_date = dateutil.parser.parse(response.msg.dict['date'])
    item.frbr_exemplarOf = localItem.frbr_exemplarOf
    provF = open(filename+".prov.ttl","wb+")
    localItem.frbr_reproductionOf.append(item)
    # Model the GET itself as an http:Request provenance event.
    getPE = Request()
    # NOTE(review): attribute spelled http_methd here -- confirm against the
    # W3C HTTP vocabulary (http:mthd) before changing.
    getPE.http_methd = GET
    getPE.http_requestURI = workURI
    getPE.dcterms_date = localItem.dcterms_date
    getPE.prov_hadRecipe.append(GET)
    getPE.prov_wasControlledBy = controller
    getPE.prov_used.append(item)
    getPE.http_resp = item
    localItem.prov_wasGeneratedBy = getPE
    item.save()
    localItem.save()
    getPE.save()
    pSession.commit()
    bindPrefixes(pStore.reader.graph)
    provF.write(pStore.reader.graph.serialize(format="turtle"))
def usage():
    """Print the command-line usage for pcurl.py to stdout."""
    print '''usage: pcurl.py [--help|-h] [--format|-f xml|turtle|n3|nt] [url ...]

Download a URL and compute Functional Requirements for Bibliographic Resources
(FRBR) stacks using cryptograhic digests for the resulting content.

Refer to http://purl.org/twc/pub/mccusker2012parallel
for more information and examples.

optional arguments:
 url            url to compute a FRBR stack for.
 -h, --help     Show this help message and exit,
 -f, --format   File format for FRBR stacks. One of xml, turtle, n3, or nt.
'''
if __name__ == "__main__":
    # BUGFIX: `sys` is used below but never imported at module level in this
    # file; importing it here keeps the CLI entry point self-contained.
    import sys
    urls = []
    i = 1
    fileFormat = 'turtle'
    extension = 'ttl'
    if '-h' in sys.argv or '--help' in sys.argv:
        usage()
        quit()
    # Parse arguments: -f/--format <fmt> selects the serialization; every
    # other argument is treated as a URL to fetch.
    while i < len(sys.argv):
        if sys.argv[i] == '-f' or sys.argv[i] == '--format':
            fileFormat = sys.argv[i+1]
            try:
                # NOTE(review): typeExtensions is not defined in this file;
                # presumably provided by one of the star imports -- confirm.
                extension = typeExtensions[fileFormat]
            except:
                usage()
                quit(1)
            i += 1  # skip the format value argument
        else:
            try:
                o = urlparse(str(sys.argv[i]))
                urls.append(sys.argv[i])
            except:
                usage()
                quit(1)
        i += 1
    for arg in urls:
        pcurl(arg)
updated prov namespace to http://www.w3.org/ns/prov#
#!/usr/bin/env python
#
#3> <> rdfs:seeAlso <https://github.com/timrdf/csv2rdf4lod-automation/wiki/tic-turtle-in-comments> .
#3>
#3> <#> a doap:Project;
#3> dcterms:description "Download a URL and compute Functional Requirements for Bibliographic Resources (FRBR) stacks using cryptograhic digests for the resulting content.";
#3> doap:developer <http://tw.rpi.edu/instances/JamesMcCusker>;
#3> doap:helper <http://purl.org/twc/id/person/TimLebo>;
#3> rdfs:seeAlso <https://github.com/timrdf/csv2rdf4lod-automation/wiki/Script:-pcurl.py>;
#3> .
from rdflib import *
from surf import *
from fstack import *
import re, os
import rdflib
import hashlib
import httplib
from urlparse import urlparse, urlunparse
import dateutil.parser
import subprocess
import platform
from serializer import *
from StringIO import StringIO
# These are the namespaces we are using. They need to be added in
# order for the Object RDF Mapping tool to work.
ns.register(frbr="http://purl.org/vocab/frbr/core#")
ns.register(frir="http://purl.org/twc/ontology/frir.owl#")
ns.register(pexp="tag:tw.rpi.edu,2011:expression:")
ns.register(pmanif="tag:tw.rpi.edu,2011:manifestation:")
ns.register(pitem="tag:tw.rpi.edu,2011:item:")
ns.register(nfo="http://www.semanticdesktop.org/ontologies/2007/03/22/nfo#")
ns.register(irw='http://www.ontologydesignpatterns.org/ont/web/irw.owl#')
ns.register(hash="di:")
ns.register(prov="http://www.w3.org/ns/prov#")
ns.register(http="http://www.w3.org/2011/http#")
ns.register(header="http://www.w3.org/2011/http-headers#")
ns.register(method="http://www.w3.org/2011/http-methods#")
ns.register(status="http://www.w3.org/2011/http-statusCodes#")
def call(command):
    """Run *command* through the shell and return (stdout, stderr).

    Both streams are captured; the tuple is exactly what
    ``Popen.communicate()`` returns.
    """
    # shell=True is required because callers pass full shell command
    # strings (including environment-variable expansion).
    process = subprocess.Popen(
        command,
        shell=True,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
    )
    return process.communicate()
def getController(Agent):
    # Build a PROV Agent for the current user, as identified by the
    # csv2rdf4lod helper script.  The [1:-2] slice strips the leading "<"
    # and trailing ">\n" around the URI the script prints.
    # NOTE(review): assumes $CSV2RDF4LOD_HOME is set in the environment
    # and the script emits "<uri>\n" -- confirm against the script.
    return Agent(call('$CSV2RDF4LOD_HOME/bin/util/user-account.sh --cite')[0][1:-2])
# Map URL scheme -> Python 2 httplib connection class used by getResponse().
connections = {'http':httplib.HTTPConnection,
'https':httplib.HTTPSConnection}
def getResponse(url):
    """Issue a GET for *url* and return the (unread) httplib response."""
    parts = urlparse(str(url))
    #print parts
    connection = connections[parts.scheme](parts.netloc)
    # Rebuild everything after the authority so params/query/fragment
    # survive in the request line.
    request_path = urlunparse(
        [None, None, parts.path, parts.params, parts.query, parts.fragment])
    connection.request('GET', request_path)
    return connection.getresponse()
def pcurl(url):
    """Download *url* and record a FRBR stack plus HTTP provenance.

    Follows HTTP redirects (recording each hop with irw:redirectsTo),
    writes the retrieved content to a local file named after the final
    URL's last path segment, and serializes the collected provenance
    graph to "<filename>.prov.ttl" as Turtle.

    Raises Exception on a redirect loop or any non-200 final status.
    """
    ns.register(workurl=url+'#')
    pStore = Store(reader="rdflib", writer="rdflib",
                   rdflib_store='IOMemory')
    pSession = Session(pStore)
    Work = pSession.get_class(ns.FRBR['Work'])
    Agent = pSession.get_class(ns.PROV['Agent'])
    Entity = pSession.get_class(ns.PROV['Entity'])
    controller = getController(Agent)
    work = Work(url)
    # Track every URL seen so a redirect cycle can be detected.
    works = set([url])
    response = getResponse(url)
    content = response.read()
    originalWork = work
    # Follow 3xx redirects, chaining Works with irw:redirectsTo.
    while response.status >= 300 and response.status < 400:
        newURL = response.msg.dict['location']
        if newURL in works:
            raise Exception("Redirect loop")
        works.add(newURL)
        newWork = Work(newURL)
        newWork.save()
        work.irw_redirectsTo.append(newWork)
        work.save()
        work = newWork
        response = getResponse(work.subject)
        content = response.read()
    if response.status != 200:
        raise Exception(response.reason)
    pSession.commit()
    #work = originalWork
    workURI = str(work.subject)
    FileHash = work.session.get_class(ns.NFO['FileHash'])
    ContentDigest = work.session.get_class(ns.FRIR['ContentDigest'])
    Item = work.session.get_class(ns.FRBR['Item'])
    Request = work.session.get_class(ns.HTTP['Request'])
    RequestHeader = work.session.get_class(ns.HTTP['RequestHeader'])
    Response = work.session.get_class(ns.HTTP['Response'])
    ResponseHeader = work.session.get_class(ns.HTTP['ResponseHeader'])
    Method = work.session.get_class(ns.HTTP["Method"])
    GET = Method(ns.METHOD["GET"])
    Manifestation = work.session.get_class(ns.FRBR['Manifestation'])
    Expression = work.session.get_class(ns.FRBR['Expression'])
    ProcessExecution = work.session.get_class(ns.PROV['ProcessExecution'])
    # Local file name = last non-empty segment of the final URL's path.
    o = urlparse(str(workURI))
    filename = [f for f in o.path.split("/") if len(f) > 0][-1]
    #print filename
    f = open(filename,"wb+")
    f.write(content)
    f.close()
    mimetype = response.msg.dict['content-type']
    # fstack() computes the FRBR digest stack for the downloaded bytes.
    pStore, localItem = fstack(open(filename,'rb+'),filename,workURI,pStore,mimetype)
    #localItem = Item(localItem.subject)
    itemHashValue = createItemHash(url, response, content)
    item = Response(ns.PITEM['-'.join(itemHashValue[:2])])
    item.http_httpVersion = '1.1'
    # Record every response header as an http:ResponseHeader resource.
    for field in response.msg.dict.keys():
        header = ResponseHeader()
        header.http_fieldName = field
        header.http_fieldValue = response.msg.dict[field]
        header.http_hdrName = ns.HEADER[field.lower()]
        header.save()
        item.http_headers.append(header)
    item.nfo_hasHash.append(createHashInstance(itemHashValue,FileHash))
    item.dcterms_date = dateutil.parser.parse(response.msg.dict['date'])
    item.frbr_exemplarOf = localItem.frbr_exemplarOf
    provF = open(filename+".prov.ttl","wb+")
    localItem.frbr_reproductionOf.append(item)
    # Describe the GET itself as an http:Request controlled by the user.
    getPE = Request()
    # NOTE(review): "http_methd" looks like a typo (the http vocab uses
    # "mthd") -- confirm against consumers before changing.
    getPE.http_methd = GET
    getPE.http_requestURI = workURI
    getPE.dcterms_date = localItem.dcterms_date
    getPE.prov_hadRecipe.append(GET)
    getPE.prov_wasControlledBy = controller
    getPE.prov_used.append(item)
    getPE.http_resp = item
    localItem.prov_wasGeneratedBy = getPE
    item.save()
    localItem.save()
    getPE.save()
    pSession.commit()
    bindPrefixes(pStore.reader.graph)
    provF.write(pStore.reader.graph.serialize(format="turtle"))
    # Fix: the provenance file was previously never closed, so the
    # serialized Turtle could remain unflushed.
    provF.close()
def usage():
    # Print the command-line help text (Python 2 print statement).
    print '''usage: pcurl.py [--help|-h] [--format|-f xml|turtle|n3|nt] [url ...]
Download a URL and compute Functional Requirements for Bibliographic Resources
(FRBR) stacks using cryptograhic digests for the resulting content.
Refer to http://purl.org/twc/pub/mccusker2012parallel
for more information and examples.
optional arguments:
url url to compute a FRBR stack for.
-h, --help Show this help message and exit,
-f, --format File format for FRBR stacks. One of xml, turtle, n3, or nt.
'''
if __name__ == "__main__":
    # Minimal hand-rolled argument parsing: collect URLs, optionally
    # honour -f/--format for the output serialization.
    urls = []
    i = 1
    fileFormat = 'turtle'
    extension = 'ttl'
    if '-h' in sys.argv or '--help' in sys.argv:
        usage()
        quit()
    while i < len(sys.argv):
        if sys.argv[i] == '-f' or sys.argv[i] == '--format':
            fileFormat = sys.argv[i+1]
            try:
                # NOTE(review): typeExtensions is not defined in this
                # module; presumably provided by one of the star imports
                # (fstack/serializer) -- confirm. The bare except also
                # hides a NameError if it is missing.
                extension = typeExtensions[fileFormat]
            except:
                usage()
                quit(1)
            # Skip the format value; the shared increment below skips the flag.
            i += 1
        else:
            try:
                # urlparse is used only to validate the argument looks like a URL.
                o = urlparse(str(sys.argv[i]))
                urls.append(sys.argv[i])
            except:
                usage()
                quit(1)
        i += 1
    for arg in urls:
        pcurl(arg)
|
# KiBoM release metadata: version string and release date.
KIBOM_VERSION = "1.5"
KIBOM_DATE = "2018-3-15"
Update version.py
# KiBoM release metadata: version string and release date.
KIBOM_VERSION = "1.51"
KIBOM_DATE = "2018-8-10"
|
# -*- coding: utf-8 -*-
# python-holidays
# ---------------
# A fast, efficient Python library for generating country, province and state
# specific sets of holidays on the fly. It aims to make determining whether a
# specific date is a holiday as fast and flexible as possible.
#
# Author: ryanss <ryanssdev@icloud.com> (c) 2014-2017
# dr-prodigy <maurizio.montel@gmail.com> (c) 2017-2021
# Website: https://github.com/dr-prodigy/python-holidays
# License: MIT (see LICENSE file)
from datetime import date
from dateutil.easter import easter
from dateutil.relativedelta import relativedelta as rd, MO, SU, FR
from holidays.constants import FRI, SAT, SUN, WEEKEND
from holidays.constants import (
JAN,
FEB,
MAR,
APR,
MAY,
JUN,
JUL,
AUG,
SEP,
OCT,
NOV,
DEC,
)
from holidays.holiday_base import HolidayBase
class Canada(HolidayBase):
    """Canadian national and provincial/territorial holidays.

    The province/territory is selected with the ``prov`` keyword (two
    letter code from :data:`PROVINCES`); holidays are generated lazily
    per year by :meth:`_populate`.
    """
    # ISO 3166-2 codes of the 13 supported provinces and territories.
    PROVINCES = [
        "AB",
        "BC",
        "MB",
        "NB",
        "NL",
        "NS",
        "NT",
        "NU",
        "ON",
        "PE",
        "QC",
        "SK",
        "YT",
    ]
    def __init__(self, **kwargs):
        """Create a Canadian calendar; ``prov`` defaults to "ON"."""
        self.country = "CA"
        self.prov = kwargs.pop("prov", "ON")
        HolidayBase.__init__(self, **kwargs)
    def _populate(self, year):
        """Add all holidays for *year* (observed shifts included when
        ``self.observed`` is true)."""
        # New Year's Day
        if year >= 1867:
            name = "New Year's Day"
            self[date(year, JAN, 1)] = name
            if self.observed and date(year, JAN, 1).weekday() == SUN:
                self[date(year, JAN, 1) + rd(days=+1)] = name + " (Observed)"
            elif self.observed and date(year, JAN, 1).weekday() == SAT:
                # Add Dec 31st from the previous year without triggering
                # the entire year to be added
                expand = self.expand
                self.expand = False
                self[date(year, JAN, 1) + rd(days=-1)] = name + " (Observed)"
                self.expand = expand
            # The next year's observed New Year's Day can be in this year
            # when it falls on a Friday (Jan 1st is a Saturday)
            if self.observed and date(year, DEC, 31).weekday() == FRI:
                self[date(year, DEC, 31)] = name + " (Observed)"
        # Family Day / Louis Riel Day (MB) / Islander Day (PE)
        # / Heritage Day (NS, YT)
        if self.prov in ("AB", "SK", "ON") and year >= 2008:
            self[date(year, FEB, 1) + rd(weekday=MO(+3))] = "Family Day"
        elif self.prov in ("AB", "SK") and year >= 2007:
            self[date(year, FEB, 1) + rd(weekday=MO(+3))] = "Family Day"
        elif self.prov == "AB" and year >= 1990:
            self[date(year, FEB, 1) + rd(weekday=MO(+3))] = "Family Day"
        elif self.prov == "NB" and year >= 2018:
            self[date(year, FEB, 1) + rd(weekday=MO(+3))] = "Family Day"
        elif self.prov == "BC":
            if year >= 2013 and year <= 2018:
                self[date(year, FEB, 1) + rd(weekday=MO(+2))] = "Family Day"
            elif year > 2018:
                self[date(year, FEB, 1) + rd(weekday=MO(+3))] = "Family Day"
        elif self.prov == "MB" and year >= 2008:
            self[date(year, FEB, 1) + rd(weekday=MO(+3))] = "Louis Riel Day"
        elif self.prov == "PE" and year >= 2010:
            self[date(year, FEB, 1) + rd(weekday=MO(+3))] = "Islander Day"
        elif self.prov == "PE" and year == 2009:
            self[date(year, FEB, 1) + rd(weekday=MO(+2))] = "Islander Day"
        elif self.prov == "NS" and year >= 2015:
            # http://novascotia.ca/lae/employmentrights/NovaScotiaHeritageDay.asp
            self[date(year, FEB, 1) + rd(weekday=MO(+3))] = "Heritage Day"
        elif self.prov == "YT":
            # start date?
            # http://heritageyukon.ca/programs/heritage-day
            # https://en.wikipedia.org/wiki/Family_Day_(Canada)#Yukon_Heritage_Day
            # Friday before the last Sunday in February
            dt = date(year, MAR, 1) + rd(weekday=SU(-1)) + rd(weekday=FR(-1))
            self[dt] = "Heritage Day"
        # St. Patrick's Day
        if self.prov == "NL" and year >= 1900:
            dt = date(year, MAR, 17)
            # Nearest Monday to March 17
            dt1 = date(year, MAR, 17) + rd(weekday=MO(-1))
            dt2 = date(year, MAR, 17) + rd(weekday=MO(+1))
            if dt2 - dt <= dt - dt1:
                self[dt2] = "St. Patrick's Day"
            else:
                self[dt1] = "St. Patrick's Day"
        if year >= 1867:
            # Good Friday
            self[easter(year) + rd(weekday=FR(-1))] = "Good Friday"
            # Easter Monday
            self[easter(year) + rd(weekday=MO)] = "Easter Monday"
        # St. George's Day
        if self.prov == "NL" and year == 2010:
            # 4/26 is the Monday closer to 4/23 in 2010
            # but the holiday was observed on 4/19? Crazy Newfies!
            self[date(2010, 4, 19)] = "St. George's Day"
        elif self.prov == "NL" and year >= 1990:
            dt = date(year, APR, 23)
            # Nearest Monday to April 23
            dt1 = dt + rd(weekday=MO(-1))
            dt2 = dt + rd(weekday=MO(+1))
            if dt2 - dt < dt - dt1:
                self[dt2] = "St. George's Day"
            else:
                self[dt1] = "St. George's Day"
        # Victoria Day / National Patriots' Day (QC)
        if self.prov not in ("NB", "NS", "PE", "NL", "QC") and year >= 1953:
            self[date(year, MAY, 24) + rd(weekday=MO(-1))] = "Victoria Day"
        elif self.prov == "QC" and year >= 1953:
            name = "National Patriots' Day"
            self[date(year, MAY, 24) + rd(weekday=MO(-1))] = name
        # National Aboriginal Day
        if self.prov == "NT" and year >= 1996:
            self[date(year, JUN, 21)] = "National Aboriginal Day"
        # St. Jean Baptiste Day
        if self.prov == "QC" and year >= 1925:
            self[date(year, JUN, 24)] = "St. Jean Baptiste Day"
            if self.observed and date(year, JUN, 24).weekday() == SUN:
                self[date(year, JUN, 25)] = "St. Jean Baptiste Day (Observed)"
        # Discovery Day
        if self.prov == "NL" and year >= 1997:
            dt = date(year, JUN, 24)
            # Nearest Monday to June 24
            dt1 = dt + rd(weekday=MO(-1))
            dt2 = dt + rd(weekday=MO(+1))
            if dt2 - dt <= dt - dt1:
                self[dt2] = "Discovery Day"
            else:
                self[dt1] = "Discovery Day"
        elif self.prov == "YT" and year >= 1912:
            self[date(year, AUG, 1) + rd(weekday=MO(+3))] = "Discovery Day"
        # Canada Day / Memorial Day (NL)
        if self.prov != "NL" and year >= 1867:
            if year >= 1983:
                name = "Canada Day"
            else:
                name = "Dominion Day"
            self[date(year, JUL, 1)] = name
            if (
                year >= 1879
                and self.observed
                and date(year, JUL, 1).weekday() in WEEKEND
            ):
                self[date(year, JUL, 1) + rd(weekday=MO)] = (
                    name + " (Observed)"
                )
        elif year >= 1867:
            if year >= 1983:
                name = "Memorial Day"
            else:
                name = "Dominion Day"
            self[date(year, JUL, 1)] = name
            if (
                year >= 1879
                and self.observed
                and date(year, JUL, 1).weekday() in WEEKEND
            ):
                self[date(year, JUL, 1) + rd(weekday=MO)] = (
                    name + " (Observed)"
                )
        # Nunavut Day
        if self.prov == "NU" and year >= 2001:
            self[date(year, JUL, 9)] = "Nunavut Day"
            if self.observed and date(year, JUL, 9).weekday() == SUN:
                self[date(year, JUL, 10)] = "Nunavut Day (Observed)"
        elif self.prov == "NU" and year == 2000:
            self[date(2000, 4, 1)] = "Nunavut Day"
        # Civic Holiday
        if self.prov in ("ON", "MB", "NT") and year >= 1900:
            self[date(year, AUG, 1) + rd(weekday=MO)] = "Civic Holiday"
        elif self.prov == "AB" and year >= 1974:
            # https://en.wikipedia.org/wiki/Civic_Holiday#Alberta
            self[date(year, AUG, 1) + rd(weekday=MO)] = "Heritage Day"
        elif self.prov == "BC" and year >= 1974:
            # https://en.wikipedia.org/wiki/Civic_Holiday
            self[date(year, AUG, 1) + rd(weekday=MO)] = "British Columbia Day"
        elif self.prov == "NB" and year >= 1900:
            # https://en.wikipedia.org/wiki/Civic_Holiday
            self[date(year, AUG, 1) + rd(weekday=MO)] = "New Brunswick Day"
        elif self.prov == "SK" and year >= 1900:
            # https://en.wikipedia.org/wiki/Civic_Holiday
            self[date(year, AUG, 1) + rd(weekday=MO)] = "Saskatchewan Day"
        # Labour Day
        if year >= 1894:
            self[date(year, SEP, 1) + rd(weekday=MO)] = "Labour Day"
        # National Day for Truth and Reconciliation
        provinces = ("MB", "NS")
        if self.prov in provinces and year >= 2021:
            self[date(year, SEP, 30)] = "National Day for Truth and Reconciliation"
        # Thanksgiving
        if self.prov not in ("NB", "NS", "PE", "NL") and year >= 1931:
            if year == 1935:
                # in 1935, Canadian Thanksgiving was moved due to the General
                # Election falling on the second Monday of October
                # https://books.google.ca/books?id=KcwlQsmheG4C&pg=RA1-PA1940&lpg=RA1-PA1940&dq=canada+thanksgiving+1935&source=bl&ots=j4qYrcfGuY&sig=gxXeAQfXVsOF9fOwjSMswPHJPpM&hl=en&sa=X&ved=0ahUKEwjO0f3J2PjOAhVS4mMKHRzKBLAQ6AEIRDAG#v=onepage&q=canada%20thanksgiving%201935&f=false
                self[date(1935, 10, 25)] = "Thanksgiving"
            else:
                self[date(year, OCT, 1) + rd(weekday=MO(+2))] = "Thanksgiving"
        # Remembrance Day
        name = "Remembrance Day"
        provinces = ("ON", "QC", "NS", "NL", "NT", "PE", "SK")
        if self.prov not in provinces and year >= 1931:
            self[date(year, NOV, 11)] = name
        elif self.prov in ("NS", "NL", "NT", "PE", "SK") and year >= 1931:
            self[date(year, NOV, 11)] = name
            if self.observed and date(year, NOV, 11).weekday() == SUN:
                name = name + " (Observed)"
                self[date(year, NOV, 11) + rd(weekday=MO)] = name
        # Christmas Day
        if year >= 1867:
            self[date(year, DEC, 25)] = "Christmas Day"
            if self.observed and date(year, DEC, 25).weekday() == SAT:
                self[date(year, DEC, 24)] = "Christmas Day (Observed)"
            elif self.observed and date(year, DEC, 25).weekday() == SUN:
                self[date(year, DEC, 26)] = "Christmas Day (Observed)"
        # Boxing Day
        if year >= 1867:
            name = "Boxing Day"
            name_observed = name + " (Observed)"
            if self.observed and date(year, DEC, 26).weekday() in WEEKEND:
                self[date(year, DEC, 26) + rd(weekday=MO)] = name_observed
            elif self.observed and date(year, DEC, 26).weekday() == 0:
                self[date(year, DEC, 27)] = name_observed
            else:
                self[date(year, DEC, 26)] = name
class CA(Canada):
    """ISO 3166-1 alpha-2 alias for :class:`Canada`."""
    pass
class CAN(Canada):
    """ISO 3166-1 alpha-3 alias for :class:`Canada`."""
    pass
flake8 compliance
# -*- coding: utf-8 -*-
# python-holidays
# ---------------
# A fast, efficient Python library for generating country, province and state
# specific sets of holidays on the fly. It aims to make determining whether a
# specific date is a holiday as fast and flexible as possible.
#
# Author: ryanss <ryanssdev@icloud.com> (c) 2014-2017
# dr-prodigy <maurizio.montel@gmail.com> (c) 2017-2021
# Website: https://github.com/dr-prodigy/python-holidays
# License: MIT (see LICENSE file)
from datetime import date
from dateutil.easter import easter
from dateutil.relativedelta import relativedelta as rd, MO, SU, FR
from holidays.constants import FRI, SAT, SUN, WEEKEND
from holidays.constants import (
JAN,
FEB,
MAR,
APR,
MAY,
JUN,
JUL,
AUG,
SEP,
OCT,
NOV,
DEC,
)
from holidays.holiday_base import HolidayBase
class Canada(HolidayBase):
    """Canadian national and provincial/territorial holidays.

    The province/territory is selected with the ``prov`` keyword (two
    letter code from :data:`PROVINCES`); holidays are generated lazily
    per year by :meth:`_populate`.
    """
    # ISO 3166-2 codes of the 13 supported provinces and territories.
    PROVINCES = [
        "AB",
        "BC",
        "MB",
        "NB",
        "NL",
        "NS",
        "NT",
        "NU",
        "ON",
        "PE",
        "QC",
        "SK",
        "YT",
    ]
    def __init__(self, **kwargs):
        """Create a Canadian calendar; ``prov`` defaults to "ON"."""
        self.country = "CA"
        self.prov = kwargs.pop("prov", "ON")
        HolidayBase.__init__(self, **kwargs)
    def _populate(self, year):
        """Add all holidays for *year* (observed shifts included when
        ``self.observed`` is true)."""
        # New Year's Day
        if year >= 1867:
            name = "New Year's Day"
            self[date(year, JAN, 1)] = name
            if self.observed and date(year, JAN, 1).weekday() == SUN:
                self[date(year, JAN, 1) + rd(days=+1)] = name + " (Observed)"
            elif self.observed and date(year, JAN, 1).weekday() == SAT:
                # Add Dec 31st from the previous year without triggering
                # the entire year to be added
                expand = self.expand
                self.expand = False
                self[date(year, JAN, 1) + rd(days=-1)] = name + " (Observed)"
                self.expand = expand
            # The next year's observed New Year's Day can be in this year
            # when it falls on a Friday (Jan 1st is a Saturday)
            if self.observed and date(year, DEC, 31).weekday() == FRI:
                self[date(year, DEC, 31)] = name + " (Observed)"
        # Family Day / Louis Riel Day (MB) / Islander Day (PE)
        # / Heritage Day (NS, YT)
        if self.prov in ("AB", "SK", "ON") and year >= 2008:
            self[date(year, FEB, 1) + rd(weekday=MO(+3))] = "Family Day"
        elif self.prov in ("AB", "SK") and year >= 2007:
            self[date(year, FEB, 1) + rd(weekday=MO(+3))] = "Family Day"
        elif self.prov == "AB" and year >= 1990:
            self[date(year, FEB, 1) + rd(weekday=MO(+3))] = "Family Day"
        elif self.prov == "NB" and year >= 2018:
            self[date(year, FEB, 1) + rd(weekday=MO(+3))] = "Family Day"
        elif self.prov == "BC":
            if year >= 2013 and year <= 2018:
                self[date(year, FEB, 1) + rd(weekday=MO(+2))] = "Family Day"
            elif year > 2018:
                self[date(year, FEB, 1) + rd(weekday=MO(+3))] = "Family Day"
        elif self.prov == "MB" and year >= 2008:
            self[date(year, FEB, 1) + rd(weekday=MO(+3))] = "Louis Riel Day"
        elif self.prov == "PE" and year >= 2010:
            self[date(year, FEB, 1) + rd(weekday=MO(+3))] = "Islander Day"
        elif self.prov == "PE" and year == 2009:
            self[date(year, FEB, 1) + rd(weekday=MO(+2))] = "Islander Day"
        elif self.prov == "NS" and year >= 2015:
            # http://novascotia.ca/lae/employmentrights/NovaScotiaHeritageDay.asp
            self[date(year, FEB, 1) + rd(weekday=MO(+3))] = "Heritage Day"
        elif self.prov == "YT":
            # start date?
            # http://heritageyukon.ca/programs/heritage-day
            # https://en.wikipedia.org/wiki/Family_Day_(Canada)#Yukon_Heritage_Day
            # Friday before the last Sunday in February
            dt = date(year, MAR, 1) + rd(weekday=SU(-1)) + rd(weekday=FR(-1))
            self[dt] = "Heritage Day"
        # St. Patrick's Day
        if self.prov == "NL" and year >= 1900:
            dt = date(year, MAR, 17)
            # Nearest Monday to March 17
            dt1 = date(year, MAR, 17) + rd(weekday=MO(-1))
            dt2 = date(year, MAR, 17) + rd(weekday=MO(+1))
            if dt2 - dt <= dt - dt1:
                self[dt2] = "St. Patrick's Day"
            else:
                self[dt1] = "St. Patrick's Day"
        if year >= 1867:
            # Good Friday
            self[easter(year) + rd(weekday=FR(-1))] = "Good Friday"
            # Easter Monday
            self[easter(year) + rd(weekday=MO)] = "Easter Monday"
        # St. George's Day
        if self.prov == "NL" and year == 2010:
            # 4/26 is the Monday closer to 4/23 in 2010
            # but the holiday was observed on 4/19? Crazy Newfies!
            self[date(2010, 4, 19)] = "St. George's Day"
        elif self.prov == "NL" and year >= 1990:
            dt = date(year, APR, 23)
            # Nearest Monday to April 23
            dt1 = dt + rd(weekday=MO(-1))
            dt2 = dt + rd(weekday=MO(+1))
            if dt2 - dt < dt - dt1:
                self[dt2] = "St. George's Day"
            else:
                self[dt1] = "St. George's Day"
        # Victoria Day / National Patriots' Day (QC)
        if self.prov not in ("NB", "NS", "PE", "NL", "QC") and year >= 1953:
            self[date(year, MAY, 24) + rd(weekday=MO(-1))] = "Victoria Day"
        elif self.prov == "QC" and year >= 1953:
            name = "National Patriots' Day"
            self[date(year, MAY, 24) + rd(weekday=MO(-1))] = name
        # National Aboriginal Day
        if self.prov == "NT" and year >= 1996:
            self[date(year, JUN, 21)] = "National Aboriginal Day"
        # St. Jean Baptiste Day
        if self.prov == "QC" and year >= 1925:
            self[date(year, JUN, 24)] = "St. Jean Baptiste Day"
            if self.observed and date(year, JUN, 24).weekday() == SUN:
                self[date(year, JUN, 25)] = "St. Jean Baptiste Day (Observed)"
        # Discovery Day
        if self.prov == "NL" and year >= 1997:
            dt = date(year, JUN, 24)
            # Nearest Monday to June 24
            dt1 = dt + rd(weekday=MO(-1))
            dt2 = dt + rd(weekday=MO(+1))
            if dt2 - dt <= dt - dt1:
                self[dt2] = "Discovery Day"
            else:
                self[dt1] = "Discovery Day"
        elif self.prov == "YT" and year >= 1912:
            self[date(year, AUG, 1) + rd(weekday=MO(+3))] = "Discovery Day"
        # Canada Day / Memorial Day (NL)
        if self.prov != "NL" and year >= 1867:
            if year >= 1983:
                name = "Canada Day"
            else:
                name = "Dominion Day"
            self[date(year, JUL, 1)] = name
            if (
                year >= 1879
                and self.observed
                and date(year, JUL, 1).weekday() in WEEKEND
            ):
                self[date(year, JUL, 1) + rd(weekday=MO)] = (
                    name + " (Observed)"
                )
        elif year >= 1867:
            if year >= 1983:
                name = "Memorial Day"
            else:
                name = "Dominion Day"
            self[date(year, JUL, 1)] = name
            if (
                year >= 1879
                and self.observed
                and date(year, JUL, 1).weekday() in WEEKEND
            ):
                self[date(year, JUL, 1) + rd(weekday=MO)] = (
                    name + " (Observed)"
                )
        # Nunavut Day
        if self.prov == "NU" and year >= 2001:
            self[date(year, JUL, 9)] = "Nunavut Day"
            if self.observed and date(year, JUL, 9).weekday() == SUN:
                self[date(year, JUL, 10)] = "Nunavut Day (Observed)"
        elif self.prov == "NU" and year == 2000:
            self[date(2000, 4, 1)] = "Nunavut Day"
        # Civic Holiday
        if self.prov in ("ON", "MB", "NT") and year >= 1900:
            self[date(year, AUG, 1) + rd(weekday=MO)] = "Civic Holiday"
        elif self.prov == "AB" and year >= 1974:
            # https://en.wikipedia.org/wiki/Civic_Holiday#Alberta
            self[date(year, AUG, 1) + rd(weekday=MO)] = "Heritage Day"
        elif self.prov == "BC" and year >= 1974:
            # https://en.wikipedia.org/wiki/Civic_Holiday
            self[date(year, AUG, 1) + rd(weekday=MO)] = "British Columbia Day"
        elif self.prov == "NB" and year >= 1900:
            # https://en.wikipedia.org/wiki/Civic_Holiday
            self[date(year, AUG, 1) + rd(weekday=MO)] = "New Brunswick Day"
        elif self.prov == "SK" and year >= 1900:
            # https://en.wikipedia.org/wiki/Civic_Holiday
            self[date(year, AUG, 1) + rd(weekday=MO)] = "Saskatchewan Day"
        # Labour Day
        if year >= 1894:
            self[date(year, SEP, 1) + rd(weekday=MO)] = "Labour Day"
        # National Day for Truth and Reconciliation
        provinces = ("MB", "NS")
        if self.prov in provinces and year >= 2021:
            self[
                date(year, SEP, 30)
            ] = "National Day for Truth and Reconciliation"
        # Thanksgiving
        if self.prov not in ("NB", "NS", "PE", "NL") and year >= 1931:
            if year == 1935:
                # in 1935, Canadian Thanksgiving was moved due to the General
                # Election falling on the second Monday of October
                # https://books.google.ca/books?id=KcwlQsmheG4C&pg=RA1-PA1940&lpg=RA1-PA1940&dq=canada+thanksgiving+1935&source=bl&ots=j4qYrcfGuY&sig=gxXeAQfXVsOF9fOwjSMswPHJPpM&hl=en&sa=X&ved=0ahUKEwjO0f3J2PjOAhVS4mMKHRzKBLAQ6AEIRDAG#v=onepage&q=canada%20thanksgiving%201935&f=false
                self[date(1935, 10, 25)] = "Thanksgiving"
            else:
                self[date(year, OCT, 1) + rd(weekday=MO(+2))] = "Thanksgiving"
        # Remembrance Day
        name = "Remembrance Day"
        provinces = ("ON", "QC", "NS", "NL", "NT", "PE", "SK")
        if self.prov not in provinces and year >= 1931:
            self[date(year, NOV, 11)] = name
        elif self.prov in ("NS", "NL", "NT", "PE", "SK") and year >= 1931:
            self[date(year, NOV, 11)] = name
            if self.observed and date(year, NOV, 11).weekday() == SUN:
                name = name + " (Observed)"
                self[date(year, NOV, 11) + rd(weekday=MO)] = name
        # Christmas Day
        if year >= 1867:
            self[date(year, DEC, 25)] = "Christmas Day"
            if self.observed and date(year, DEC, 25).weekday() == SAT:
                self[date(year, DEC, 24)] = "Christmas Day (Observed)"
            elif self.observed and date(year, DEC, 25).weekday() == SUN:
                self[date(year, DEC, 26)] = "Christmas Day (Observed)"
        # Boxing Day
        if year >= 1867:
            name = "Boxing Day"
            name_observed = name + " (Observed)"
            if self.observed and date(year, DEC, 26).weekday() in WEEKEND:
                self[date(year, DEC, 26) + rd(weekday=MO)] = name_observed
            elif self.observed and date(year, DEC, 26).weekday() == 0:
                self[date(year, DEC, 27)] = name_observed
            else:
                self[date(year, DEC, 26)] = name
class CA(Canada):
    """ISO 3166-1 alpha-2 alias for :class:`Canada`."""
    pass
class CAN(Canada):
    """ISO 3166-1 alpha-3 alias for :class:`Canada`."""
    pass
|
from tastypie.resources import ModelResource, ALL_WITH_RELATIONS, ALL
from tastypie.authorization import Authorization
from tastypie import fields
from models import (HeadTeacher, SchoolData, TeacherPerformanceData,
LearnerPerformanceData, InboundSMS,
AcademicAchievementCode, DistrictAdminUser)
from django.conf.urls import url
# Project
from rts.utils import (CSVSerializer, CSVModelResource,
OverrideApiAuthentication)
class DistrictAdminUserResource(ModelResource):
    """
    REST resource for district administrator users.

    GET::

        "url": "<base_url>/api/v1/district_admin/",
        "method": "GET"

    POST::

        "url": "<base_url>/api/v1/district_admin/",
        "method": "POST",
        "content_type": "application/json",
        "body": {"first_name": "test_first_name",
                 "last_name": "test_last_name",
                 "date_of_birth": "2012-10-12T10:00:00",
                 "district": "/api/v1/district/1/",
                 "id_number": "za123456789"}
    """
    # full=True embeds the related district object in each response.
    district = fields.ForeignKey("hierarchy.api.DistrictResource", 'district', full=True)
    class Meta:
        queryset = DistrictAdminUser.objects.all()
        resource_name = "district_admin"
        list_allowed_methods = ['post', 'get']
        authorization = Authorization()
        include_resource_uri = True
        always_return_data = True
class HeadTeacherResource(ModelResource):
    """
    REST resource for head teachers.

    POST::

        "url": "<base_url>/api/v1/data/headteacher/",
        "method": "POST",
        "content_type": "application/json",
        "body": {
            "first_name": "abc",
            "last_name": "def",
            "created_at": "2012-10-12T10:00:00Z",
            "date_of_birth": "1962-10-12T10:00:00Z",
            "gender": "male",
            "msisdn": "0726961764",
            "emis": "/api/v1/school/emis/4811/"
        }

    GET by school EMIS::

        "url": "<base_url>/api/v1/data/headteacher/?emis__emis=4817",
        "method": "GET"

    Filter zonal heads (True/true/1 or False/false/0)::

        "url": "<base_url>/api/v1/data/headteacher/?is_zonal_head=True",
        "method": "GET"
    """
    # full=True embeds the related school object in each response.
    emis = fields.ForeignKey("hierarchy.api.SchoolResource", 'emis', full=True)
    class Meta:
        queryset = HeadTeacher.objects.all()
        resource_name = "data/headteacher"
        list_allowed_methods = ['post', 'get', 'put']
        authorization = Authorization()
        include_resource_uri = True
        always_return_data = True
        filtering = {
            'emis': ALL_WITH_RELATIONS,
            'is_zonal_head': ALL}
    def prepend_urls(self):
        # Allow detail lookup by EMIS code instead of primary key:
        # /api/v1/data/headteacher/emis/<emis>/
        return [
            url(r"^(?P<resource_name>%s)/emis/(?P<emis>[\w\d_.-]+)/$" %
                self._meta.resource_name, self.wrap_view('dispatch_detail'),
                name="api_dispatch_detail"),
        ]
class SchoolDataResource(ModelResource):
    """
    REST resource for per-school census data.

    POST::

        "url": "<base_url>/api/v1/data/school/",
        "method": "POST",
        "content_type": "application/json",
        "body": {
            "name": "test_name",
            "classrooms": 30,
            "teachers": 40,
            "teachers_g1": 4,
            "teachers_g2": 8,
            "boys_g2": 15,
            "girls_g2": 12,
            "created_by": "/api/data/headteacher/emis/4813/",
            "emis": "/api/v1/school/emis/4813/"
        }
    """
    # full=True embeds the related school and head teacher objects.
    emis = fields.ForeignKey("hierarchy.api.SchoolResource", 'emis', full=True)
    created_by = fields.ForeignKey(HeadTeacherResource,
                                   'created_by', full=True)
    class Meta:
        queryset = SchoolData.objects.all()
        resource_name = "data/school"
        list_allowed_methods = ['post', 'get']
        authorization = Authorization()
        include_resource_uri = True
        always_return_data = True
        filtering = {
            'created_by': ALL_WITH_RELATIONS,
            'emis': ALL_WITH_RELATIONS}
class AcademicAchievementCodeResource(ModelResource):
    """
    Read-only lookup of academic achievement codes.

    GET::

        "url": "<base_url>/api/v1/data/achievement/<id>/",
        "method": "GET"
    """
    class Meta:
        queryset = AcademicAchievementCode.objects.all()
        resource_name = "data/achievement"
        list_allowed_methods = ['get']
        authorization = Authorization()
        include_resource_uri = True
        always_return_data = True
class TeacherPerformanceDataResource(ModelResource):
    """
    REST resource for teacher performance observations.

    POST::

        "url": "<base_url>/api/v1/data/teacherperformance/",
        "body": {
            "data": "data",
            "academic_level": "/api/data/achievement/8/",
            "created_by": "/api/data/headteacher/emis/4813/",
            "emis": "/api/v1/school/emis/4813/"
        }
    """
    # full=True embeds the related school, head teacher and achievement
    # code objects in each response.
    emis = fields.ForeignKey("hierarchy.api.SchoolResource", 'emis', full=True)
    created_by = fields.ForeignKey(HeadTeacherResource,
                                   'created_by', full=True)
    academic_level = fields.ForeignKey(AcademicAchievementCodeResource,
                                       'academic_level', full=True)
    class Meta:
        queryset = TeacherPerformanceData.objects.all()
        resource_name = "data/teacherperformance"
        list_allowed_methods = ['post', 'get']
        authorization = Authorization()
        include_resource_uri = True
        always_return_data = True
        filtering = {
            'created_by': ALL_WITH_RELATIONS,
            'emis': ALL_WITH_RELATIONS}
class LearnerPerformanceDataResource(ModelResource):
    """
    REST resource for learner performance observations.

    POST::

        "url": "<base_url>/api/v1/data/learnerperformance/",
        "body": {
            "data": "data",
            "created_by": "/api/v1/data/headteacher/emis/4813/",
            "emis": "/api/v1/school/emis/4813/"
        }
    """
    # full=True embeds the related school and head teacher objects.
    emis = fields.ForeignKey("hierarchy.api.SchoolResource", 'emis', full=True)
    created_by = fields.ForeignKey(HeadTeacherResource,
                                   'created_by', full=True)
    class Meta:
        queryset = LearnerPerformanceData.objects.all()
        resource_name = "data/learnerperformance"
        list_allowed_methods = ['post', 'get']
        authorization = Authorization()
        include_resource_uri = True
        always_return_data = True
        filtering = {
            'created_by': ALL_WITH_RELATIONS,
            'emis': ALL_WITH_RELATIONS}
class InboundSMSResource(ModelResource):
    """
    REST resource for inbound SMS messages.

    GET::

        "url": "<base_url>/api/v1/data/sms/",
        "method": "GET"

    POST::

        "url": "<base_url>/api/v1/data/sms/",
        "body": {
            "message": "test_name",
            "created_by": "/api/v1/data/sms/1/"
        }
    """
    # full=True embeds the sending head teacher in each response.
    created_by = fields.ForeignKey(HeadTeacherResource,
                                   'created_by', full=True)
    class Meta:
        queryset = InboundSMS.objects.all()
        resource_name = "data/sms"
        list_allowed_methods = ['post', 'get']
        authorization = Authorization()
        include_resource_uri = True
        always_return_data = True
        filtering = {
            'created_by': ALL_WITH_RELATIONS}
# =========================================================================
# This is the CSV download function
# =========================================================================
class HeadTeacherCSVDownloadResource(CSVModelResource):
    """
    CSV export of head teachers (API-key authenticated).

    GET::

        "url": "<base_url>/api/v1/csv/data/headteacher/?username=name&api_key=key",
        "method": "GET"
    """
    emis = fields.ForeignKey("hierarchy.api.SchoolResource", 'emis')
    class Meta:
        queryset = HeadTeacher.objects.all()
        resource_name = "csv/data/headteacher"
        list_allowed_methods = ['get']
        include_resource_uri = False
        serializer = CSVSerializer()  # Using custom serializer
        authentication = OverrideApiAuthentication()
    def dehydrate(self, bundle):
        # Flatten the FK to a raw id so the CSV column holds a scalar.
        bundle.data['emis'] = bundle.obj.emis.id
        return bundle
class SchoolDataCSVDownloadResource(CSVModelResource):
    """
    CSV export of school data (API-key authenticated).

    GET::

        "url": "<base_url>/api/v1/csv/data/school/?username=name&api_key=key",
        "method": "GET"
    """
    emis = fields.ForeignKey("hierarchy.api.SchoolResource", 'emis')
    created_by = fields.ForeignKey(HeadTeacherResource, 'created_by')
    class Meta:
        queryset = SchoolData.objects.all()
        resource_name = "csv/data/school"
        list_allowed_methods = ['get']
        include_resource_uri = False
        serializer = CSVSerializer()  # Using custom serializer
        authentication = OverrideApiAuthentication()
    def dehydrate(self, bundle):
        # Flatten FKs to raw ids so the CSV columns hold scalars.
        bundle.data['emis'] = bundle.obj.emis.id
        bundle.data['created_by'] = bundle.obj.created_by.id
        return bundle
class AcademicAchievementCodeCSVDownloadResource(CSVModelResource):
    """
    CSV export of academic achievement codes (API-key authenticated).

    GET::

        "url": "<base_url>/api/v1/csv/data/achievement/?username=name&api_key=key",
        "method": "GET"
    """
    class Meta:
        queryset = AcademicAchievementCode.objects.all()
        resource_name = "csv/data/achievement"
        list_allowed_methods = ['get']
        authorization = Authorization()
        include_resource_uri = False
        serializer = CSVSerializer()  # Using custom serializer
        authentication = OverrideApiAuthentication()
class TeacherPerformanceDataCSVDownloadResource(CSVModelResource):
    """
    CSV export of teacher performance data (API-key authenticated).

    GET::

        "url": "<base_url>/api/v1/csv/data/teacherperformance/?username=name&api_key=key",
        "method": "GET"
    """
    emis = fields.ForeignKey("hierarchy.api.SchoolResource", 'emis')
    created_by = fields.ForeignKey(HeadTeacherResource, 'created_by')
    academic_level = fields.ForeignKey(AcademicAchievementCodeResource,
                                       'academic_level')
    class Meta:
        queryset = TeacherPerformanceData.objects.all()
        resource_name = "csv/data/teacherperformance"
        list_allowed_methods = ['get']
        include_resource_uri = False
        serializer = CSVSerializer()  # Using custom serializer
        authentication = OverrideApiAuthentication()
    def dehydrate(self, bundle):
        # Flatten FKs to raw ids so the CSV columns hold scalars.
        bundle.data['emis'] = bundle.obj.emis.id
        bundle.data['created_by'] = bundle.obj.created_by.id
        bundle.data['academic_level'] = bundle.obj.academic_level.id
        return bundle
class LearnerPerformanceDataCSVDownloadResource(CSVModelResource):
    """
    CSV export of learner performance data (API-key authenticated).

    GET::

        "url": "<base_url>/api/v1/csv/data/learnerperformance/?username=name&api_key=key",
        "method": "GET"
    """
    emis = fields.ForeignKey("hierarchy.api.SchoolResource", 'emis')
    created_by = fields.ForeignKey(HeadTeacherResource, 'created_by')
    class Meta:
        queryset = LearnerPerformanceData.objects.all()
        resource_name = "csv/data/learnerperformance"
        list_allowed_methods = ['get']
        include_resource_uri = False
        serializer = CSVSerializer()  # Using custom serializer
        authentication = OverrideApiAuthentication()
    def dehydrate(self, bundle):
        # Flatten FKs to raw ids so the CSV columns hold scalars.
        bundle.data['emis'] = bundle.obj.emis.id
        bundle.data['created_by'] = bundle.obj.created_by.id
        return bundle
class InboundSMSCSVDownloadResource(CSVModelResource):
    """
    CSV export of inbound SMS messages (API-key authenticated).

    GET::

        "url": "<base_url>/api/v1/csv/data/sms/?username=name&api_key=key",
        "method": "GET"
    """
    created_by = fields.ForeignKey(HeadTeacherResource, 'created_by')
    class Meta:
        queryset = InboundSMS.objects.all()
        resource_name = "csv/data/sms"
        list_allowed_methods = ['get']
        include_resource_uri = False
        serializer = CSVSerializer()  # Using custom serializer
        authentication = OverrideApiAuthentication()
    def dehydrate(self, bundle):
        # Flatten the FK to a raw id so the CSV column holds a scalar.
        bundle.data['created_by'] = bundle.obj.created_by.id
        return bundle
added related key to teacher performance data
from tastypie.resources import ModelResource, ALL_WITH_RELATIONS, ALL
from tastypie.authorization import Authorization
from tastypie import fields
from models import (HeadTeacher, SchoolData, TeacherPerformanceData,
LearnerPerformanceData, InboundSMS,
AcademicAchievementCode, DistrictAdminUser)
from django.conf.urls import url
# Project
from rts.utils import (CSVSerializer, CSVModelResource,
OverrideApiAuthentication)
class DistrictAdminUserResource(ModelResource):
    """
    GET District Admin
    ::
        "url": "<base_url>/api/v1/district_admin/,
        "method": "GET",
    POST District Admin
    ::
        "url": "<base_url>/api/v1/district_admin/,
        "method": "POST",
        "content_type": "application/json",
        "body": {"first_name": "test_first_name",
                 "last_name": "test_last_name",
                 "date_of_birth": "2012-10-12T10:00:00",
                 "district": "/api/v1/district/1/",
                 "id_number": "za123456789"}
    """
    # full=True embeds the whole district record in responses rather than
    # only its resource URI.
    district = fields.ForeignKey("hierarchy.api.DistrictResource", 'district', full=True)

    class Meta:
        queryset = DistrictAdminUser.objects.all()
        resource_name = "district_admin"
        list_allowed_methods = ['post', 'get']
        # NOTE(review): Authorization() grants unrestricted access -- confirm
        # this endpoint is meant to be unauthenticated.
        authorization = Authorization()
        include_resource_uri = True
        always_return_data = True
class HeadTeacherResource(ModelResource):
    """
    POSTING DATA
    {
        "url": "<base_url>/api/v1/data/headteacher/",
        "method": "POST",
        "content_type": "application/json",
        "body": {
            "first_name": "abc",
            "last_name": "def",
            "created_at": "2012-10-12T10:00:00Z",
            "date_of_birth": "1962-10-12T10:00:00Z",
            "gender": "male",
            "msisdn": "0726961764",
            "emis": "/api/v1/school/emis/4811/"
        }
    GET SPECIFIC EMIS
        "url": "<base_url>/api/v1/data/headteacher/?emis__emis=4817",
        "method": "GET",
    IS_ZONAL_HEAD
        "url": "<base_url>/api/v1/data/headteacher/?is_zonal_head=True",
        "method": "GET",
        # Filter can be [True, true, 1] for true and [False, false, 0] for false
    """
    # full=True embeds the school record in responses.
    emis = fields.ForeignKey("hierarchy.api.SchoolResource", 'emis', full=True)

    class Meta:
        queryset = HeadTeacher.objects.all()
        resource_name = "data/headteacher"
        list_allowed_methods = ['post', 'get', 'put']
        authorization = Authorization()
        include_resource_uri = True
        always_return_data = True
        filtering = {
            'emis': ALL_WITH_RELATIONS,  # allow ?emis__emis=<code> lookups
            'is_zonal_head': ALL}

    def prepend_urls(self):
        # Extra detail route so a head teacher can be fetched by school EMIS
        # code: /data/headteacher/emis/<emis>/
        return [
            url(r"^(?P<resource_name>%s)/emis/(?P<emis>[\w\d_.-]+)/$" %
                self._meta.resource_name, self.wrap_view('dispatch_detail'),
                name="api_dispatch_detail"),
        ]
class SchoolDataResource(ModelResource):
    """
    POSTING DATA
        "url": "<base_url>/api/v1/data/school/",
        "method": "POST",
        "content_type": "application/json",
        "body": {
            "name": "test_name",
            "classrooms": 30,
            "teachers": 40,
            "teachers_g1": 4,
            "teachers_g2": 8,
            "boys_g2": 15,
            "girls_g2": 12,
            "created_by": "/api/data/headteacher/emis/4813/",
            "emis": "/api/v1/school/emis/4813/"
        }
    """
    # full=True embeds the related school and head teacher in responses.
    emis = fields.ForeignKey("hierarchy.api.SchoolResource", 'emis', full=True)
    created_by = fields.ForeignKey(HeadTeacherResource,
                                   'created_by', full=True)

    class Meta:
        queryset = SchoolData.objects.all()
        resource_name = "data/school"
        list_allowed_methods = ['post', 'get']
        authorization = Authorization()
        include_resource_uri = True
        always_return_data = True
        filtering = {
            'created_by': ALL_WITH_RELATIONS,
            'emis': ALL_WITH_RELATIONS}
class AcademicAchievementCodeResource(ModelResource):
    """
    GET a specific academic achievement code
        "url": "<base_url>/api/v1/data/achievement/<id>/,
        "method": "GET",
    """
    class Meta:
        queryset = AcademicAchievementCode.objects.all()
        resource_name = "data/achievement"
        list_allowed_methods = ['get']  # read-only code table
        authorization = Authorization()
        include_resource_uri = True
        always_return_data = True
class TeacherPerformanceDataResource(ModelResource):
    """
    POSTING DATA
        "url": "<base_url>/api/v1/data/teacherperformance/",
        "body": {
            "data": "data",
            "academic_level": "/api/data/achievement/8/",
            "created_by": "/api/data/headteacher/emis/4813/",
            "emis": "/api/v1/school/emis/4813/"
        }
    """
    emis = fields.ForeignKey("hierarchy.api.SchoolResource", 'emis', full=True)
    # A record may be created by a head teacher OR a district admin, hence
    # two nullable creator relations.
    created_by = fields.ForeignKey(HeadTeacherResource,
                                   'created_by',
                                   null=True,
                                   full=True)
    created_by_da = fields.ForeignKey(DistrictAdminUserResource,
                                      'created_by_da',
                                      null=True,
                                      full=True)
    academic_level = fields.ForeignKey(AcademicAchievementCodeResource,
                                       'academic_level', full=True)

    class Meta:
        queryset = TeacherPerformanceData.objects.all()
        resource_name = "data/teacherperformance"
        list_allowed_methods = ['post', 'get']
        authorization = Authorization()
        include_resource_uri = True
        always_return_data = True
        filtering = {
            'created_by': ALL_WITH_RELATIONS,
            'emis': ALL_WITH_RELATIONS}
class LearnerPerformanceDataResource(ModelResource):
    """
    POSTING DATA
        "url": "<base_url>/api/v1/data/learnerperformance/",
        "body": {
            "data": "data",
            "created_by": "/api/v1/data/headteacher/emis/4813/",
            "emis": "/api/v1/school/emis/4813/"
        }
    """
    # full=True embeds the related school and head teacher in responses.
    emis = fields.ForeignKey("hierarchy.api.SchoolResource", 'emis', full=True)
    created_by = fields.ForeignKey(HeadTeacherResource,
                                   'created_by', full=True)

    class Meta:
        queryset = LearnerPerformanceData.objects.all()
        resource_name = "data/learnerperformance"
        list_allowed_methods = ['post', 'get']
        authorization = Authorization()
        include_resource_uri = True
        always_return_data = True
        filtering = {
            'created_by': ALL_WITH_RELATIONS,
            'emis': ALL_WITH_RELATIONS}
class InboundSMSResource(ModelResource):
    """
    GET SMS
    ::
        "url": "<base_url>/api/v1/data/sms/",
        "method": "GET",
    POSTING DATA
        "url": "<base_url>/api/v1/data/sms/",
        "body": {
            "message": "test_name",
            "created_by": "/api/v1/data/sms/1/",
        }
    """
    # full=True embeds the sending head teacher in responses.
    created_by = fields.ForeignKey(HeadTeacherResource,
                                   'created_by', full=True)

    class Meta:
        queryset = InboundSMS.objects.all()
        resource_name = "data/sms"
        list_allowed_methods = ['post', 'get']
        authorization = Authorization()
        include_resource_uri = True
        always_return_data = True
        filtering = {
            'created_by': ALL_WITH_RELATIONS}
# =========================================================================
# This is the CSV download function
# =========================================================================
class HeadTeacherCSVDownloadResource(CSVModelResource):
    """Read-only CSV export of head teacher records.

    ::
        "url": "<base_url>/api/v1/csv/data/headteacher/?username=name&api_key=key,
        "method": "GET",
    """
    emis = fields.ForeignKey("hierarchy.api.SchoolResource", 'emis')

    class Meta:
        resource_name = "csv/data/headteacher"
        queryset = HeadTeacher.objects.all()
        serializer = CSVSerializer()  # emit CSV instead of JSON
        authentication = OverrideApiAuthentication()
        list_allowed_methods = ['get']
        include_resource_uri = False

    def dehydrate(self, bundle):
        """Flatten the related school to its numeric id for the CSV column."""
        bundle.data['emis'] = bundle.obj.emis.id
        return bundle
class SchoolDataCSVDownloadResource(CSVModelResource):
    """Read-only CSV export of school data records.

    ::
        "url": "<base_url>/api/v1/csv/data/school/?username=name&api_key=key,
        "method": "GET",
    """
    emis = fields.ForeignKey("hierarchy.api.SchoolResource", 'emis')
    created_by = fields.ForeignKey(HeadTeacherResource, 'created_by')

    class Meta:
        resource_name = "csv/data/school"
        queryset = SchoolData.objects.all()
        serializer = CSVSerializer()  # emit CSV instead of JSON
        authentication = OverrideApiAuthentication()
        list_allowed_methods = ['get']
        include_resource_uri = False

    def dehydrate(self, bundle):
        """Flatten related rows to their numeric ids for clean CSV columns."""
        obj = bundle.obj
        bundle.data['emis'] = obj.emis.id
        bundle.data['created_by'] = obj.created_by.id
        return bundle
class AcademicAchievementCodeCSVDownloadResource(CSVModelResource):
    """
    GET Academic Achievement Code CSV
    ::
        "url": "<base_url>/api/v1/csv/data/achievement/?username=name&api_key=key,
        "method": "GET",
    """
    class Meta:
        queryset = AcademicAchievementCode.objects.all()
        resource_name = "csv/data/achievement"
        list_allowed_methods = ['get']  # read-only export endpoint
        authorization = Authorization()
        include_resource_uri = False    # keep CSV columns clean
        serializer = CSVSerializer()  # Using custom serializer
        authentication = OverrideApiAuthentication()
class TeacherPerformanceDataCSVDownloadResource(CSVModelResource):
    """Read-only CSV export of teacher performance records.

    ::
        "url": "<base_url>/api/v1/csv/data/teacherperformance/?username=name&api_key=key,
        "method": "GET",
    """
    emis = fields.ForeignKey("hierarchy.api.SchoolResource", 'emis')
    created_by = fields.ForeignKey(HeadTeacherResource, 'created_by')
    academic_level = fields.ForeignKey(AcademicAchievementCodeResource,
                                       'academic_level')

    class Meta:
        resource_name = "csv/data/teacherperformance"
        queryset = TeacherPerformanceData.objects.all()
        serializer = CSVSerializer()  # emit CSV instead of JSON
        authentication = OverrideApiAuthentication()
        list_allowed_methods = ['get']
        include_resource_uri = False

    def dehydrate(self, bundle):
        """Flatten related rows to their numeric ids for clean CSV columns."""
        obj = bundle.obj
        bundle.data['emis'] = obj.emis.id
        bundle.data['created_by'] = obj.created_by.id
        bundle.data['academic_level'] = obj.academic_level.id
        return bundle
class LearnerPerformanceDataCSVDownloadResource(CSVModelResource):
    """
    GET Learner Performance Data CSV
    ::
        "url": "<base_url>/api/v1/csv/data/learnerperformance/?username=name&api_key=key,
        "method": "GET",
    """
    emis = fields.ForeignKey("hierarchy.api.SchoolResource", 'emis')
    created_by = fields.ForeignKey(HeadTeacherResource, 'created_by')

    class Meta:
        queryset = LearnerPerformanceData.objects.all()
        resource_name = "csv/data/learnerperformance"
        list_allowed_methods = ['get']  # read-only export endpoint
        include_resource_uri = False    # keep CSV columns clean
        serializer = CSVSerializer()  # Using custom serializer
        authentication = OverrideApiAuthentication()

    def dehydrate(self, bundle):
        # Replace related-resource URIs with raw numeric ids so the CSV
        # columns contain plain numbers instead of resource paths.
        bundle.data['emis'] = bundle.obj.emis.id
        bundle.data['created_by'] = bundle.obj.created_by.id
        return bundle
class InboundSMSCSVDownloadResource(CSVModelResource):
    """
    GET Inbound SMS CSV
    ::
        "url": "<base_url>/api/v1/csv/data/sms/?username=name&api_key=key,
        "method": "GET",
    """
    created_by = fields.ForeignKey(HeadTeacherResource, 'created_by')

    class Meta:
        queryset = InboundSMS.objects.all()
        resource_name = "csv/data/sms"
        list_allowed_methods = ['get']  # read-only export endpoint
        include_resource_uri = False    # keep CSV columns clean
        serializer = CSVSerializer()  # Using custom serializer
        authentication = OverrideApiAuthentication()

    def dehydrate(self, bundle):
        # Replace the related-resource URI with the raw id for the CSV column.
        bundle.data['created_by'] = bundle.obj.created_by.id
        return bundle
# ---- concatenation artifact: file boundary ----
#
# Copyright 2018 Analytics Zoo Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
from bigdl.util.common import callBigDlFunc
from ..engine.topology import ZooKerasLayer
# Python 2/3 compatibility: on Python 3 alias the removed ``long`` and
# ``unicode`` builtins to ``int`` and ``str``.  Compare version_info (a
# tuple) instead of the lexicographic string test ``sys.version >= '3'``,
# which relies on string ordering of the free-form version banner.
if sys.version_info >= (3,):
    long = int
    unicode = str
class Embedding(ZooKerasLayer):
    """
    Turn positive integers (indexes) into dense vectors of fixed size.
    The input of this layer should be 2D.
    This layer can only be used as the first layer in a model, you need to provide the argument
    input_length (an integer) or input_shape (a shape tuple, does not include the batch dimension).

    # Arguments
    input_dim: Size of the vocabulary. Int > 0.
    output_dim: Dimension of the dense embedding. Int >= 0.
    init: String representation of the initialization method for the weights of the layer.
          Default is 'uniform'.
    W_regularizer: An instance of [[Regularizer]], (eg. L1 or L2 regularization),
                   applied to the embedding matrix. Default is None.
    input_length: Positive int. The sequence length of each input.
    name: String to set the name of the layer.
          If not specified, its name will by default to be a generated string.

    >>> embedding = Embedding(1000, 32, input_length=10, name="embedding1")
    creating: createZooKerasEmbedding
    """
    def __init__(self, input_dim, output_dim, init="uniform", input_length=None,
                 W_regularizer=None, input_shape=None, **kwargs):
        # input_length is a convenience alias for a 1D input_shape; it
        # silently takes precedence when both are supplied.
        if input_length:
            input_shape = (input_length,)
        # Arguments are forwarded positionally to the JVM-side factory
        # (presumably createZooKerasEmbedding -- see ZooKerasLayer); their
        # order must not change.
        super(Embedding, self).__init__(None,
                                        input_dim,
                                        output_dim,
                                        init,
                                        W_regularizer,
                                        list(input_shape) if input_shape else None,
                                        **kwargs)
class WordEmbedding(ZooKerasLayer):
    """
    Embedding layer with pre-trained weights for words.
    Turn non-negative integers (indices) into dense vectors of fixed size.
    Currently only GloVe embedding is supported.
    The input of this layer should be 2D.
    This layer can only be used as the first layer in a model, you need to provide the argument
    input_length (an integer) or input_shape (a shape tuple, does not include the batch dimension).

    # Arguments
    embedding_file: The path to the embedding file.
                    Currently only the following GloVe files are supported:
                    "glove.6B.50d.txt", "glove.6B.100d.txt", "glove.6B.200d.txt"
                    "glove.6B.300d.txt", "glove.42B.300d.txt", "glove.840B.300d.txt".
                    You can download them from: https://nlp.stanford.edu/projects/glove/.
    word_index: Dictionary of word (string) and its corresponding index (int).
                The index is supposed to start from 1 with 0 reserved for unknown words.
                During the prediction, if you have words that are not in the word_index
                for the training, you can map them to index 0.
                Default is None. In this case, all the words in the embedding_file will
                be taken into account and you can call
                WordEmbedding.get_word_index(embedding_file) to retrieve the dictionary.
    trainable: To configure whether the weights of this layer will be updated or not.
               Only False is supported for now.
    input_length: Positive int. The sequence length of each input.
    name: String to set the name of the layer.
          If not specified, its name will by default to be a generated string.
    """
    def __init__(self, embedding_file, word_index=None, trainable=False, input_length=None,
                 input_shape=None, **kwargs):
        # input_length is a convenience alias for a 1D input_shape; it
        # silently takes precedence when both are supplied.
        if input_length:
            input_shape = (input_length, )
        # Arguments are forwarded positionally to the JVM-side factory
        # (see ZooKerasLayer); their order must not change.
        super(WordEmbedding, self).__init__(None,
                                            embedding_file,
                                            word_index,
                                            trainable,
                                            list(input_shape) if input_shape else None,
                                            **kwargs)

    @staticmethod
    def get_word_index(embedding_file, bigdl_type="float"):
        """
        Get the full wordIndex map from the given embedding_file.

        # Arguments
        embedding_file: The path to the embedding file.
                        Currently only the following GloVe files are supported:
                        "glove.6B.50d.txt", "glove.6B.100d.txt", "glove.6B.200d.txt"
                        "glove.6B.300d.txt", "glove.42B.300d.txt", "glove.840B.300d.txt".
                        You can download them from: https://nlp.stanford.edu/projects/glove/.

        # Returns
        Dictionary of word (string) and its corresponding index (int) obtained from
        the given embedding file.
        """
        # Delegates parsing of the embedding file to the JVM side.
        return callBigDlFunc(bigdl_type, "wordEmbeddingGetWordIndex",
                             embedding_file)
class SparseEmbedding(ZooKerasLayer):
    """
    SparseEmbedding is the sparse version of layer Embedding.
    The input of SparseEmbedding should be a 2D SparseTensor or two 2D sparseTensors.
    If the input is a SparseTensor, the values are positive integer ids,
    values in each row of this SparseTensor will be turned into a dense vector.
    If the input is two SparseTensors, the first tensor should be the integer ids, just
    like the SparseTensor input. And the second tensor is the corresponding
    weights of the integer ids.
    This layer can only be used as the first layer in a model, you need to provide the argument
    inputShape (a Single Shape, does not include the batch dimension).

    # Arguments
    input_dim: Size of the vocabulary. Int > 0.
    output_dim: Dimension of the dense embedding. Int >= 0.
    init: String representation of the initialization method for the weights of the layer.
          Default is 'uniform'.
    combiner: A string specifying the reduce type.
              Currently "mean", "sum", "sqrtn" is supported.
    max_norm: If provided, each embedding is normalized to have l2 norm equal to
              maxNorm before combining.
    W_regularizer: An instance of [[Regularizer]], (eg. L1 or L2 regularization),
                   applied to the embedding matrix. Default is None.
    input_shape: A Single Shape, does not include the batch dimension.
    name: String to set the name of the layer.
          If not specified, its name will by default to be a generated string.

    >>> sparse_embedding = SparseEmbedding(input_dim=10, output_dim=4, input_shape=(10, ))
    creating: createZooKerasSparseEmbedding
    """
    def __init__(self, input_dim, output_dim, combiner="sum", max_norm=-1.0, init="uniform",
                 W_regularizer=None, input_shape=None, **kwargs):
        # Arguments are forwarded positionally to the JVM-side factory
        # (presumably createZooKerasSparseEmbedding -- see ZooKerasLayer);
        # their order must not change.  max_norm=-1.0 presumably means
        # "no normalization" on the JVM side -- TODO confirm.
        super(SparseEmbedding, self).__init__(None,
                                              input_dim,
                                              output_dim,
                                              combiner,
                                              max_norm,
                                              init,
                                              W_regularizer,
                                              list(input_shape) if input_shape else None,
                                              **kwargs)
# add kerasStyleAPIGuide (#586)
# * add docs
# * update
#
# Copyright 2018 Analytics Zoo Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
from bigdl.util.common import callBigDlFunc
from ..engine.topology import ZooKerasLayer
# Python 2/3 compatibility: on Python 3 alias the removed ``long`` and
# ``unicode`` builtins to ``int`` and ``str``.  Compare version_info (a
# tuple) instead of the lexicographic string test ``sys.version >= '3'``,
# which relies on string ordering of the free-form version banner.
if sys.version_info >= (3,):
    long = int
    unicode = str
class Embedding(ZooKerasLayer):
    """Map positive integer indexes to dense vectors of a fixed size.

    The input of this layer is 2D.  It can only be the first layer of a
    model: provide either input_length (an int) or input_shape (a shape
    tuple without the batch dimension).

    # Arguments
    input_dim: Size of the vocabulary. Int > 0.
    output_dim: Dimension of the dense embedding. Int >= 0.
    init: Initialization method name for the weights. Default is 'uniform'.
    W_regularizer: Optional [[Regularizer]] (e.g. L1 or L2) applied to the
                   embedding matrix. Default is None.
    input_length: Positive int. The sequence length of each input.
    name: Optional layer name; auto-generated when omitted.

    >>> embedding = Embedding(1000, 32, input_length=10, name="embedding1")
    creating: createZooKerasEmbedding
    """
    def __init__(self, input_dim, output_dim, init="uniform", input_length=None,
                 W_regularizer=None, input_shape=None, **kwargs):
        # input_length wins over an explicitly supplied input_shape.
        shape = (input_length,) if input_length else input_shape
        super(Embedding, self).__init__(None, input_dim, output_dim, init,
                                        W_regularizer,
                                        list(shape) if shape else None,
                                        **kwargs)
class WordEmbedding(ZooKerasLayer):
    """
    Embedding layer with pre-trained weights for words.
    Turn non-negative integers (indices) into dense vectors of fixed size.
    Currently only GloVe embedding is supported.
    The input of this layer should be 2D.
    This layer can only be used as the first layer in a model, you need to provide the argument
    input_length (an integer) or input_shape (a shape tuple, does not include the batch dimension).

    # Arguments
    embedding_file: The path to the embedding file.
                    Currently only the following GloVe files are supported:
                    "glove.6B.50d.txt", "glove.6B.100d.txt", "glove.6B.200d.txt"
                    "glove.6B.300d.txt", "glove.42B.300d.txt", "glove.840B.300d.txt".
                    You can download them from: https://nlp.stanford.edu/projects/glove/.
    word_index: Dictionary of word (string) and its corresponding index (int).
                The index is supposed to start from 1 with 0 reserved for unknown words.
                During the prediction, if you have words that are not in the word_index
                for the training, you can map them to index 0.
                Default is None. In this case, all the words in the embedding_file will
                be taken into account and you can call
                WordEmbedding.get_word_index(embedding_file) to retrieve the dictionary.
    trainable: To configure whether the weights of this layer will be updated or not.
               Only False is supported for now.
    input_length: Positive int. The sequence length of each input.
    name: String to set the name of the layer.
          If not specified, its name will by default to be a generated string.
    """
    def __init__(self, embedding_file, word_index=None, trainable=False, input_length=None,
                 input_shape=None, **kwargs):
        # input_length is a convenience alias for a 1D input_shape; it
        # silently takes precedence when both are supplied.
        if input_length:
            input_shape = (input_length, )
        # Arguments are forwarded positionally to the JVM-side factory
        # (see ZooKerasLayer); their order must not change.
        super(WordEmbedding, self).__init__(None,
                                            embedding_file,
                                            word_index,
                                            trainable,
                                            list(input_shape) if input_shape else None,
                                            **kwargs)

    @staticmethod
    def get_word_index(embedding_file, bigdl_type="float"):
        """
        Get the full wordIndex map from the given embedding_file.

        # Arguments
        embedding_file: The path to the embedding file.
                        Currently only the following GloVe files are supported:
                        "glove.6B.50d.txt", "glove.6B.100d.txt", "glove.6B.200d.txt"
                        "glove.6B.300d.txt", "glove.42B.300d.txt", "glove.840B.300d.txt".
                        You can download them from: https://nlp.stanford.edu/projects/glove/.

        # Returns
        Dictionary of word (string) and its corresponding index (int) obtained from
        the given embedding file.
        """
        # Delegates parsing of the embedding file to the JVM side.
        return callBigDlFunc(bigdl_type, "wordEmbeddingGetWordIndex",
                             embedding_file)
class SparseEmbedding(ZooKerasLayer):
    """
    SparseEmbedding is the sparse version of layer Embedding.
    The input of SparseEmbedding should be a 2D SparseTensor or two 2D sparseTensors.
    If the input is a SparseTensor, the values are positive integer ids,
    values in each row of this SparseTensor will be turned into a dense vector.
    If the input is two SparseTensors, the first tensor should be the integer ids, just
    like the SparseTensor input. And the second tensor is the corresponding
    weights of the integer ids.
    This layer can only be used as the first layer in a model, you need to provide the argument
    inputShape (a Single Shape, does not include the batch dimension).

    # Arguments
    input_dim: Size of the vocabulary. Int > 0.
    output_dim: Dimension of the dense embedding. Int >= 0.
    init: String representation of the initialization method for the weights of the layer.
          Default is 'uniform'.
    combiner: A string specifying the reduce type.
              Currently "mean", "sum", "sqrtn" is supported.
    max_norm: If provided, each embedding is normalized to have l2 norm equal to
              maxNorm before combining.
    W_regularizer: An instance of [[Regularizer]], (eg. L1 or L2 regularization),
                   applied to the embedding matrix. Default is None.
    input_shape: A Single Shape, does not include the batch dimension.
    name: String to set the name of the layer.
          If not specified, its name will by default to be a generated string.

    >>> sparse_embedding = SparseEmbedding(input_dim=10, output_dim=4, input_shape=(10, ))
    creating: createZooKerasSparseEmbedding
    """
    def __init__(self, input_dim, output_dim, combiner="sum", max_norm=-1.0, init="uniform",
                 W_regularizer=None, input_shape=None, **kwargs):
        # Arguments are forwarded positionally to the JVM-side factory
        # (presumably createZooKerasSparseEmbedding -- see ZooKerasLayer);
        # their order must not change.  max_norm=-1.0 presumably means
        # "no normalization" on the JVM side -- TODO confirm.
        super(SparseEmbedding, self).__init__(None,
                                              input_dim,
                                              output_dim,
                                              combiner,
                                              max_norm,
                                              init,
                                              W_regularizer,
                                              list(input_shape) if input_shape else None,
                                              **kwargs)
# ---- concatenation artifact: file boundary ----
# coding: utf-8
import collections
class Goals():
    """A DAG of goals with a replayable mutation log.

    ``goals`` maps an integer id to the goal name (None once deleted),
    ``edges`` maps an id to the list of its subgoal ids, and ``closed``
    holds the ids of closed goals.  Every mutation appends a tuple to
    ``events`` so a persistence layer can replay the changes.

    Invariant (asserted by verify()): a closed goal never has an open
    subgoal.  Fix: toggle_link() previously allowed linking an open goal
    under a closed one, which broke this invariant; such links are now
    refused (see toggle_link).
    """

    def __init__(self, name):
        self.goals = {}
        self.edges = {}
        self.closed = set()
        self.selection = 1
        self.previous_selection = 1
        self.events = collections.deque()
        self.add(name)  # root goal, always id 1

    def add(self, name, add_to=0):
        """Add a subgoal of *add_to* (0 = current selection).

        Returns False (and adds nothing) when the parent is closed.
        """
        if add_to == 0:
            add_to = self.selection
        if add_to in self.closed:
            return False
        # Ids are never reused: deleted goals keep their key (set to None).
        next_id = max(list(self.goals.keys()) + [0]) + 1
        self.goals[next_id] = name
        self.edges[next_id] = list()
        self.events.append(('add', next_id, name, True))
        self.toggle_link(add_to, next_id)
        self.selection_cache = []
        return True

    def id_mapping(self, goal_id):
        """Return the displayed id for *goal_id*.

        With more than 10 (or 90) goals the displayed id grows an extra
        digit so goals stay addressable through the digit-at-a-time
        select() mechanism.
        """
        new_id = goal_id % 10
        if len(self.goals) > 10:
            new_id += 10 * ((goal_id - 1) // 10 + 1)
        if len(self.goals) > 90:
            new_id += 100 * ((goal_id - 1) // 100 + 1)
        return new_id

    def _select(self, goal_id):
        # Unconditional selection change + event; callers do validation.
        self.selection_cache = []
        self.selection = goal_id
        self.events.append(('select', self.selection))

    def select(self, goal_id):
        """Select a goal by displayed id; digits may arrive one at a time.

        Ambiguous prefixes are accumulated in selection_cache until the
        id becomes unique.
        """
        if goal_id > len(self.goals):
            return
        if self.selection_cache:
            goal_id = 10 * self.selection_cache.pop() + goal_id
        possible_selections = [g for g in self.goals.keys()
                               if self.id_mapping(g) == goal_id]
        if len(possible_selections) == 1:
            if self.goals[possible_selections[0]]:  # skip deleted (None) goals
                self._select(possible_selections[0])
        else:
            self.selection_cache.append(goal_id)

    def hold_select(self):
        """Remember the current selection as the 'previous' selection."""
        self.previous_selection = self.selection
        self.selection_cache = []
        self.events.append(('hold_select', self.selection))

    def all(self, keys='name'):
        """Return {displayed_id: attrs} for all live (non-deleted) goals.

        *keys* is a comma-separated subset of open/name/edge/select; with
        a single key the bare value is returned instead of a dict.
        """
        keys = keys.split(',')
        result = dict()
        for key, name in ((k, n) for k, n in self.goals.items() if n is not None):
            value = {}
            if 'open' in keys:
                value['open'] = key not in self.closed
            if 'name' in keys:
                value['name'] = name
            if 'edge' in keys:
                value['edge'] = sorted(self.id_mapping(e) for e in self.edges[key])
            if 'select' in keys:
                if key == self.selection:
                    value['select'] = 'select'
                elif key == self.previous_selection:
                    value['select'] = 'prev'
                else:
                    value['select'] = None
            result[self.id_mapping(key)] = value if len(keys) > 1 else value[keys[0]]
        return result

    def top(self):
        """Return open goals whose subgoals are all closed (workable now)."""
        return {self.id_mapping(key): value
                for key, value in self.goals.items()
                if key not in self.closed and
                all(g in self.closed for g in self.edges[key])}

    def insert(self, name):
        """Insert a new goal between previous_selection and selection."""
        self.selection_cache = []
        if self.selection == self.previous_selection:
            return
        if self.add(name, self.previous_selection):
            key = len(self.goals)
            self.toggle_link(key, self.selection)
            # Drop the now-redundant direct link, if any.
            if self.selection in self.edges[self.previous_selection]:
                self.toggle_link(self.previous_selection, self.selection)

    def rename(self, new_name):
        """Rename the currently selected goal."""
        self.goals[self.selection] = new_name
        self.selection_cache = []
        self.events.append(('rename', new_name, self.selection))

    def toggle_close(self):
        """Close the selected goal (subgoals permitting) or reopen it."""
        if self.selection in self.closed:
            # Reopen only when at least one parent is itself open.
            parent_goals = [g for g, v in self.edges.items() if self.selection in v]
            if not parent_goals or any(g for g in parent_goals if g not in self.closed):
                self.closed.remove(self.selection)
                self.events.append(('toggle_close', True, self.selection))
        else:
            # Close only when every subgoal is already closed.
            if all(g in self.closed for g in self.edges[self.selection]):
                self.closed.add(self.selection)
                self.events.append(('toggle_close', False, self.selection))
                self._select(1)
                self.hold_select()
        self.selection_cache = []

    def delete(self, goal_id=0):
        """Delete a goal (0 = selection) and subgoals only it points to."""
        self.selection_cache = []
        if goal_id == 0:
            goal_id = self.selection
        if goal_id == 1:
            return  # the root goal cannot be deleted
        self.goals[goal_id] = None  # keep the key so ids are never reused
        self.closed.add(goal_id)
        for next_goal in self.edges[goal_id]:
            # Cascade-delete a subgoal unless another goal also links to it.
            other_edges = list()
            for k in (k for k in self.edges if k != goal_id):
                other_edges.extend(self.edges[k])
            if next_goal not in set(other_edges):
                self.delete(next_goal)
        self.edges.pop(goal_id)
        for key, values in self.edges.items():
            self.edges[key] = [v for v in values if v != goal_id]
        self.events.append(('delete', goal_id))
        self._select(1)
        self.hold_select()

    def toggle_link(self, lower=0, upper=0):
        """Link previous_selection -> selection, or remove such a link.

        A link is removed only when the upper goal keeps another parent.
        A new link is refused when it would make a closed goal depend on
        an open subgoal (breaking the verify() invariant) or would create
        a cycle.
        """
        if lower == 0:
            lower = self.previous_selection
        if upper == 0:
            upper = self.selection
        self.selection_cache = []
        if lower == upper:
            return
        if upper in self.edges[lower]:
            # remove existing link unless it's the last one
            edges_to_upper = sum(1 for g in self.goals
                                 if g in self.edges and upper in self.edges[g])
            if edges_to_upper > 1:
                self.edges[lower].remove(upper)
                self.events.append(('unlink', lower, upper))
        else:
            # BUGFIX: refuse a link that would give a closed goal an open
            # subgoal -- verify() asserts that this never happens.
            if lower in self.closed and upper not in self.closed:
                return
            # create a new link unless it creates a loop
            front, visited, total = set([upper]), set(), set()
            while front:
                g = front.pop()
                visited.add(g)
                for e in self.edges[g]:
                    total.add(e)
                    if e not in visited:
                        front.add(e)
            if lower not in total:
                self.edges[lower].append(upper)
                self.events.append(('link', lower, upper))

    def verify(self):
        """Assert structural invariants; returns True when all hold."""
        assert all(g in self.closed for p in self.closed for g in self.edges.get(p, [])), \
            'Open goals could not be blocked by closed ones'
        queue, visited = [1], set()
        while queue:
            goal = queue.pop()
            queue.extend(g for g in self.edges[goal]
                         if g not in visited and self.goals[g] is not None)
            visited.add(goal)
        assert visited == set(x for x in self.goals.keys()
                              if self.goals[x] is not None), \
            'All subgoals must be accessible from the root goal'
        deleted_nodes = [g for g, v in self.goals.items() if v is None]
        assert all(not self.edges.get(n) for n in deleted_nodes), \
            'Deleted goals must have no dependencies'
        return True

    @staticmethod
    def build(goals, edges, selection):
        """Reconstruct a Goals instance from export()-style tuples."""
        result = Goals('')
        result.events.pop()  # remove initial goal
        goals_dict = dict((g[0], g[1]) for g in goals)
        # Fill id gaps with None so deleted goals keep their slots.
        result.goals = dict((i, goals_dict.get(i))
                            for i in range(1, max(goals_dict.keys()) + 1))
        result.closed = set(g[0] for g in goals if not g[2])
        d = collections.defaultdict(lambda: list())
        for parent, child in edges:
            d[parent].append(child)
        result.edges = dict(d)
        result.edges.update(dict((g, []) for g in result.goals if g not in d))
        selects = dict(selection)
        result.selection = selects.get('selection', 1)
        result.previous_selection = selects.get('previous_selection', 1)
        result.verify()
        return result

    @staticmethod
    def export(goals):
        """Serialize *goals* into (goals, edges, selection) tuple lists."""
        gs = [(g_id, g_name, g_id not in goals.closed)
              for g_id, g_name in goals.goals.items()]
        es = [(parent, child) for parent in goals.edges
              for child in goals.edges[parent]]
        sel = [('selection', goals.selection),
               ('previous_selection', goals.previous_selection)]
        return gs, es, sel
# fix bad link issue
# coding: utf-8
import collections
class Goals():
    def __init__(self, name):
        """Create a goal graph whose root goal (id 1) is named *name*."""
        self.goals = {}    # id -> name (None once deleted)
        self.edges = {}    # id -> list of subgoal ids
        self.closed = set()  # ids of closed goals
        self.selection = 1
        self.previous_selection = 1
        self.events = collections.deque()  # replayable mutation log
        self.add(name)  # root goal, always id 1
    def add(self, name, add_to=0):
        """Add a subgoal of *add_to* (0 = current selection).

        Returns False (and adds nothing) when the parent is closed.
        """
        if add_to == 0:
            add_to = self.selection
        if add_to in self.closed:
            return False
        # Ids are never reused: deleted goals keep their key (set to None).
        next_id = max(list(self.goals.keys()) + [0]) + 1
        self.goals[next_id] = name
        self.edges[next_id] = list()
        self.events.append(('add', next_id, name, True))
        self.toggle_link(add_to, next_id)
        self.selection_cache = []
        return True
    def id_mapping(self, goal_id):
        """Return the displayed id for *goal_id*.

        With more than 10 (or 90) goals the displayed id grows an extra
        digit -- presumably so goals stay addressable through the
        digit-at-a-time select() mechanism.
        """
        new_id = goal_id % 10
        if len(self.goals) > 10:
            new_id += 10 * ((goal_id - 1) // 10 + 1)
        if len(self.goals) > 90:
            new_id += 100 * ((goal_id - 1) // 100 + 1)
        return new_id
    def _select(self, goal_id):
        # Unconditional selection change + event; callers do validation.
        self.selection_cache = []
        self.selection = goal_id
        self.events.append(('select', self.selection))
    def select(self, goal_id):
        """Select a goal by displayed id; digits may arrive one at a time.

        Ambiguous prefixes are accumulated in selection_cache until the
        id becomes unique.
        """
        if goal_id > len(self.goals):
            return
        if self.selection_cache:
            goal_id = 10 * self.selection_cache.pop() + goal_id
        possible_selections = [g for g in self.goals.keys()
                               if self.id_mapping(g) == goal_id]
        if len(possible_selections) == 1:
            if self.goals[possible_selections[0]]:  # skip deleted (None) goals
                self._select(possible_selections[0])
        else:
            self.selection_cache.append(goal_id)
    def hold_select(self):
        """Remember the current selection as the 'previous' selection."""
        self.previous_selection = self.selection
        self.selection_cache = []
        self.events.append(('hold_select', self.selection))
    def all(self, keys='name'):
        """Return {displayed_id: attrs} for all live (non-deleted) goals.

        *keys* is a comma-separated subset of open/name/edge/select; with
        a single key the bare value is returned instead of a dict.
        """
        keys = keys.split(',')
        result = dict()
        for key, name in ((k, n) for k, n in self.goals.items() if n is not None):
            value = {}
            if 'open' in keys:
                value['open'] = key not in self.closed
            if 'name' in keys:
                value['name'] = name
            if 'edge' in keys:
                value['edge'] = sorted(self.id_mapping(e) for e in self.edges[key])
            if 'select' in keys:
                if key == self.selection:
                    value['select'] = 'select'
                elif key == self.previous_selection:
                    value['select'] = 'prev'
                else:
                    value['select'] = None
            result[self.id_mapping(key)] = value if len(keys) > 1 else value[keys[0]]
        return result
    def top(self):
        """Return open goals whose subgoals are all closed (workable now)."""
        return {self.id_mapping(key): value
                for key, value in self.goals.items()
                if key not in self.closed and
                all(g in self.closed for g in self.edges[key])}
    def insert(self, name):
        """Insert a new goal between previous_selection and selection."""
        self.selection_cache = []
        if self.selection == self.previous_selection:
            return
        if self.add(name, self.previous_selection):
            key = len(self.goals)
            self.toggle_link(key, self.selection)
            # Drop the now-redundant direct link, if any.
            if self.selection in self.edges[self.previous_selection]:
                self.toggle_link(self.previous_selection, self.selection)
    def rename(self, new_name):
        """Rename the currently selected goal."""
        self.goals[self.selection] = new_name
        self.selection_cache = []
        self.events.append(('rename', new_name, self.selection))
    def toggle_close(self):
        """Close the selected goal (subgoals permitting) or reopen it."""
        if self.selection in self.closed:
            # Reopen only when at least one parent is itself open.
            parent_goals = [g for g, v in self.edges.items() if self.selection in v]
            if not parent_goals or any(g for g in parent_goals if g not in self.closed):
                self.closed.remove(self.selection)
                self.events.append(('toggle_close', True, self.selection))
        else:
            # Close only when every subgoal is already closed.
            if all(g in self.closed for g in self.edges[self.selection]):
                self.closed.add(self.selection)
                self.events.append(('toggle_close', False, self.selection))
                # NOTE(review): indentation was lost in this dump; the jump
                # back to the root goal is assumed to happen only on a
                # successful close -- confirm against the original file.
                self._select(1)
                self.hold_select()
        self.selection_cache = []
    def delete(self, goal_id=0):
        """Delete a goal (0 = selection) and subgoals only it points to."""
        self.selection_cache = []
        if goal_id == 0:
            goal_id = self.selection
        if goal_id == 1:
            return  # the root goal cannot be deleted
        self.goals[goal_id] = None  # keep the key so ids are never reused
        self.closed.add(goal_id)
        for next_goal in self.edges[goal_id]:
            # Cascade-delete a subgoal unless another goal also links to it.
            other_edges = list()
            for k in (k for k in self.edges if k != goal_id):
                other_edges.extend(self.edges[k])
            if next_goal not in set(other_edges):
                self.delete(next_goal)
        self.edges.pop(goal_id)
        for key, values in self.edges.items():
            self.edges[key] = [v for v in values if v != goal_id]
        self.events.append(('delete', goal_id))
        self._select(1)
        self.hold_select()
def toggle_link(self, lower=0, upper=0):
if lower == 0:
lower = self.previous_selection
if upper == 0:
upper = self.selection
self.selection_cache = []
if lower == upper:
return
if upper in self.edges[lower]:
# remove existing link unless it's the last one
edges_to_upper = sum(1 for g in self.goals
if g in self.edges and upper in self.edges[g])
if edges_to_upper > 1:
self.edges[lower].remove(upper)
self.events.append(('unlink', lower, upper))
else:
# create a new link unless it breaks validity
if lower in self.closed and upper not in self.closed:
return
front, visited, total = set([upper]), set(), set()
while front:
g = front.pop()
visited.add(g)
for e in self.edges[g]:
total.add(e)
if e not in visited:
front.add(e)
if lower not in total:
self.edges[lower].append(upper)
self.events.append(('link', lower, upper))
def verify(self):
assert all(g in self.closed for p in self.closed for g in self.edges.get(p, [])), \
'Open goals could not be blocked by closed ones'
queue, visited = [1], set()
while queue:
goal = queue.pop()
queue.extend(g for g in self.edges[goal]
if g not in visited and self.goals[g] is not None)
visited.add(goal)
assert visited == set(x for x in self.goals.keys()
if self.goals[x] is not None), \
'All subgoals must be accessible from the root goal'
deleted_nodes = [g for g, v in self.goals.items() if v is None]
assert all(not self.edges.get(n) for n in deleted_nodes), \
'Deleted goals must have no dependencies'
return True
@staticmethod
def build(goals, edges, selection):
    """Reconstruct a Goals object from data produced by Goals.export."""
    result = Goals('')
    result.events.pop()  # discard the event recorded for the implicit root goal
    names = dict((gid, name) for gid, name, _open in goals)
    # Fill every id up to the max so deleted goals keep their slot (value None).
    result.goals = dict((i, names.get(i))
                        for i in range(1, max(names.keys()) + 1))
    result.closed = set(gid for gid, _name, is_open in goals if not is_open)
    links = collections.defaultdict(list)
    for parent, child in edges:
        links[parent].append(child)
    result.edges = dict(links)
    # Goals with no outgoing edges still need an (empty) adjacency entry.
    result.edges.update(dict((gid, []) for gid in result.goals if gid not in links))
    chosen = dict(selection)
    result.selection = chosen.get('selection', 1)
    result.previous_selection = chosen.get('previous_selection', 1)
    result.verify()
    return result
@staticmethod
def export(goals):
gs = [(g_id, g_name, g_id not in goals.closed)
for g_id, g_name in goals.goals.items()]
es = [(parent, child) for parent in goals.edges
for child in goals.edges[parent]]
sel = [('selection', goals.selection),
('previous_selection', goals.previous_selection)]
return gs, es, sel
|
#!/bin/env python
# -*- coding: utf-8 -*-
# ------------------------------------------------------------------------------
# Name: cfg/azisetup.py
# Author: Fabio Cassini <fabio.cassini@gmail.com>
# Copyright: (C) 2011 Astra S.r.l. C.so Cavallotti, 122 18038 Sanremo (IM)
# ------------------------------------------------------------------------------
# This file is part of X4GA
#
# X4GA is free software: you can redistribute it and/or modify
# it under the terms of the Affero GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# X4GA is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with X4GA. If not, see <http://www.gnu.org/licenses/>.
# ------------------------------------------------------------------------------
import wx
import Env
bt = Env.Azienda.BaseTab
import awc.controls.windows as aw
import awc.controls.radiobox as awradio
import awc.controls.checkbox as awcheck
import awc.controls.linktable as lt
from awc.controls.numctrl import NumCtrl
from awc.controls.datectrl import DateCtrl
import cfg.azisetup_wdr as wdr
import stormdb as adb
from cfg.cfgcontab import CfgContab
import os
import lib
FRAME_TITLE = "Setup azienda"
class _SetupPanel(aw.Panel):
    """Base panel for setup screens backed by the 'cfgsetup' key/value table.

    Every child control whose name starts with ``setup_`` is mapped to the
    row whose ``chiave`` column equals the rest of the control name.  Which
    column stores the value (flag / importo / data / descriz) depends on the
    control type.  Subclasses must implement ``Validate``.
    """
    def __init__(self, *args, **kwargs):
        aw.Panel.__init__(self, *args, **kwargs)
        # Table adapter for the per-company configuration key/value store.
        self.dbsetup = adb.DbTable(bt.TABNAME_CFGSETUP, 'setup')
    def EncodeValue(self, value, name):
        """Hook: transform *value* before writing it into setup.descriz."""
        return value
    def DecodeValue(self, value, name):
        """Hook: transform setup.descriz *value* before loading it into a control."""
        return value
    def SetupRead(self):
        """Load each 'setup_*' control from its matching setup-table row."""
        db = self.dbsetup
        cn = self.FindWindowByName  # NOTE(review): bound but unused in this method
        for ctr in aw.awu.GetAllChildrens(self):
            if ctr.GetName()[:6] == 'setup_':
                name = ctr.GetName()[6:]
                if db.Retrieve('setup.chiave=%s', name) and db.RowsCount() == 1:
                    # Pick the storage column matching the control type.
                    if isinstance(ctr, (awradio.RadioBox, awcheck.CheckBox)):
                        val = db.flag
                    # NOTE(review): int/long in this isinstance look unreachable
                    # for wx controls — confirm original intent.
                    elif isinstance(ctr, (int, long, lt.LinkTable, NumCtrl)):
                        val = db.importo
                    elif isinstance(ctr, DateCtrl):
                        val = db.data
                    else:
                        val = self.DecodeValue(db.descriz, name)
                    ctr.SetValue(val)
    def SetupWrite(self):
        """Persist every 'setup_*' control back to the setup table.

        Returns True on success; shows a dialog and returns False on the
        first failing row.  Always notifies the top window afterwards so the
        application menus can be rebuilt.
        """
        out = True
        db = self.dbsetup
        cn = self.FindWindowByName
        for ctr in aw.awu.GetAllChildrens(self):
            name = ctr.GetName()
            if name[:6] == 'setup_':
                val = cn(name).GetValue()
                name = ctr.GetName()[6:]
                if db.Retrieve('setup.chiave=%s', name):
                    if db.RowsCount() == 0:
                        # Key not present yet: create its row on the fly.
                        db.CreateNewRow()
                        db.chiave = name
                    if isinstance(ctr, (awradio.RadioBox, awcheck.CheckBox)):
                        db.flag = val
                    elif isinstance(ctr, (int, long, lt.LinkTable, NumCtrl)):
                        db.importo = val
                    elif isinstance(ctr, DateCtrl):
                        db.data = val
                    else:
                        db.descriz = self.EncodeValue(val, name)
                    if not db.Save():
                        aw.awu.MsgDialog(self, message="Problema in aggiornamento setup:\n%s" % repr(db.GetError()))
                        out = False
                        break
                else:
                    aw.awu.MsgDialog(self, message="Problema in lettura setup:\n%s" % db.GetError())
                    out = False
                    break
        import lib
        # Tell the main frame that settings changed so menus get refreshed.
        evt = wx.PyCommandEvent(lib._evtCHANGEMENU)
        wx.GetApp().GetTopWindow().AddPendingEvent(evt)
        return out
    def OnConfirm(self, event):
        """Validate and persist; propagate the event only on full success."""
        if self.Validate():
            if self.SetupWrite():
                event.Skip()
    def Validate(self):
        # Abstract method: concrete setup panels must provide validation.
        raise Exception, 'Classe non istanziabile'
# ------------------------------------------------------------------------------
class AziendaSetupPanel(_SetupPanel):
"""
Impostazione setup azienda.
"""
def __init__(self, *args, **kwargs):
_SetupPanel.__init__(self, *args, **kwargs)
wdr.AziendaSetup(self)
cn = lambda x: self.FindWindowByName(x)
for name, vals in (('tipo_contab', 'OS'),
('liqiva_periodic', 'MT')):
name = 'setup_'+name
cn(name).SetDataLink(name, list(vals))
tf = {True: '1', False: '0'}
for name, vals in (('setup_conbilricl', tf),
('setup_conbilrcee', tf),
('setup_conattritacc', tf),
('setup_magscocat', tf),
('setup_gesfidicli', tf),
('setup_magimgprod', tf),
('setup_magdigsearch', tf),
('setup_optdigsearch', tf),
('setup_opttabsearch', tf),
('setup_optlnkcrdpdc', tf),
('setup_optlnkgrdpdc', tf),
('setup_optlnkcrdcli', tf),
('setup_optlnkgrdcli', tf),
('setup_optlnkcrdfor', tf),
('setup_optlnkgrdfor', tf),
('setup_optnotifiche', tf),
('setup_magpzconf', tf),
('setup_magpzgrip', tf),
('setup_magppromo', tf),
('setup_magprovatt', tf),
('setup_magprovcli', tf),
('setup_magprovpro', tf),
('setup_magvisgia', tf),
('setup_magvispre', tf),
('setup_magviscos', tf),
('setup_magviscpf', tf),
('setup_magvisbcd', tf),
('setup_maggesacc', tf),
):
try:
cn(name).SetDataLink(name, vals)
except:
pass
self.SetupRead()
self.Bind(wx.EVT_BUTTON, self.OnConfirm, id=wdr.ID_BTNOK)
def SetupRead(self):
out = _SetupPanel.SetupRead(self)
cn = self.FindWindowByName
cn('setup_consovges').SetValue(bt.CONSOVGES) #il flag è numerico, ma memorizzato in setup.flag che è carattere
logo = os.path.join(self.GetLogoPath(), self.GetLogoFileName())
if os.path.isfile(logo):
cn('azienda_logo').display_image(logo)
return out
def SetupWrite(self):
cn = self.FindWindowByName
out = _SetupPanel.SetupWrite(self)
if out:
#aggiornamento ragione sociale su db aziende
out = False
cod = Env.Azienda.codice
host = Env.Azienda.DB.servername
user = Env.Azienda.DB.username
pswd = Env.Azienda.DB.password
dba = adb.db.DB(dbType=getattr(adb.db.__database__, '_dbType'), globalConnection=False)
if dba.Connect(host=host, user=user, passwd=pswd, db='x4'):
dbz = adb.DbTable('aziende', 'azi', db=dba)
if dbz.Retrieve('azi.codice=%s', cod):
if dbz.RowsCount() == 1:
dbz.codice = Env.Azienda.codice = cn('setup_azienda_codice').GetValue()
dbz.azienda = Env.Azienda.descrizione = cn('setup_azienda_ragsoc').GetValue()
if dbz.Save():
out = True
else:
aw.awu.MsgDialog(self, message="Problema in fase di aggiornamento della tabella aziende:\n%s" % repr(dbz.GetError()))
else:
aw.awu.MsgDialog(self, message="Codice '%s' non trovato su tabella aziende" % cod)
else:
aw.awu.MsgDialog(self, message="Problema in fase di aggiornamento della tabella aziende:\n%s" % repr(dbz.GetError()))
else:
aw.awu.MsgDialog(self, message="Problema di connessione al database delle aziende:\n%s" % dba._Error_MySQLdb)
dba.Close()
if out:
img = cn('azienda_logo')
if img.is_changed():
fns = img.get_image_filename()
if fns:
d = self.GetLogoPath(azienda_codice=cn('setup_azienda_codice').GetValue())
if d:
if not os.path.isdir(d):
os.makedirs(d)
try:
fnt = os.path.join(d, self.GetLogoFileName())
hs = open(fns, 'rb')
buf = hs.read()
hs.close()
ht = open(fnt, 'wb')
ht.write(buf)
ht.close()
cfg = Env.Azienda.config
p = cfg.get('Site', 'folder')
if os.path.isdir(p):
if aw.awu.MsgDialog(self, 'Aggiorno anche il logo aziendale su X4?', style=wx.ICON_QUESTION|wx.YES_NO|wx.NO_DEFAULT) == wx.ID_YES:
fnt = os.path.join(p, self.GetLogoFileName())
hs = open(fns, 'rb')
buf = hs.read()
hs.close()
ht = open(fnt, 'wb')
ht.write(buf)
ht.close()
except Exception, e:
aw.awu.MsgDialog(self, repr(e.args))
bt.ReadAziendaSetup()
return out
def GetLogoPath(self, azienda_codice=None):
import report
d = report.pathsub
if d:
d = os.path.join(d, 'immagini')
return d
def GetLogoFileName(self):
return 'logo_aziendale.jpg'
def Validate(self):
out = True
for name in ('codice', 'ragsoc', 'indirizzo', 'cap', 'citta', 'prov'):
ctr = self.FindWindowByName('setup_azienda_'+name)
if ctr.GetValue():
ctr.SetBackgroundColour(None)
else:
ctr.SetBackgroundColour(Env.Azienda.Colours.VALERR_BACKGROUND)
out = False
if not self.TestEsercizio():
aw.awu.MsgDialog(self, message="Giorno/Mese errati x l'esercizio")
out = False
self.Refresh()
if out:
def cn(x):
return self.FindWindowByName(x)
old = (bt.TIPO_CONTAB,
bt.CONBILRICL,
bt.CONBILRCEE,
bt.CONATTRITACC,
bt.CONPERRITACC,
bt.CONCOMRITACC,
bt.MAGPRE_DECIMALS,
bt.MAGQTA_DECIMALS,
bt.VALINT_DECIMALS,
bt.MAGEAN_PREFIX,
bt.MAGSCOCAT,
bt.MAGSCORPCOS,
bt.MAGSCORPPRE,
bt.GESFIDICLI,
bt.MAGIMGPROD,
bt.MAGDIGSEARCH,
bt.MAGEXCSEARCH,
bt.OPTDIGSEARCH,
bt.OPTTABSEARCH,
bt.OPTLNKCRDPDC,
bt.OPTLNKGRDPDC,
bt.OPTLNKCRDCLI,
bt.OPTLNKGRDCLI,
bt.OPTLNKCRDFOR,
bt.OPTLNKGRDFOR,
bt.OPTNOTIFICHE,
bt.OPTBACKUPDIR,
bt.MAGATTGRIP,
bt.MAGATTGRIF,
bt.MAGCDEGRIP,
bt.MAGCDEGRIF,
bt.MAGDATGRIP,
bt.MAGAGGGRIP,
bt.MAGALWGRIP,
bt.MAGPZCONF,
bt.MAGPZGRIP,
bt.MAGPPROMO,
bt.MAGVISGIA,
bt.MAGVISPRE,
bt.MAGVISCOS,
bt.MAGVISCPF,
bt.MAGVISBCD,
bt.MAGGESACC,
bt.MAGPROVATT,
bt.MAGPROVCLI,
bt.MAGPROVPRO,
bt.MAGPROVMOV,
bt.MAGPROVSEQ,
bt.MAGNOCODEDES,
bt.MAGNOCODEVET,
bt.MAGNOCDEFDES,
bt.MAGNOCDEFVET,
bt.MAGEXTRAVET,
bt.MAGNUMLIS,
bt.MAGDATLIS,
bt.MAGFORLIS,
bt.MAGERPLIS,
bt.MAGESPLIS,
bt.MAGVRGLIS,
bt.MAGVSGLIS,
bt.MAGREPLIS,
bt.MAGSEPLIS,
bt.MAGRELLIS,
bt.MAGSELLIS,)
bt.TIPO_CONTAB = cn('setup_tipo_contab').GetValue()
bt.CONSOVGES = cn('setup_consovges').GetValue()
bt.CONBILRICL = cn('setup_conbilricl').GetValue()
bt.CONBILRCEE = cn('setup_conbilrcee').GetValue()
bt.CONATTRITACC = cn('setup_conattritacc').GetValue()
bt.CONPERRITACC = cn('setup_conperritacc').GetValue()
bt.CONCOMRITACC = cn('setup_concomritacc').GetValue()
bt.MAGPRE_DECIMALS = cn('setup_magdec_prez').GetValue()
bt.MAGQTA_DECIMALS = cn('setup_magdec_qta').GetValue()
bt.VALINT_DECIMALS = cn('setup_contab_decimp').GetValue()
bt.MAGEAN_PREFIX = cn('setup_mageanprefix').GetValue()
bt.MAGSCOCAT = int(cn('setup_magscocat').GetValue())
bt.MAGSCORPCOS = cn('setup_magscorpcos').GetValue()
bt.MAGSCORPPRE = cn('setup_magscorppre').GetValue()
bt.GESFIDICLI = cn('setup_gesfidicli').GetValue()
bt.MAGIMGPROD = cn('setup_magimgprod').GetValue()
bt.MAGDIGSEARCH = bool(cn('setup_magdigsearch').GetValue())
bt.MAGEXCSEARCH = bool(cn('setup_magexcsearch').GetValue())
bt.OPTDIGSEARCH = bool(cn('setup_optdigsearch').GetValue())
bt.OPTTABSEARCH = bool(cn('setup_opttabsearch').GetValue())
bt.OPTLNKCRDPDC = bool(cn('setup_optlnkcrdpdc').GetValue())
bt.OPTLNKGRDPDC = bool(cn('setup_optlnkgrdpdc').GetValue())
bt.OPTLNKCRDCLI = bool(cn('setup_optlnkcrdcli').GetValue())
bt.OPTLNKGRDCLI = bool(cn('setup_optlnkgrdcli').GetValue())
bt.OPTLNKCRDFOR = bool(cn('setup_optlnkcrdfor').GetValue())
bt.OPTLNKGRDFOR = bool(cn('setup_optlnkgrdfor').GetValue())
bt.OPTNOTIFICHE = bool(cn('setup_optnotifiche').GetValue())
bt.OPTBACKUPDIR = cn('setup_optbackupdir').GetValue()
bt.MAGATTGRIP = bool(cn('setup_magattgrip').GetValue())
bt.MAGATTGRIF = bool(cn('setup_magattgrif').GetValue())
bt.MAGCDEGRIP = bool(cn('setup_magcdegrip').GetValue())
bt.MAGCDEGRIF = bool(cn('setup_magcdegrif').GetValue())
bt.MAGDATGRIP = bool(cn('setup_magdatgrip').GetValue())
bt.MAGAGGGRIP = bool(cn('setup_magagggrip').GetValue())
bt.MAGALWGRIP = bool(cn('setup_magalwgrip').GetValue())
bt.MAGPZCONF = bool(cn('setup_magpzconf').GetValue())
bt.MAGPZGRIP = bool(cn('setup_magpzgrip').GetValue())
bt.MAGPPROMO = bool(cn('setup_magppromo').GetValue())
bt.MAGVISGIA = bool(cn('setup_magvisgia').GetValue())
bt.MAGVISPRE = bool(cn('setup_magvispre').GetValue())
bt.MAGVISCOS = bool(cn('setup_magviscos').GetValue())
bt.MAGVISCPF = bool(cn('setup_magviscpf').GetValue())
bt.MAGVISBCD = bool(cn('setup_magvisbcd').GetValue())
bt.MAGGESACC = bool(cn('setup_maggesacc').GetValue())
bt.MAGPROVATT = bool(cn('setup_magprovatt').GetValue())
bt.MAGPROVCLI = bool(cn('setup_magprovcli').GetValue())
bt.MAGPROVPRO = bool(cn('setup_magprovpro').GetValue())
bt.MAGPROVMOV = cn('setup_magprovmov').GetValue()
bt.MAGPROVSEQ = cn('setup_magprovseq').GetValue()
bt.MAGNOCODEDES = cn('setup_magnocodedes').GetValue()
bt.MAGNOCODEVET = cn('setup_magnocodevet').GetValue()
bt.MAGNOCDEFDES = cn('setup_magnocdefdes').GetValue()
bt.MAGNOCDEFVET = cn('setup_magnocdefvet').GetValue()
bt.MAGEXTRAVET = cn('setup_magextravet').GetValue()
bt.MAGNUMLIS = int(cn('setup_magnumlis').GetValue())
bt.MAGDATLIS = int(cn('setup_magdatlis').GetValue())
bt.MAGFORLIS = int(cn('setup_magforlis').GetValue())
bt.MAGERPLIS = cn('setup_magerplis').GetValue()
bt.MAGESPLIS = cn('setup_magesplis').GetValue()
bt.MAGVRGLIS = cn('setup_magvrglis').GetValue()
bt.MAGVSGLIS = cn('setup_magvsglis').GetValue()
bt.MAGREPLIS = cn('setup_magreplis').GetValue()
bt.MAGSEPLIS = cn('setup_magseplis').GetValue()
bt.MAGRELLIS = cn('setup_magrellis').GetValue()
bt.MAGSELLIS = cn('setup_magsellis').GetValue()
bt.defstru()
out = wx.GetApp().TestDBVers(force=True)
if not out:
bt.TIPO_CONTAB,
bt.CONSOVGES,
bt.CONBILRICL,
bt.CONBILRCEE,
bt.CONATTRITACC,
bt.CONPERRITACC,
bt.CONCOMRITACC,
bt.MAGPRE_DECIMALS,
bt.MAGQTA_DECIMALS,
bt.VALINT_DECIMALS,
bt.MAGEAN_PREFIX,
bt.MAGSCOCAT,
bt.MAGSCORPCOS,
bt.MAGSCORPPRE,
bt.GESFIDICLI,
bt.MAGIMGPROD,
bt.MAGDIGSEARCH,
bt.MAGEXCSEARCH,
bt.OPTDIGSEARCH,
bt.OPTTABSEARCH,
bt.OPTLNKCRDPDC,
bt.OPTLNKGRDPDC,
bt.OPTLNKCRDCLI,
bt.OPTLNKGRDCLI,
bt.OPTLNKCRDFOR,
bt.OPTLNKGRDFOR,
bt.OPTNOTIFICHE,
bt.OPTBACKUPDIR,
bt.MAGATTGRIP,
bt.MAGATTGRIF,
bt.MAGCDEGRIP,
bt.MAGCDEGRIF,
bt.MAGDATGRIP,
bt.MAGAGGGRIP,
bt.MAGALWGRIP,
bt.MAGPZCONF,
bt.MAGPZGRIP,
bt.MAGPPROMO,
bt.MAGVISGIA,
bt.MAGVISPRE,
bt.MAGVISCOS,
bt.MAGVISCPF,
bt.MAGVISBCD,
bt.MAGGESACC,
bt.MAGPROVATT,
bt.MAGPROVCLI,
bt.MAGPROVPRO,
bt.MAGPROVMOV,
bt.MAGPROVSEQ,
bt.MAGNOCODEDES,
bt.MAGNOCODEVET,
bt.MAGNOCDEFDES,
bt.MAGNOCDEFVET,
bt.MAGEXTRAVET,
bt.MAGNUMLIS,
bt.MAGDATLIS,
bt.MAGFORLIS,
bt.MAGERPLIS,
bt.MAGESPLIS,
bt.MAGVRGLIS,
bt.MAGVSGLIS,
bt.MAGREPLIS,
bt.MAGSEPLIS,
bt.MAGRELLIS,
bt.MAGSELLIS = old
if out:
cfg = CfgContab()
cfg.SetEsercizio(Env.Azienda.Login.dataElab)
return out
def TestEsercizio(self):
out = True
cnv = lambda x: self.FindWindowByName('setup_'+x).GetValue()
try:
data = adb.DateTime.DateTime(Env.Azienda.Esercizio.dataElab.year,
cnv('esercizio_startmm'),
cnv('esercizio_startgg'))
except:
out = False
return out
# ------------------------------------------------------------------------------
class AziendaSetupDialog(aw.Dialog):
    """Modal dialog hosting the company setup panel."""
    def __init__(self, *args, **kwargs):
        # Inject the default title unless one was passed (positionally or by key).
        if len(args) < 3 and 'title' not in kwargs:
            kwargs['title'] = FRAME_TITLE
        aw.Dialog.__init__(self, *args, **kwargs)
        self.panel = AziendaSetupPanel(self, -1)
        self.AddSizedPanel(self.panel)
        self.CenterOnScreen()
        self.Bind(wx.EVT_BUTTON, self.OnSave, id=wdr.ID_BTNOK)
    def OnSave(self, event):
        """Notify the main frame that menus may have changed, then close."""
        menu_event = wx.PyCommandEvent(lib._evtCHANGEMENU)
        wx.GetApp().GetTopWindow().AddPendingEvent(menu_event)
        self.EndModal(1)
Aggiunti i controlli di validazione per l'inserimento della valuta di conto e
del magazzino di default.
#!/bin/env python
# -*- coding: utf-8 -*-
# ------------------------------------------------------------------------------
# Name: cfg/azisetup.py
# Author: Fabio Cassini <fabio.cassini@gmail.com>
# Copyright: (C) 2011 Astra S.r.l. C.so Cavallotti, 122 18038 Sanremo (IM)
# ------------------------------------------------------------------------------
# This file is part of X4GA
#
# X4GA is free software: you can redistribute it and/or modify
# it under the terms of the Affero GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# X4GA is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with X4GA. If not, see <http://www.gnu.org/licenses/>.
# ------------------------------------------------------------------------------
import wx
import Env
bt = Env.Azienda.BaseTab
import awc.controls.windows as aw
import awc.controls.radiobox as awradio
import awc.controls.checkbox as awcheck
import awc.controls.linktable as lt
from awc.controls.numctrl import NumCtrl
from awc.controls.datectrl import DateCtrl
import cfg.azisetup_wdr as wdr
import stormdb as adb
from cfg.cfgcontab import CfgContab
import os
import lib
FRAME_TITLE = "Setup azienda"
class _SetupPanel(aw.Panel):
    """Base panel for setup screens backed by the 'cfgsetup' key/value table.

    Every child control whose name starts with ``setup_`` is mapped to the
    row whose ``chiave`` column equals the rest of the control name.  Which
    column stores the value (flag / importo / data / descriz) depends on the
    control type.  Subclasses must implement ``Validate``.
    """
    def __init__(self, *args, **kwargs):
        aw.Panel.__init__(self, *args, **kwargs)
        # Table adapter for the per-company configuration key/value store.
        self.dbsetup = adb.DbTable(bt.TABNAME_CFGSETUP, 'setup')
    def EncodeValue(self, value, name):
        """Hook: transform *value* before writing it into setup.descriz."""
        return value
    def DecodeValue(self, value, name):
        """Hook: transform setup.descriz *value* before loading it into a control."""
        return value
    def SetupRead(self):
        """Load each 'setup_*' control from its matching setup-table row."""
        db = self.dbsetup
        cn = self.FindWindowByName  # NOTE(review): bound but unused in this method
        for ctr in aw.awu.GetAllChildrens(self):
            if ctr.GetName()[:6] == 'setup_':
                name = ctr.GetName()[6:]
                if db.Retrieve('setup.chiave=%s', name) and db.RowsCount() == 1:
                    # Pick the storage column matching the control type.
                    if isinstance(ctr, (awradio.RadioBox, awcheck.CheckBox)):
                        val = db.flag
                    # NOTE(review): int/long in this isinstance look unreachable
                    # for wx controls — confirm original intent.
                    elif isinstance(ctr, (int, long, lt.LinkTable, NumCtrl)):
                        val = db.importo
                    elif isinstance(ctr, DateCtrl):
                        val = db.data
                    else:
                        val = self.DecodeValue(db.descriz, name)
                    ctr.SetValue(val)
    def SetupWrite(self):
        """Persist every 'setup_*' control back to the setup table.

        Returns True on success; shows a dialog and returns False on the
        first failing row.  Always notifies the top window afterwards so the
        application menus can be rebuilt.
        """
        out = True
        db = self.dbsetup
        cn = self.FindWindowByName
        for ctr in aw.awu.GetAllChildrens(self):
            name = ctr.GetName()
            if name[:6] == 'setup_':
                val = cn(name).GetValue()
                name = ctr.GetName()[6:]
                if db.Retrieve('setup.chiave=%s', name):
                    if db.RowsCount() == 0:
                        # Key not present yet: create its row on the fly.
                        db.CreateNewRow()
                        db.chiave = name
                    if isinstance(ctr, (awradio.RadioBox, awcheck.CheckBox)):
                        db.flag = val
                    elif isinstance(ctr, (int, long, lt.LinkTable, NumCtrl)):
                        db.importo = val
                    elif isinstance(ctr, DateCtrl):
                        db.data = val
                    else:
                        db.descriz = self.EncodeValue(val, name)
                    if not db.Save():
                        aw.awu.MsgDialog(self, message="Problema in aggiornamento setup:\n%s" % repr(db.GetError()))
                        out = False
                        break
                else:
                    aw.awu.MsgDialog(self, message="Problema in lettura setup:\n%s" % db.GetError())
                    out = False
                    break
        import lib
        # Tell the main frame that settings changed so menus get refreshed.
        evt = wx.PyCommandEvent(lib._evtCHANGEMENU)
        wx.GetApp().GetTopWindow().AddPendingEvent(evt)
        return out
    def OnConfirm(self, event):
        """Validate and persist; propagate the event only on full success."""
        if self.Validate():
            if self.SetupWrite():
                event.Skip()
    def Validate(self):
        # Abstract method: concrete setup panels must provide validation.
        raise Exception, 'Classe non istanziabile'
# ------------------------------------------------------------------------------
class AziendaSetupPanel(_SetupPanel):
"""
Impostazione setup azienda.
"""
def __init__(self, *args, **kwargs):
_SetupPanel.__init__(self, *args, **kwargs)
wdr.AziendaSetup(self)
cn = lambda x: self.FindWindowByName(x)
for name, vals in (('tipo_contab', 'OS'),
('liqiva_periodic', 'MT')):
name = 'setup_'+name
cn(name).SetDataLink(name, list(vals))
tf = {True: '1', False: '0'}
for name, vals in (('setup_conbilricl', tf),
('setup_conbilrcee', tf),
('setup_conattritacc', tf),
('setup_magscocat', tf),
('setup_gesfidicli', tf),
('setup_magimgprod', tf),
('setup_magdigsearch', tf),
('setup_optdigsearch', tf),
('setup_opttabsearch', tf),
('setup_optlnkcrdpdc', tf),
('setup_optlnkgrdpdc', tf),
('setup_optlnkcrdcli', tf),
('setup_optlnkgrdcli', tf),
('setup_optlnkcrdfor', tf),
('setup_optlnkgrdfor', tf),
('setup_optnotifiche', tf),
('setup_magpzconf', tf),
('setup_magpzgrip', tf),
('setup_magppromo', tf),
('setup_magprovatt', tf),
('setup_magprovcli', tf),
('setup_magprovpro', tf),
('setup_magvisgia', tf),
('setup_magvispre', tf),
('setup_magviscos', tf),
('setup_magviscpf', tf),
('setup_magvisbcd', tf),
('setup_maggesacc', tf),
):
try:
cn(name).SetDataLink(name, vals)
except:
pass
self.SetupRead()
self.Bind(wx.EVT_BUTTON, self.OnConfirm, id=wdr.ID_BTNOK)
def SetupRead(self):
out = _SetupPanel.SetupRead(self)
cn = self.FindWindowByName
cn('setup_consovges').SetValue(bt.CONSOVGES) #il flag è numerico, ma memorizzato in setup.flag che è carattere
logo = os.path.join(self.GetLogoPath(), self.GetLogoFileName())
if os.path.isfile(logo):
cn('azienda_logo').display_image(logo)
return out
def SetupWrite(self):
cn = self.FindWindowByName
out = _SetupPanel.SetupWrite(self)
if out:
#aggiornamento ragione sociale su db aziende
out = False
cod = Env.Azienda.codice
host = Env.Azienda.DB.servername
user = Env.Azienda.DB.username
pswd = Env.Azienda.DB.password
dba = adb.db.DB(dbType=getattr(adb.db.__database__, '_dbType'), globalConnection=False)
if dba.Connect(host=host, user=user, passwd=pswd, db='x4'):
dbz = adb.DbTable('aziende', 'azi', db=dba)
if dbz.Retrieve('azi.codice=%s', cod):
if dbz.RowsCount() == 1:
dbz.codice = Env.Azienda.codice = cn('setup_azienda_codice').GetValue()
dbz.azienda = Env.Azienda.descrizione = cn('setup_azienda_ragsoc').GetValue()
if dbz.Save():
out = True
else:
aw.awu.MsgDialog(self, message="Problema in fase di aggiornamento della tabella aziende:\n%s" % repr(dbz.GetError()))
else:
aw.awu.MsgDialog(self, message="Codice '%s' non trovato su tabella aziende" % cod)
else:
aw.awu.MsgDialog(self, message="Problema in fase di aggiornamento della tabella aziende:\n%s" % repr(dbz.GetError()))
else:
aw.awu.MsgDialog(self, message="Problema di connessione al database delle aziende:\n%s" % dba._Error_MySQLdb)
dba.Close()
if out:
img = cn('azienda_logo')
if img.is_changed():
fns = img.get_image_filename()
if fns:
d = self.GetLogoPath(azienda_codice=cn('setup_azienda_codice').GetValue())
if d:
if not os.path.isdir(d):
os.makedirs(d)
try:
fnt = os.path.join(d, self.GetLogoFileName())
hs = open(fns, 'rb')
buf = hs.read()
hs.close()
ht = open(fnt, 'wb')
ht.write(buf)
ht.close()
cfg = Env.Azienda.config
p = cfg.get('Site', 'folder')
if os.path.isdir(p):
if aw.awu.MsgDialog(self, 'Aggiorno anche il logo aziendale su X4?', style=wx.ICON_QUESTION|wx.YES_NO|wx.NO_DEFAULT) == wx.ID_YES:
fnt = os.path.join(p, self.GetLogoFileName())
hs = open(fns, 'rb')
buf = hs.read()
hs.close()
ht = open(fnt, 'wb')
ht.write(buf)
ht.close()
except Exception, e:
aw.awu.MsgDialog(self, repr(e.args))
bt.ReadAziendaSetup()
return out
def GetLogoPath(self, azienda_codice=None):
import report
d = report.pathsub
if d:
d = os.path.join(d, 'immagini')
return d
def GetLogoFileName(self):
return 'logo_aziendale.jpg'
def Validate(self):
out = True
cn = self.FindWindowByName
ci = self.FindWindowById
for name in ('codice', 'ragsoc', 'indirizzo', 'cap', 'citta', 'prov'):
ctr = cn('setup_azienda_'+name)
if ctr.GetValue():
ctr.SetBackgroundColour(None)
else:
ctr.SetBackgroundColour(Env.Azienda.Colours.VALERR_BACKGROUND)
out = False
if out and not self.TestEsercizio():
aw.awu.MsgDialog(self, message="Giorno/Mese errati x l'esercizio", style=wx.ICON_ERROR)
out = False
if out and cn('setup_contab_valcon').GetValue() is None:
aw.awu.MsgDialog(self, message="Definire la valuta di conto", style=wx.ICON_ERROR)
out = False
if out and ci(wdr.ID_MAGDEFAULT).GetValue() is None:
aw.awu.MsgDialog(self, message="Definire il magazzino di default", style=wx.ICON_ERROR)
out = False
self.Refresh()
if out:
old = (bt.TIPO_CONTAB,
bt.CONBILRICL,
bt.CONBILRCEE,
bt.CONATTRITACC,
bt.CONPERRITACC,
bt.CONCOMRITACC,
bt.MAGPRE_DECIMALS,
bt.MAGQTA_DECIMALS,
bt.VALINT_DECIMALS,
bt.MAGEAN_PREFIX,
bt.MAGSCOCAT,
bt.MAGSCORPCOS,
bt.MAGSCORPPRE,
bt.GESFIDICLI,
bt.MAGIMGPROD,
bt.MAGDIGSEARCH,
bt.MAGEXCSEARCH,
bt.OPTDIGSEARCH,
bt.OPTTABSEARCH,
bt.OPTLNKCRDPDC,
bt.OPTLNKGRDPDC,
bt.OPTLNKCRDCLI,
bt.OPTLNKGRDCLI,
bt.OPTLNKCRDFOR,
bt.OPTLNKGRDFOR,
bt.OPTNOTIFICHE,
bt.OPTBACKUPDIR,
bt.MAGATTGRIP,
bt.MAGATTGRIF,
bt.MAGCDEGRIP,
bt.MAGCDEGRIF,
bt.MAGDATGRIP,
bt.MAGAGGGRIP,
bt.MAGALWGRIP,
bt.MAGPZCONF,
bt.MAGPZGRIP,
bt.MAGPPROMO,
bt.MAGVISGIA,
bt.MAGVISPRE,
bt.MAGVISCOS,
bt.MAGVISCPF,
bt.MAGVISBCD,
bt.MAGGESACC,
bt.MAGPROVATT,
bt.MAGPROVCLI,
bt.MAGPROVPRO,
bt.MAGPROVMOV,
bt.MAGPROVSEQ,
bt.MAGNOCODEDES,
bt.MAGNOCODEVET,
bt.MAGNOCDEFDES,
bt.MAGNOCDEFVET,
bt.MAGEXTRAVET,
bt.MAGNUMLIS,
bt.MAGDATLIS,
bt.MAGFORLIS,
bt.MAGERPLIS,
bt.MAGESPLIS,
bt.MAGVRGLIS,
bt.MAGVSGLIS,
bt.MAGREPLIS,
bt.MAGSEPLIS,
bt.MAGRELLIS,
bt.MAGSELLIS,)
bt.TIPO_CONTAB = cn('setup_tipo_contab').GetValue()
bt.CONSOVGES = cn('setup_consovges').GetValue()
bt.CONBILRICL = cn('setup_conbilricl').GetValue()
bt.CONBILRCEE = cn('setup_conbilrcee').GetValue()
bt.CONATTRITACC = cn('setup_conattritacc').GetValue()
bt.CONPERRITACC = cn('setup_conperritacc').GetValue()
bt.CONCOMRITACC = cn('setup_concomritacc').GetValue()
bt.MAGPRE_DECIMALS = cn('setup_magdec_prez').GetValue()
bt.MAGQTA_DECIMALS = cn('setup_magdec_qta').GetValue()
bt.VALINT_DECIMALS = cn('setup_contab_decimp').GetValue()
bt.MAGEAN_PREFIX = cn('setup_mageanprefix').GetValue()
bt.MAGSCOCAT = int(cn('setup_magscocat').GetValue())
bt.MAGSCORPCOS = cn('setup_magscorpcos').GetValue()
bt.MAGSCORPPRE = cn('setup_magscorppre').GetValue()
bt.GESFIDICLI = cn('setup_gesfidicli').GetValue()
bt.MAGIMGPROD = cn('setup_magimgprod').GetValue()
bt.MAGDIGSEARCH = bool(cn('setup_magdigsearch').GetValue())
bt.MAGEXCSEARCH = bool(cn('setup_magexcsearch').GetValue())
bt.OPTDIGSEARCH = bool(cn('setup_optdigsearch').GetValue())
bt.OPTTABSEARCH = bool(cn('setup_opttabsearch').GetValue())
bt.OPTLNKCRDPDC = bool(cn('setup_optlnkcrdpdc').GetValue())
bt.OPTLNKGRDPDC = bool(cn('setup_optlnkgrdpdc').GetValue())
bt.OPTLNKCRDCLI = bool(cn('setup_optlnkcrdcli').GetValue())
bt.OPTLNKGRDCLI = bool(cn('setup_optlnkgrdcli').GetValue())
bt.OPTLNKCRDFOR = bool(cn('setup_optlnkcrdfor').GetValue())
bt.OPTLNKGRDFOR = bool(cn('setup_optlnkgrdfor').GetValue())
bt.OPTNOTIFICHE = bool(cn('setup_optnotifiche').GetValue())
bt.OPTBACKUPDIR = cn('setup_optbackupdir').GetValue()
bt.MAGATTGRIP = bool(cn('setup_magattgrip').GetValue())
bt.MAGATTGRIF = bool(cn('setup_magattgrif').GetValue())
bt.MAGCDEGRIP = bool(cn('setup_magcdegrip').GetValue())
bt.MAGCDEGRIF = bool(cn('setup_magcdegrif').GetValue())
bt.MAGDATGRIP = bool(cn('setup_magdatgrip').GetValue())
bt.MAGAGGGRIP = bool(cn('setup_magagggrip').GetValue())
bt.MAGALWGRIP = bool(cn('setup_magalwgrip').GetValue())
bt.MAGPZCONF = bool(cn('setup_magpzconf').GetValue())
bt.MAGPZGRIP = bool(cn('setup_magpzgrip').GetValue())
bt.MAGPPROMO = bool(cn('setup_magppromo').GetValue())
bt.MAGVISGIA = bool(cn('setup_magvisgia').GetValue())
bt.MAGVISPRE = bool(cn('setup_magvispre').GetValue())
bt.MAGVISCOS = bool(cn('setup_magviscos').GetValue())
bt.MAGVISCPF = bool(cn('setup_magviscpf').GetValue())
bt.MAGVISBCD = bool(cn('setup_magvisbcd').GetValue())
bt.MAGGESACC = bool(cn('setup_maggesacc').GetValue())
bt.MAGPROVATT = bool(cn('setup_magprovatt').GetValue())
bt.MAGPROVCLI = bool(cn('setup_magprovcli').GetValue())
bt.MAGPROVPRO = bool(cn('setup_magprovpro').GetValue())
bt.MAGPROVMOV = cn('setup_magprovmov').GetValue()
bt.MAGPROVSEQ = cn('setup_magprovseq').GetValue()
bt.MAGNOCODEDES = cn('setup_magnocodedes').GetValue()
bt.MAGNOCODEVET = cn('setup_magnocodevet').GetValue()
bt.MAGNOCDEFDES = cn('setup_magnocdefdes').GetValue()
bt.MAGNOCDEFVET = cn('setup_magnocdefvet').GetValue()
bt.MAGEXTRAVET = cn('setup_magextravet').GetValue()
bt.MAGNUMLIS = int(cn('setup_magnumlis').GetValue())
bt.MAGDATLIS = int(cn('setup_magdatlis').GetValue())
bt.MAGFORLIS = int(cn('setup_magforlis').GetValue())
bt.MAGERPLIS = cn('setup_magerplis').GetValue()
bt.MAGESPLIS = cn('setup_magesplis').GetValue()
bt.MAGVRGLIS = cn('setup_magvrglis').GetValue()
bt.MAGVSGLIS = cn('setup_magvsglis').GetValue()
bt.MAGREPLIS = cn('setup_magreplis').GetValue()
bt.MAGSEPLIS = cn('setup_magseplis').GetValue()
bt.MAGRELLIS = cn('setup_magrellis').GetValue()
bt.MAGSELLIS = cn('setup_magsellis').GetValue()
bt.defstru()
out = wx.GetApp().TestDBVers(force=True)
if not out:
bt.TIPO_CONTAB,
bt.CONSOVGES,
bt.CONBILRICL,
bt.CONBILRCEE,
bt.CONATTRITACC,
bt.CONPERRITACC,
bt.CONCOMRITACC,
bt.MAGPRE_DECIMALS,
bt.MAGQTA_DECIMALS,
bt.VALINT_DECIMALS,
bt.MAGEAN_PREFIX,
bt.MAGSCOCAT,
bt.MAGSCORPCOS,
bt.MAGSCORPPRE,
bt.GESFIDICLI,
bt.MAGIMGPROD,
bt.MAGDIGSEARCH,
bt.MAGEXCSEARCH,
bt.OPTDIGSEARCH,
bt.OPTTABSEARCH,
bt.OPTLNKCRDPDC,
bt.OPTLNKGRDPDC,
bt.OPTLNKCRDCLI,
bt.OPTLNKGRDCLI,
bt.OPTLNKCRDFOR,
bt.OPTLNKGRDFOR,
bt.OPTNOTIFICHE,
bt.OPTBACKUPDIR,
bt.MAGATTGRIP,
bt.MAGATTGRIF,
bt.MAGCDEGRIP,
bt.MAGCDEGRIF,
bt.MAGDATGRIP,
bt.MAGAGGGRIP,
bt.MAGALWGRIP,
bt.MAGPZCONF,
bt.MAGPZGRIP,
bt.MAGPPROMO,
bt.MAGVISGIA,
bt.MAGVISPRE,
bt.MAGVISCOS,
bt.MAGVISCPF,
bt.MAGVISBCD,
bt.MAGGESACC,
bt.MAGPROVATT,
bt.MAGPROVCLI,
bt.MAGPROVPRO,
bt.MAGPROVMOV,
bt.MAGPROVSEQ,
bt.MAGNOCODEDES,
bt.MAGNOCODEVET,
bt.MAGNOCDEFDES,
bt.MAGNOCDEFVET,
bt.MAGEXTRAVET,
bt.MAGNUMLIS,
bt.MAGDATLIS,
bt.MAGFORLIS,
bt.MAGERPLIS,
bt.MAGESPLIS,
bt.MAGVRGLIS,
bt.MAGVSGLIS,
bt.MAGREPLIS,
bt.MAGSEPLIS,
bt.MAGRELLIS,
bt.MAGSELLIS = old
if out:
cfg = CfgContab()
cfg.SetEsercizio(Env.Azienda.Login.dataElab)
return out
def TestEsercizio(self):
out = True
cnv = lambda x: self.FindWindowByName('setup_'+x).GetValue()
try:
data = adb.DateTime.DateTime(Env.Azienda.Esercizio.dataElab.year,
cnv('esercizio_startmm'),
cnv('esercizio_startgg'))
except:
out = False
return out
# ------------------------------------------------------------------------------
class AziendaSetupDialog(aw.Dialog):
    """Modal dialog hosting the company setup panel."""
    def __init__(self, *args, **kwargs):
        # Inject the default title unless one was passed (positionally or by key).
        if len(args) < 3 and 'title' not in kwargs:
            kwargs['title'] = FRAME_TITLE
        aw.Dialog.__init__(self, *args, **kwargs)
        self.panel = AziendaSetupPanel(self, -1)
        self.AddSizedPanel(self.panel)
        self.CenterOnScreen()
        self.Bind(wx.EVT_BUTTON, self.OnSave, id=wdr.ID_BTNOK)
    def OnSave(self, event):
        """Notify the main frame that menus may have changed, then close."""
        menu_event = wx.PyCommandEvent(lib._evtCHANGEMENU)
        wx.GetApp().GetTopWindow().AddPendingEvent(menu_event)
        self.EndModal(1)
|
from textwrap import dedent
from functools import partial
from numpy import (
bool_,
dtype,
float32,
float64,
int32,
int64,
int16,
uint16,
ndarray,
uint32,
uint8,
)
from six import iteritems
from toolz import merge_with
from zipline.errors import (
WindowLengthNotPositive,
WindowLengthTooLong,
)
from zipline.lib.labelarray import LabelArray
from zipline.utils.numpy_utils import (
datetime64ns_dtype,
float64_dtype,
int64_dtype,
uint8_dtype,
)
from zipline.utils.memoize import lazyval
# These class names are all the same because of our bootleg templating system.
from ._float64window import AdjustedArrayWindow as Float64Window
from ._int64window import AdjustedArrayWindow as Int64Window
from ._labelwindow import AdjustedArrayWindow as LabelWindow
from ._uint8window import AdjustedArrayWindow as UInt8Window
# Dtypes coerced to uint8 storage and viewed by users as bool
# (see _normalize_array).
BOOL_DTYPES = frozenset(
    map(dtype, [bool_, uint8]),
)
# Float dtypes; all are upcast to float64 for storage (see _normalize_array).
FLOAT_DTYPES = frozenset(
    map(dtype, [float32, float64]),
)
INT_DTYPES = frozenset(
    # NOTE: uint64 not supported because it can't be safely cast to int64.
    map(dtype, [int16, uint16, int32, int64, uint32]),
)
# Datetime dtypes; stored as int64 nanoseconds and viewed as datetime64[ns].
DATETIME_DTYPES = frozenset(
    map(dtype, ['datetime64[ns]', 'datetime64[D]']),
)
# We use object arrays for strings.
OBJECT_DTYPES = frozenset(map(dtype, ['O']))
# numpy `kind` codes for bytes ('S') and unicode ('U') string dtypes.
STRING_KINDS = frozenset(['S', 'U'])
# Full set of baseline dtypes an AdjustedArray can be built from
# (see can_represent_dtype).
REPRESENTABLE_DTYPES = BOOL_DTYPES.union(
    FLOAT_DTYPES,
    INT_DTYPES,
    DATETIME_DTYPES,
    OBJECT_DTYPES,
)
def can_represent_dtype(dtype):
    """
    Return True if an AdjustedArray can be built over a baseline of ``dtype``.
    """
    return dtype.kind in STRING_KINDS or dtype in REPRESENTABLE_DTYPES
def is_categorical(dtype):
    """
    Return True if values of ``dtype`` are stored in LabelArrays rather than
    in plain ndarrays.
    """
    if dtype in OBJECT_DTYPES:
        return True
    return dtype.kind in STRING_KINDS
# Maps each concrete storage dtype to its specialized window iterator class
# (see AdjustedArray._iterator_type; LabelArray data uses LabelWindow instead).
CONCRETE_WINDOW_TYPES = {
    float64_dtype: Float64Window,
    int64_dtype: Int64Window,
    uint8_dtype: UInt8Window,
}
def _normalize_array(data, missing_value):
    """
    Coerce buffer data for an AdjustedArray into a standard scalar
    representation, returning the coerced array and a dict of arguments to
    pass to np.view when providing a user-facing view of the underlying data.

    - float* data is coerced to float64 with viewtype float64.
    - int32, int64, and uint32 are converted to int64 with viewtype int64.
    - datetime[*] data is coerced to int64 with a viewtype of datetime64[ns].
    - bool_ data is coerced to uint8 with a viewtype of bool_.

    Parameters
    ----------
    data : np.ndarray

    Returns
    -------
    coerced, view_kwargs : (np.ndarray, dict)
        The input ``data`` array coerced to the appropriate pipeline type.
        This may return the original array or a view over the same data.
    """
    # LabelArrays are already in canonical form.
    if isinstance(data, LabelArray):
        return data, {}

    dt = data.dtype
    if dt in BOOL_DTYPES:
        return data.astype(uint8, copy=False), {'dtype': dtype(bool_)}
    if dt in FLOAT_DTYPES:
        return data.astype(float64, copy=False), {'dtype': dtype(float64)}
    if dt in INT_DTYPES:
        return data.astype(int64, copy=False), {'dtype': dtype(int64)}
    if is_categorical(dt):
        if not isinstance(missing_value, LabelArray.SUPPORTED_SCALAR_TYPES):
            raise TypeError(
                "Invalid missing_value for categorical array.\n"
                "Expected None, bytes or unicode. Got %r." % missing_value,
            )
        return LabelArray(data, missing_value), {}
    if dt.kind == 'M':
        # Store datetimes as raw int64 nanoseconds; callers view them back
        # as datetime64[ns].
        try:
            as_ints = data.astype('datetime64[ns]', copy=False).view('int64')
        except OverflowError:
            raise ValueError(
                "AdjustedArray received a datetime array "
                "not representable as datetime64[ns].\n"
                "Min Date: %s\n"
                "Max Date: %s\n"
                % (data.min(), data.max())
            )
        return as_ints, {'dtype': datetime64ns_dtype}

    raise TypeError(
        "Don't know how to construct AdjustedArray "
        "on data of type %s." % dt
    )
def _merge_simple(adjustment_lists, front_idx, back_idx):
"""
Merge lists of new and existing adjustments for a given index by appending
or prepending new adjustments to existing adjustments.
Notes
-----
This method is meant to be used with ``toolz.merge_with`` to merge
adjustment mappings. In case of a collision ``adjustment_lists`` contains
two lists, existing adjustments at index 0 and new adjustments at index 1.
When there are no collisions, ``adjustment_lists`` contains a single list.
Parameters
----------
adjustment_lists : list[list[Adjustment]]
List(s) of new and/or existing adjustments for a given index.
front_idx : int
Index of list in ``adjustment_lists`` that should be used as baseline
in case of a collision.
back_idx : int
Index of list in ``adjustment_lists`` that should extend baseline list
in case of a collision.
Returns
-------
adjustments : list[Adjustment]
List of merged adjustments for a given index.
"""
if len(adjustment_lists) == 1:
return list(adjustment_lists[0])
else:
return adjustment_lists[front_idx] + adjustment_lists[back_idx]
# Dispatch table for AdjustedArray.update_adjustments: on index collisions,
# 'append' places new adjustments after existing ones, 'prepend' before them.
_merge_methods = {
    'append': partial(_merge_simple, front_idx=0, back_idx=1),
    'prepend': partial(_merge_simple, front_idx=1, back_idx=0),
}
class AdjustedArray(object):
    """
    An array that can be iterated with a variable-length window, and which can
    provide different views on data from different perspectives.

    Parameters
    ----------
    data : np.ndarray
        The baseline data values. This array may be mutated by
        ``traverse(..., copy=False)`` calls.
    adjustments : dict[int -> list[Adjustment]]
        A dict mapping row indices to lists of adjustments to apply when we
        reach that row.
    missing_value : object
        A value to use to fill missing data in yielded windows.
        Should be a value coercible to `data.dtype`.
    """
    __slots__ = (
        '_data',          # canonical storage array (see _normalize_array)
        '_view_kwargs',   # kwargs for ndarray.view exposing user-facing dtype
        'adjustments',
        'missing_value',
        '_invalidated',   # True once traverse(copy=False) has been called
        '__weakref__',    # __slots__ classes must opt in to weak references
    )

    def __init__(self, data, adjustments, missing_value):
        # Coerce ``data`` to a canonical storage dtype and remember the view
        # kwargs needed to re-expose the user-facing dtype.
        self._data, self._view_kwargs = _normalize_array(data, missing_value)
        self.adjustments = adjustments
        self.missing_value = missing_value
        self._invalidated = False

    def update_adjustments(self, adjustments, method):
        """
        Merge ``adjustments`` with existing adjustments, handling index
        collisions according to ``method``.

        Parameters
        ----------
        adjustments : dict[int -> list[Adjustment]]
            The mapping of row indices to lists of adjustments that should be
            appended to existing adjustments.
        method : {'append', 'prepend'}
            How to handle index collisions. If 'append', new adjustments will
            be applied after previously-existing adjustments. If 'prepend', new
            adjustments will be applied before previously-existing adjustments.

        Raises
        ------
        ValueError
            If ``method`` is not one of the keys of ``_merge_methods``.
        """
        try:
            merge_func = _merge_methods[method]
        except KeyError:
            raise ValueError(
                "Invalid merge method %s\n"
                "Valid methods are: %s" % (method, ', '.join(_merge_methods))
            )
        # merge_with gathers values per key across both dicts and applies
        # merge_func to each key's list(s).
        self.adjustments = merge_with(
            merge_func,
            self.adjustments,
            adjustments,
        )

    @property
    def data(self):
        """
        The data stored in this array.
        """
        # Re-expose the storage buffer under the user-facing dtype.
        return self._data.view(**self._view_kwargs)

    @lazyval
    def dtype(self):
        """
        The dtype of the data stored in this array.
        """
        return self._view_kwargs.get('dtype') or self._data.dtype

    @lazyval
    def _iterator_type(self):
        """
        The iterator produced when `traverse` is called on this Array.
        """
        if isinstance(self._data, LabelArray):
            return LabelWindow
        return CONCRETE_WINDOW_TYPES[self._data.dtype]

    def traverse(self,
                 window_length,
                 offset=0,
                 perspective_offset=0,
                 copy=True):
        """
        Produce an iterator rolling windows rows over our data.
        Each emitted window will have `window_length` rows.

        Parameters
        ----------
        window_length : int
            The number of rows in each emitted window.
        offset : int, optional
            Number of rows to skip before the first window. Default is 0.
        perspective_offset : int, optional
            Number of rows past the end of the current window from which to
            "view" the underlying data.
        copy : bool, optional
            Copy the underlying data. If ``copy=False``, the adjusted array
            will be invalidated and cannot be traversed again.

        Raises
        ------
        ValueError
            If this array was previously invalidated by a
            ``traverse(copy=False)`` call.
        """
        if self._invalidated:
            raise ValueError('cannot traverse invalidated AdjustedArray')
        data = self._data
        if copy:
            data = data.copy()
        else:
            # Without a copy, the window iterator works on our baseline
            # buffer, so no further traversals are allowed.
            self._invalidated = True
        _check_window_params(data, window_length)
        return self._iterator_type(
            data,
            self._view_kwargs,
            self.adjustments,
            offset,
            window_length,
            perspective_offset,
            rounding_places=None,
        )

    def inspect(self):
        """
        Return a string representation of the data stored in this array.
        """
        return dedent(
            """\
            Adjusted Array ({dtype}):
            Data:
            {data!r}
            Adjustments:
            {adjustments}
            """
        ).format(
            dtype=self.dtype.name,
            data=self.data,
            adjustments=self.adjustments,
        )

    def update_labels(self, func):
        """
        Map a function over baseline and adjustment values in place.

        Note that the baseline data values must be a LabelArray.

        Raises
        ------
        TypeError
            If the baseline data is not a LabelArray.
        """
        if not isinstance(self.data, LabelArray):
            raise TypeError(
                'update_labels only supported if data is of type LabelArray.'
            )
        # Map the baseline values.
        self._data = self._data.map(func)
        # Map each of the adjustments.
        for _, row_adjustments in iteritems(self.adjustments):
            for adjustment in row_adjustments:
                adjustment.value = func(adjustment.value)
def ensure_adjusted_array(ndarray_or_adjusted_array, missing_value):
    """
    Return the input as an AdjustedArray, wrapping a plain ndarray in an
    adjustment-free AdjustedArray.  Raises TypeError for anything else.
    """
    value = ndarray_or_adjusted_array
    if isinstance(value, AdjustedArray):
        return value
    if isinstance(value, ndarray):
        return AdjustedArray(value, {}, missing_value)
    raise TypeError(
        "Can't convert %s to AdjustedArray" %
        type(value).__name__
    )
def ensure_ndarray(ndarray_or_adjusted_array):
    """
    Return the input as a numpy ndarray.

    This is a no-op if the input is already an ndarray. If the input is an
    adjusted_array, this extracts a read-only view of its internal data buffer.

    Parameters
    ----------
    ndarray_or_adjusted_array : numpy.ndarray | zipline.data.adjusted_array

    Returns
    -------
    out : The input, converted to an ndarray.
    """
    value = ndarray_or_adjusted_array
    if isinstance(value, ndarray):
        return value
    if isinstance(value, AdjustedArray):
        return value.data
    raise TypeError(
        "Can't convert %s to ndarray" %
        type(value).__name__
    )
def _check_window_params(data, window_length):
"""
Check that a window of length `window_length` is well-defined on `data`.
Parameters
----------
data : np.ndarray[ndim=2]
The array of data to check.
window_length : int
Length of the desired window.
Returns
-------
None
Raises
------
WindowLengthNotPositive
If window_length < 1.
WindowLengthTooLong
If window_length is greater than the number of rows in `data`.
"""
if window_length < 1:
raise WindowLengthNotPositive(window_length=window_length)
if window_length > data.shape[0]:
raise WindowLengthTooLong(
nrows=data.shape[0],
window_length=window_length,
)
ENH: ndarray.copy() always sets the contiguity to 'C'; copy in Fortran order explicitly in traverse()
from textwrap import dedent
from functools import partial
from numpy import (
bool_,
dtype,
float32,
float64,
int32,
int64,
int16,
uint16,
ndarray,
uint32,
uint8,
)
from six import iteritems
from toolz import merge_with
from zipline.errors import (
WindowLengthNotPositive,
WindowLengthTooLong,
)
from zipline.lib.labelarray import LabelArray
from zipline.utils.numpy_utils import (
datetime64ns_dtype,
float64_dtype,
int64_dtype,
uint8_dtype,
)
from zipline.utils.memoize import lazyval
# These class names are all the same because of our bootleg templating system.
from ._float64window import AdjustedArrayWindow as Float64Window
from ._int64window import AdjustedArrayWindow as Int64Window
from ._labelwindow import AdjustedArrayWindow as LabelWindow
from ._uint8window import AdjustedArrayWindow as UInt8Window
# Dtypes coerced to uint8 storage and viewed by users as bool
# (see _normalize_array).
BOOL_DTYPES = frozenset(
    map(dtype, [bool_, uint8]),
)
# Float dtypes; all are upcast to float64 for storage (see _normalize_array).
FLOAT_DTYPES = frozenset(
    map(dtype, [float32, float64]),
)
INT_DTYPES = frozenset(
    # NOTE: uint64 not supported because it can't be safely cast to int64.
    map(dtype, [int16, uint16, int32, int64, uint32]),
)
# Datetime dtypes; stored as int64 nanoseconds and viewed as datetime64[ns].
DATETIME_DTYPES = frozenset(
    map(dtype, ['datetime64[ns]', 'datetime64[D]']),
)
# We use object arrays for strings.
OBJECT_DTYPES = frozenset(map(dtype, ['O']))
# numpy `kind` codes for bytes ('S') and unicode ('U') string dtypes.
STRING_KINDS = frozenset(['S', 'U'])
# Full set of baseline dtypes an AdjustedArray can be built from
# (see can_represent_dtype).
REPRESENTABLE_DTYPES = BOOL_DTYPES.union(
    FLOAT_DTYPES,
    INT_DTYPES,
    DATETIME_DTYPES,
    OBJECT_DTYPES,
)
def can_represent_dtype(dtype):
    """
    Return True if an AdjustedArray can be built over a baseline of ``dtype``.
    """
    return dtype.kind in STRING_KINDS or dtype in REPRESENTABLE_DTYPES
def is_categorical(dtype):
    """
    Return True if values of ``dtype`` are stored in LabelArrays rather than
    in plain ndarrays.
    """
    if dtype in OBJECT_DTYPES:
        return True
    return dtype.kind in STRING_KINDS
# Maps each concrete storage dtype to its specialized window iterator class
# (see AdjustedArray._iterator_type; LabelArray data uses LabelWindow instead).
CONCRETE_WINDOW_TYPES = {
    float64_dtype: Float64Window,
    int64_dtype: Int64Window,
    uint8_dtype: UInt8Window,
}
def _normalize_array(data, missing_value):
    """
    Coerce buffer data for an AdjustedArray into a standard scalar
    representation, returning the coerced array and a dict of arguments to
    pass to np.view when providing a user-facing view of the underlying data.

    - float* data is coerced to float64 with viewtype float64.
    - int32, int64, and uint32 are converted to int64 with viewtype int64.
    - datetime[*] data is coerced to int64 with a viewtype of datetime64[ns].
    - bool_ data is coerced to uint8 with a viewtype of bool_.

    Parameters
    ----------
    data : np.ndarray

    Returns
    -------
    coerced, view_kwargs : (np.ndarray, dict)
        The input ``data`` array coerced to the appropriate pipeline type.
        This may return the original array or a view over the same data.
    """
    # LabelArrays are already in canonical form.
    if isinstance(data, LabelArray):
        return data, {}

    dt = data.dtype
    if dt in BOOL_DTYPES:
        return data.astype(uint8, copy=False), {'dtype': dtype(bool_)}
    if dt in FLOAT_DTYPES:
        return data.astype(float64, copy=False), {'dtype': dtype(float64)}
    if dt in INT_DTYPES:
        return data.astype(int64, copy=False), {'dtype': dtype(int64)}
    if is_categorical(dt):
        if not isinstance(missing_value, LabelArray.SUPPORTED_SCALAR_TYPES):
            raise TypeError(
                "Invalid missing_value for categorical array.\n"
                "Expected None, bytes or unicode. Got %r." % missing_value,
            )
        return LabelArray(data, missing_value), {}
    if dt.kind == 'M':
        # Store datetimes as raw int64 nanoseconds; callers view them back
        # as datetime64[ns].
        try:
            as_ints = data.astype('datetime64[ns]', copy=False).view('int64')
        except OverflowError:
            raise ValueError(
                "AdjustedArray received a datetime array "
                "not representable as datetime64[ns].\n"
                "Min Date: %s\n"
                "Max Date: %s\n"
                % (data.min(), data.max())
            )
        return as_ints, {'dtype': datetime64ns_dtype}

    raise TypeError(
        "Don't know how to construct AdjustedArray "
        "on data of type %s." % dt
    )
def _merge_simple(adjustment_lists, front_idx, back_idx):
"""
Merge lists of new and existing adjustments for a given index by appending
or prepending new adjustments to existing adjustments.
Notes
-----
This method is meant to be used with ``toolz.merge_with`` to merge
adjustment mappings. In case of a collision ``adjustment_lists`` contains
two lists, existing adjustments at index 0 and new adjustments at index 1.
When there are no collisions, ``adjustment_lists`` contains a single list.
Parameters
----------
adjustment_lists : list[list[Adjustment]]
List(s) of new and/or existing adjustments for a given index.
front_idx : int
Index of list in ``adjustment_lists`` that should be used as baseline
in case of a collision.
back_idx : int
Index of list in ``adjustment_lists`` that should extend baseline list
in case of a collision.
Returns
-------
adjustments : list[Adjustment]
List of merged adjustments for a given index.
"""
if len(adjustment_lists) == 1:
return list(adjustment_lists[0])
else:
return adjustment_lists[front_idx] + adjustment_lists[back_idx]
# Dispatch table for AdjustedArray.update_adjustments: on index collisions,
# 'append' places new adjustments after existing ones, 'prepend' before them.
_merge_methods = {
    'append': partial(_merge_simple, front_idx=0, back_idx=1),
    'prepend': partial(_merge_simple, front_idx=1, back_idx=0),
}
class AdjustedArray(object):
    """
    An array that can be iterated with a variable-length window, and which can
    provide different views on data from different perspectives.

    Parameters
    ----------
    data : np.ndarray
        The baseline data values. This array may be mutated by
        ``traverse(..., copy=False)`` calls.
    adjustments : dict[int -> list[Adjustment]]
        A dict mapping row indices to lists of adjustments to apply when we
        reach that row.
    missing_value : object
        A value to use to fill missing data in yielded windows.
        Should be a value coercible to `data.dtype`.
    """
    __slots__ = (
        '_data',          # canonical storage array (see _normalize_array)
        '_view_kwargs',   # kwargs for ndarray.view exposing user-facing dtype
        'adjustments',
        'missing_value',
        '_invalidated',   # True once traverse(copy=False) has been called
        '__weakref__',    # __slots__ classes must opt in to weak references
    )

    def __init__(self, data, adjustments, missing_value):
        # Coerce ``data`` to a canonical storage dtype and remember the view
        # kwargs needed to re-expose the user-facing dtype.
        self._data, self._view_kwargs = _normalize_array(data, missing_value)
        self.adjustments = adjustments
        self.missing_value = missing_value
        self._invalidated = False

    def update_adjustments(self, adjustments, method):
        """
        Merge ``adjustments`` with existing adjustments, handling index
        collisions according to ``method``.

        Parameters
        ----------
        adjustments : dict[int -> list[Adjustment]]
            The mapping of row indices to lists of adjustments that should be
            appended to existing adjustments.
        method : {'append', 'prepend'}
            How to handle index collisions. If 'append', new adjustments will
            be applied after previously-existing adjustments. If 'prepend', new
            adjustments will be applied before previously-existing adjustments.

        Raises
        ------
        ValueError
            If ``method`` is not one of the keys of ``_merge_methods``.
        """
        try:
            merge_func = _merge_methods[method]
        except KeyError:
            raise ValueError(
                "Invalid merge method %s\n"
                "Valid methods are: %s" % (method, ', '.join(_merge_methods))
            )
        # merge_with gathers values per key across both dicts and applies
        # merge_func to each key's list(s).
        self.adjustments = merge_with(
            merge_func,
            self.adjustments,
            adjustments,
        )

    @property
    def data(self):
        """
        The data stored in this array.
        """
        # Re-expose the storage buffer under the user-facing dtype.
        return self._data.view(**self._view_kwargs)

    @lazyval
    def dtype(self):
        """
        The dtype of the data stored in this array.
        """
        return self._view_kwargs.get('dtype') or self._data.dtype

    @lazyval
    def _iterator_type(self):
        """
        The iterator produced when `traverse` is called on this Array.
        """
        if isinstance(self._data, LabelArray):
            return LabelWindow
        return CONCRETE_WINDOW_TYPES[self._data.dtype]

    def traverse(self,
                 window_length,
                 offset=0,
                 perspective_offset=0,
                 copy=True):
        """
        Produce an iterator rolling windows rows over our data.
        Each emitted window will have `window_length` rows.

        Parameters
        ----------
        window_length : int
            The number of rows in each emitted window.
        offset : int, optional
            Number of rows to skip before the first window. Default is 0.
        perspective_offset : int, optional
            Number of rows past the end of the current window from which to
            "view" the underlying data.
        copy : bool, optional
            Copy the underlying data. If ``copy=False``, the adjusted array
            will be invalidated and cannot be traversed again.

        Raises
        ------
        ValueError
            If this array was previously invalidated by a
            ``traverse(copy=False)`` call.
        """
        if self._invalidated:
            raise ValueError('cannot traverse invalidated AdjustedArray')
        data = self._data
        if copy:
            # Copy in Fortran (column-major) order rather than ndarray.copy()'s
            # default 'C' order.  NOTE(review): presumably the window iterators
            # benefit from column-major layout here — confirm against the
            # _float64window implementation.
            data = data.copy(order='F')
        else:
            # Without a copy, the window iterator works on our baseline
            # buffer, so no further traversals are allowed.
            self._invalidated = True
        _check_window_params(data, window_length)
        return self._iterator_type(
            data,
            self._view_kwargs,
            self.adjustments,
            offset,
            window_length,
            perspective_offset,
            rounding_places=None,
        )

    def inspect(self):
        """
        Return a string representation of the data stored in this array.
        """
        return dedent(
            """\
            Adjusted Array ({dtype}):
            Data:
            {data!r}
            Adjustments:
            {adjustments}
            """
        ).format(
            dtype=self.dtype.name,
            data=self.data,
            adjustments=self.adjustments,
        )

    def update_labels(self, func):
        """
        Map a function over baseline and adjustment values in place.

        Note that the baseline data values must be a LabelArray.

        Raises
        ------
        TypeError
            If the baseline data is not a LabelArray.
        """
        if not isinstance(self.data, LabelArray):
            raise TypeError(
                'update_labels only supported if data is of type LabelArray.'
            )
        # Map the baseline values.
        self._data = self._data.map(func)
        # Map each of the adjustments.
        for _, row_adjustments in iteritems(self.adjustments):
            for adjustment in row_adjustments:
                adjustment.value = func(adjustment.value)
def ensure_adjusted_array(ndarray_or_adjusted_array, missing_value):
    """
    Return the input as an AdjustedArray, wrapping a plain ndarray in an
    adjustment-free AdjustedArray.  Raises TypeError for anything else.
    """
    value = ndarray_or_adjusted_array
    if isinstance(value, AdjustedArray):
        return value
    if isinstance(value, ndarray):
        return AdjustedArray(value, {}, missing_value)
    raise TypeError(
        "Can't convert %s to AdjustedArray" %
        type(value).__name__
    )
def ensure_ndarray(ndarray_or_adjusted_array):
    """
    Return the input as a numpy ndarray.

    This is a no-op if the input is already an ndarray. If the input is an
    adjusted_array, this extracts a read-only view of its internal data buffer.

    Parameters
    ----------
    ndarray_or_adjusted_array : numpy.ndarray | zipline.data.adjusted_array

    Returns
    -------
    out : The input, converted to an ndarray.
    """
    value = ndarray_or_adjusted_array
    if isinstance(value, ndarray):
        return value
    if isinstance(value, AdjustedArray):
        return value.data
    raise TypeError(
        "Can't convert %s to ndarray" %
        type(value).__name__
    )
def _check_window_params(data, window_length):
"""
Check that a window of length `window_length` is well-defined on `data`.
Parameters
----------
data : np.ndarray[ndim=2]
The array of data to check.
window_length : int
Length of the desired window.
Returns
-------
None
Raises
------
WindowLengthNotPositive
If window_length < 1.
WindowLengthTooLong
If window_length is greater than the number of rows in `data`.
"""
if window_length < 1:
raise WindowLengthNotPositive(window_length=window_length)
if window_length > data.shape[0]:
raise WindowLengthTooLong(
nrows=data.shape[0],
window_length=window_length,
)
|
# Integration assertions for the `roads` tile layer: verify that
# walking_network / bicycle_network values from OSM route relations (and
# way tags) survive into the expected zoom/x/y tiles.
# (assert_has_feature is provided by the surrounding test harness.)

# highway=path, with route national (Pacific Crest Trail) at zoom 9
# https://www.openstreetmap.org/way/236361475
# https://www.openstreetmap.org/relation/1225378
assert_has_feature(
    9, 86, 197, 'roads',
    { 'kind': 'path', 'walking_network': 'nwn'})

# highway=path, with route regional (Merced Pass Trail) at zoom 10
# https://www.openstreetmap.org/way/373491941
# https://www.openstreetmap.org/relation/5549623
assert_has_feature(
    10, 171, 396, 'roads',
    { 'kind': 'path', 'walking_network': 'rwn'})

# highway=path, with route regional (Merced Pass Trail) at zoom 10
# https://www.openstreetmap.org/way/39996451
# https://www.openstreetmap.org/relation/5549623
assert_has_feature(
    10, 172, 396, 'roads',
    { 'kind': 'path', 'walking_network': 'rwn'})

# highway=unclassified, with route local (Grant Avenue) at zoom 12
# part of The Barbary Coast Trail in San Francisco
# https://www.openstreetmap.org/way/91181758
# https://www.openstreetmap.org/relation/6322028
assert_has_feature(
    12, 688, 1584, 'roads',
    { 'kind': 'minor_road', 'walking_network': 'lwn'})

# Way: Clara-Immerwahr-Straße (287167007)
# icn=yes is marked on the way
# http://www.openstreetmap.org/way/287167007
assert_has_feature(
    8, 134, 85, 'roads',
    { 'kind': 'minor_road', 'bicycle_network': 'icn'})

# Ferry between Denmark and Germany, icn
# https://www.openstreetmap.org/way/128631318
# https://www.openstreetmap.org/relation/721738
assert_has_feature(
    8, 136, 81, 'roads',
    { 'kind': 'ferry', 'bicycle_network': 'icn'})

# Søndervangsvej minor road in Denmark as national cycle route
# https://www.openstreetmap.org/way/149701891
# https://www.openstreetmap.org/relation/349521
assert_has_feature(
    8, 136, 79, 'roads',
    { 'kind': 'minor_road', 'bicycle_network': 'ncn'})

# Part of Bay Trail in South (San Francisco) Bay
# way is marked rcn=yes, and part of a proper bike relation
# http://www.openstreetmap.org/way/44422697
# http://www.openstreetmap.org/relation/325779
assert_has_feature(
    10, 164, 396, 'roads',
    { 'kind': 'path', 'bicycle_network': 'rcn'})

# Hyltebjerg Allé residential road with rcn in Copenhagen
# https://www.openstreetmap.org/way/2860759
# https://www.openstreetmap.org/relation/2087590
assert_has_feature(
    10, 1095, 641, 'roads',
    { 'kind': 'minor_road', 'bicycle_network': 'rcn'})

# lcn in Seattle (living street that would only be visible at zoom 13 otherwise) at zoom 11
# https://www.openstreetmap.org/way/6477775
# https://www.openstreetmap.org/relation/3541926
assert_has_feature(
    11, 327, 715, 'roads',
    { 'kind': 'minor_road', 'bicycle_network': 'lcn'})

# Kirkham Street lcn in San Francisco at zoom 11
# https://www.openstreetmap.org/way/89802424
# https://www.openstreetmap.org/relation/32313
assert_has_feature(
    11, 327, 791, 'roads',
    { 'kind': 'minor_road', 'bicycle_network': 'lcn'})

# Asiatisk Plads service road with lcn in Copenhagen
# https://www.openstreetmap.org/way/164049387
# https://www.openstreetmap.org/relation/6199242
assert_has_feature(
    11, 1095, 641, 'roads',
    { 'kind': 'minor_road', 'bicycle_network': 'lcn'})
Skip the failing tests so the remaining assertions can be validated
# Integration assertions for the `roads` tile layer: verify that
# walking_network / bicycle_network values from OSM route relations (and
# way tags) survive into the expected zoom/x/y tiles.
# (assert_has_feature is provided by the surrounding test harness.)
# NOTE(review): the two Merced Pass Trail assertions below are deliberately
# commented out (known failures) so the remaining assertions can run.

# highway=path, with route national (Pacific Crest Trail) at zoom 9
# https://www.openstreetmap.org/way/236361475
# https://www.openstreetmap.org/relation/1225378
assert_has_feature(
    9, 86, 197, 'roads',
    { 'kind': 'path', 'walking_network': 'nwn'})

# highway=path, with route regional (Merced Pass Trail) at zoom 10
# https://www.openstreetmap.org/way/373491941
# https://www.openstreetmap.org/relation/5549623
#assert_has_feature(
#    10, 171, 396, 'roads',
#    { 'kind': 'path', 'walking_network': 'rwn'})

# highway=path, with route regional (Merced Pass Trail) at zoom 10
# https://www.openstreetmap.org/way/39996451
# https://www.openstreetmap.org/relation/5549623
#assert_has_feature(
#    10, 172, 396, 'roads',
#    { 'kind': 'path', 'walking_network': 'rwn'})

# highway=unclassified, with route local (Grant Avenue) at zoom 12
# part of The Barbary Coast Trail in San Francisco
# https://www.openstreetmap.org/way/91181758
# https://www.openstreetmap.org/relation/6322028
assert_has_feature(
    12, 688, 1584, 'roads',
    { 'kind': 'minor_road', 'walking_network': 'lwn'})

# Way: Clara-Immerwahr-Straße (287167007)
# icn=yes is marked on the way
# http://www.openstreetmap.org/way/287167007
assert_has_feature(
    8, 134, 85, 'roads',
    { 'kind': 'minor_road', 'bicycle_network': 'icn'})

# Ferry between Denmark and Germany, icn
# https://www.openstreetmap.org/way/128631318
# https://www.openstreetmap.org/relation/721738
assert_has_feature(
    8, 136, 81, 'roads',
    { 'kind': 'ferry', 'bicycle_network': 'icn'})

# Søndervangsvej minor road in Denmark as national cycle route
# https://www.openstreetmap.org/way/149701891
# https://www.openstreetmap.org/relation/349521
assert_has_feature(
    8, 136, 79, 'roads',
    { 'kind': 'minor_road', 'bicycle_network': 'ncn'})

# Part of Bay Trail in South (San Francisco) Bay
# way is marked rcn=yes, and part of a proper bike relation
# http://www.openstreetmap.org/way/44422697
# http://www.openstreetmap.org/relation/325779
assert_has_feature(
    10, 164, 396, 'roads',
    { 'kind': 'path', 'bicycle_network': 'rcn'})

# Hyltebjerg Allé residential road with rcn in Copenhagen
# https://www.openstreetmap.org/way/2860759
# https://www.openstreetmap.org/relation/2087590
assert_has_feature(
    10, 1095, 641, 'roads',
    { 'kind': 'minor_road', 'bicycle_network': 'rcn'})

# lcn in Seattle (living street that would only be visible at zoom 13 otherwise) at zoom 11
# https://www.openstreetmap.org/way/6477775
# https://www.openstreetmap.org/relation/3541926
assert_has_feature(
    11, 327, 715, 'roads',
    { 'kind': 'minor_road', 'bicycle_network': 'lcn'})

# Kirkham Street lcn in San Francisco at zoom 11
# https://www.openstreetmap.org/way/89802424
# https://www.openstreetmap.org/relation/32313
assert_has_feature(
    11, 327, 791, 'roads',
    { 'kind': 'minor_road', 'bicycle_network': 'lcn'})

# Asiatisk Plads service road with lcn in Copenhagen
# https://www.openstreetmap.org/way/164049387
# https://www.openstreetmap.org/relation/6199242
assert_has_feature(
    11, 1095, 641, 'roads',
    { 'kind': 'minor_road', 'bicycle_network': 'lcn'})
|
########
# Copyright (c) 2014 GigaSpaces Technologies Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
############
import os
import shutil
import tarfile
from contextlib import closing
from cloudify.utils import get_kerberos_indication
from cloudify.cluster_status import CloudifyNodeType
from cloudify_rest_client.exceptions import (CloudifyClientError,
UserUnauthorizedError)
from . import init
from .. import env
from .. import utils
from ..cli import cfy
from .. import constants
from ..cli import helptexts
from ..env import get_rest_client
from ..exceptions import CloudifyCliError
from ..table import print_data, print_single
from ..commands.cluster import _all_in_one_manager
from ..commands.cluster import update_profile_logic as update_cluster_profile
# Directory (under the CLI profiles dir) holding ssh keys bundled with
# profiles; presumably used by profile export/import — confirm against the
# export/import commands.
EXPORTED_KEYS_DIRNAME = '.exported-ssh-keys'
EXPORTED_SSH_KEYS_DIR = os.path.join(env.PROFILES_DIR, EXPORTED_KEYS_DIRNAME)
# Columns printed by `cfy profiles list` and `cfy profiles show-current`.
PROFILE_COLUMNS = ['name', 'manager_ip', 'manager_username', 'manager_tenant',
                   'ssh_user', 'ssh_key_path', 'ssh_port', 'kerberos_env',
                   'rest_port', 'rest_protocol', 'rest_certificate']
# Cluster view: keep 'name', replace 'manager_ip' (index 1) with per-node
# 'hostname' and 'host_ip' columns.
CLUSTER_PROFILE_COLUMNS = PROFILE_COLUMNS[:1] + ['hostname', 'host_ip'] \
    + PROFILE_COLUMNS[2:]
@cfy.group(name='profiles')
@cfy.options.common_options
def profiles():
    """
    Handle Cloudify CLI profiles

    Each profile can manage a single Cloudify manager.

    A profile is automatically created when using the `cfy profiles use`
    command.

    Profiles are named according to the IP of the manager they manage.
    """
    # Group callback: ensure a local profile exists before any subcommand runs.
    if not env.is_initialized():
        init.init_local_profile()
def _format_cluster_profile(profile):
    """
    Format the list of cluster nodes for display in `cfy cluster show`,
    we show the profile details of every stored cluster node.
    """
    # Profile-level attributes shared by every row of the cluster table.
    common_attributes = {k: profile.get(k) for k in CLUSTER_PROFILE_COLUMNS}
    nodes = []
    for node in profile['cluster'][CloudifyNodeType.MANAGER]:
        # merge the common attrs with node data, but rename node's name
        # attribute to cluster_node, because the attribute 'name' is
        # reserved for the profile name
        node_data = dict(node)
        # NOTE(review): popping 'hostname' and assigning it back is a no-op
        # and does not match the comment above (which says 'name' should be
        # renamed).  Looks like node_data.pop('name') was intended — confirm
        # before changing, since node dict schema is not visible here.
        node_data['hostname'] = node_data.pop('hostname')
        # Node values override the profile-level defaults.
        nodes.append(dict(common_attributes, **node_data))
    return nodes
@profiles.command(name='show-current',
                  short_help='Retrieve current profile information')
@cfy.options.common_options
@cfy.pass_logger
def show(logger):
    """
    Shows your current active profile and it's properties
    """
    # Local mode has no manager profile to display.
    profile_name = env.get_active_profile()
    if profile_name == 'local':
        logger.info("You're currently working in local mode. "
                    "To use a manager run `cfy profiles use MANAGER_IP`")
        return

    profile = _get_profile(profile_name)
    if not profile.get('cluster'):
        # Single-manager profile: one-record view.
        print_single(PROFILE_COLUMNS, profile, 'Active profile:')
        return

    # Cluster profile: one table row per stored manager node.
    print_data(CLUSTER_PROFILE_COLUMNS,
               _format_cluster_profile(profile),
               'Cluster nodes in profile {0}:'.format(profile['name']),
               labels={'profile_name': 'Name',
                       'hostname': 'Manager hostname',
                       'host_ip': 'Manager ip'})
@profiles.command(name='list',
                  short_help='List profiles')
@cfy.options.common_options
@cfy.pass_logger
def list(logger):
    """
    List all profiles
    """
    current_profile = env.get_active_profile()
    profile_names = env.get_profile_names()

    rows = []
    for name in profile_names:
        profile_data = _get_profile(name)
        if name == current_profile:
            # Mark the currently active profile with a leading *
            profile_data['name'] = '*' + profile_data['name']
        rows.append(profile_data)

    if rows:
        logger.info('Listing all profiles...')
        print_data(PROFILE_COLUMNS, rows, 'Profiles:')
    if not profile_names:
        logger.info(
            'No profiles found. You can create a new profile '
            'by using an existing manager via the `cfy profiles use` command')
@profiles.command(name='use',
                  short_help='Control a specific manager')
@cfy.argument('manager-ip')
@cfy.options.profile_name
@cfy.options.ssh_user
@cfy.options.ssh_key
@cfy.options.ssh_port
@cfy.options.manager_username
@cfy.options.manager_password
@cfy.options.manager_tenant
@cfy.options.rest_port
@cfy.options.ssl_rest
@cfy.options.rest_certificate
@cfy.options.kerberos_env
@cfy.options.skip_credentials_validation
@cfy.options.common_options
@cfy.pass_logger
def use(manager_ip,
        profile_name,
        skip_credentials_validation,
        logger,
        **kwargs):
    """Control a specific manager

    `PROFILE_NAME` can be either a manager IP or `local`.

    Additional CLI commands will be added after a manager is used.

    To stop using a manager, you can run `cfy init -r`.
    """
    # The profile is named after the manager IP unless overridden.
    profile_name = profile_name or manager_ip

    if profile_name == 'local':
        logger.info('Using local environment...')
        if not env.is_profile_exists(profile_name):
            init.init_local_profile()
        env.set_active_profile('local')
        return

    if env.is_profile_exists(profile_name):
        _switch_profile(
            manager_ip=manager_ip,
            profile_name=profile_name,
            logger=logger,
            **kwargs)
    else:
        _create_profile(
            manager_ip=manager_ip,
            profile_name=profile_name,
            skip_credentials_validation=skip_credentials_validation,
            logger=logger,
            **kwargs)

    # Cluster details can only be refreshed with manager credentials present.
    if env.profile.manager_username:
        _update_cluster_profile_to_dict(logger)
def _update_cluster_profile_to_dict(logger):
    """Migrate a legacy list-based profile `cluster` attribute to a dict,
    then refresh the cluster details from the manager when it is not an
    all-in-one deployment.
    """
    # Older profiles stored `cluster` as a list; reset to the dict format.
    # isinstance replaces the `type(...) == type([])` anti-pattern (and the
    # noqa it required); it is also truthy for list subclasses, which the
    # dict-format migration should treat the same way.
    if isinstance(env.profile.cluster, list):
        env.profile.cluster = dict()
        env.profile.save()
    client = get_rest_client()
    if not _all_in_one_manager(client):
        update_cluster_profile(client, logger)
def _switch_profile(manager_ip, profile_name, logger, **kwargs):
    """Activate an already-existing profile.

    Existing profiles are updated via `cfy profiles set`, so any extra
    --options passed here are ignored (with a warning).
    """
    ignored = [name for name, value in kwargs.items() if value]
    if ignored:
        logger.warning('Profile {0} already exists. '
                       'The passed in options are ignored: {1}. '
                       'To update the profile, use `cfy profiles set`'
                       .format(profile_name, ', '.join(ignored)))
    env.set_active_profile(profile_name)
    logger.info('Using manager {0}'.format(profile_name))
def _create_profile(
        manager_ip,
        profile_name,
        ssh_user,
        ssh_key,
        ssh_port,
        manager_username,
        manager_password,
        manager_tenant,
        rest_port,
        ssl,
        rest_certificate,
        kerberos_env,
        skip_credentials_validation,
        logger):
    """Create a new manager profile and make it the active one.

    Connectivity (and, unless skipped, credentials) are verified against
    the manager before the profile directory is created on disk.
    """
    # If REST certificate is provided, then automatically
    # assume SSL.
    if rest_certificate:
        ssl = True
    rest_protocol, default_rest_port = _get_ssl_protocol_and_port(ssl)
    if not rest_port:
        rest_port = default_rest_port
    # kerberos_env default is `False` and not `None`
    kerberos_env = get_kerberos_indication(kerberos_env) or False
    logger.info('Attempting to connect to {0} through port {1}, using {2} '
                '(SSL mode: {3})...'.format(manager_ip, rest_port,
                                            rest_protocol, ssl))
    # First, attempt to get the provider from the manager - should it fail,
    # the manager's profile directory won't be created
    provider_context = _get_provider_context(
        profile_name,
        manager_ip,
        rest_port,
        rest_protocol,
        rest_certificate,
        manager_username,
        manager_password,
        manager_tenant,
        kerberos_env,
        skip_credentials_validation
    )
    init.init_manager_profile(profile_name=profile_name)
    logger.info('Using manager {0} with port {1}'.format(
        manager_ip, rest_port))
    # Persist all connection details into the freshly created profile.
    _set_profile_context(
        profile_name,
        provider_context,
        manager_ip,
        ssh_key,
        ssh_user,
        ssh_port,
        manager_username,
        manager_password,
        manager_tenant,
        rest_port,
        rest_protocol,
        rest_certificate,
        kerberos_env
    )
    env.set_active_profile(profile_name)
@profiles.command(name='delete',
                  short_help='Delete a profile')
@cfy.argument('profile-name')
@cfy.options.common_options
@cfy.pass_logger
def delete(profile_name, logger):
    """Delete a profile
    `PROFILE_NAME` is the IP of the manager the profile manages.
    """
    logger.info('Deleting profile {0}...'.format(profile_name))
    # A failed deletion (e.g. unknown profile) is reported, not raised.
    try:
        env.delete_profile(profile_name)
    except CloudifyCliError as ex:
        logger.info(str(ex))
    else:
        logger.info('Profile deleted')
def set_profile(profile_name,
                manager_username,
                manager_password,
                manager_tenant,
                ssh_user,
                ssh_key,
                ssh_port,
                ssl,
                rest_certificate,
                rest_port,
                kerberos_env,
                skip_credentials_validation,
                logger):
    """Set the profile name, manager username and/or password and/or tenant
    and/or ssl state (on/off) in the *current* profile
    """
    if not any([profile_name, ssh_user, ssh_key, ssh_port, manager_username,
                manager_password, manager_tenant, ssl is not None,
                rest_certificate, kerberos_env is not None]):
        raise CloudifyCliError(
            "You must supply at least one of the following: "
            "profile name, username, password, tenant, "
            "ssl, rest certificate, ssh user, ssh key, ssh port, kerberos env")
    # Validate with the values that will be in effect after this call:
    # explicitly passed values win, otherwise keep the stored ones.
    username = manager_username or env.get_username()
    password = manager_password or env.get_password()
    tenant = manager_tenant or env.get_tenant_name()
    protocol, port = _get_ssl_protocol_and_port(ssl)
    if rest_port is not None:
        port = rest_port
    if not skip_credentials_validation:
        _validate_credentials(username,
                              password,
                              tenant,
                              rest_certificate,
                              protocol,
                              port,
                              kerberos_env)
    # Renaming is done by saving under the new name, then deleting the old
    # profile directory at the very end.
    old_name = None
    if profile_name:
        if profile_name == 'local':
            raise CloudifyCliError('Cannot use the reserved name "local"')
        if env.is_profile_exists(profile_name):
            raise CloudifyCliError('Profile {0} already exists'
                                   .format(profile_name))
        old_name = env.profile.profile_name
        env.profile.profile_name = profile_name
    if manager_username:
        logger.info('Setting username to `{0}`'.format(manager_username))
        env.profile.manager_username = manager_username
    if manager_password:
        # NOTE(review): this logs the password in clear text - consider
        # masking it.
        logger.info('Setting password to `{0}`'.format(manager_password))
        env.profile.manager_password = manager_password
    if manager_tenant:
        logger.info('Setting tenant to `{0}`'.format(manager_tenant))
        env.profile.manager_tenant = manager_tenant
    if ssl is not None:
        _set_profile_ssl(ssl, rest_port, logger)
    if rest_certificate:
        logger.info(
            'Setting rest certificate to `{0}`'.format(rest_certificate))
        env.profile.rest_certificate = rest_certificate
    if rest_port:
        # Bug fix: the message previously lacked the closing backtick.
        logger.info('Setting rest port to `{0}`'.format(rest_port))
        env.profile.rest_port = rest_port
    if ssh_user:
        logger.info('Setting ssh user to `{0}`'.format(ssh_user))
        env.profile.ssh_user = ssh_user
    if ssh_key:
        logger.info('Setting ssh key to `{0}`'.format(ssh_key))
        env.profile.ssh_key = ssh_key
    if ssh_port:
        logger.info('Setting ssh port to `{0}`'.format(ssh_port))
        env.profile.ssh_port = ssh_port
    if kerberos_env is not None:
        logger.info('Setting kerberos_env to `{0}`'.format(kerberos_env))
        env.profile.kerberos_env = kerberos_env
    env.profile.save()
    if old_name is not None:
        env.set_active_profile(profile_name)
        env.delete_profile(old_name)
    logger.info('Settings saved successfully')
def _set_profile_ssl(ssl, rest_port, logger):
    """Enable or disable SSL in the current profile.

    Updates the profile's rest port/protocol and propagates the change to
    every stored manager cluster node, warning about nodes that have no
    certificate configured.
    """
    if ssl is None:
        raise CloudifyCliError('Internal error: SSL must be either `on` or '
                               '`off`')
    protocol, port = _get_ssl_protocol_and_port(ssl)
    # An explicitly given --rest-port wins over the protocol's default port.
    if rest_port is not None:
        port = rest_port
    if protocol == constants.SECURED_REST_PROTOCOL:
        logger.info('Enabling SSL in the local profile')
    else:
        logger.info('Disabling SSL in the local profile')
    env.profile.rest_port = port
    env.profile.rest_protocol = protocol
    _update_cluster_profile_to_dict(logger)
    # Apply the same port/protocol to every stored manager cluster node.
    manager_cluster = env.profile.cluster.get(CloudifyNodeType.MANAGER)
    if manager_cluster:
        missing_certs = []
        for node in manager_cluster:
            node['rest_port'] = port
            node['rest_protocol'] = protocol
            logger.info('Enabling SSL for {0}'.format(node['host_ip']))
            if not node.get('cert'):
                missing_certs.append(node['hostname'])
        if missing_certs:
            logger.warning('The following cluster nodes have no certificate '
                           'set: {0}'.format(', '.join(missing_certs)))
            logger.warning('If required, set the certificates for those '
                           'nodes using `cfy profiles set-cluster`')
# Thin CLI wrapper around set_profile(): converts the raw `on`/`off` ssl
# flag and the kerberos flag into booleans before delegating.
@profiles.command(
    name='set',
    short_help='Set name/manager username/password/tenant in current profile')
@cfy.options.profile_name
@cfy.options.manager_username
@cfy.options.manager_password
@cfy.options.manager_tenant
@cfy.options.ssh_user
@cfy.options.ssh_key
@cfy.options.ssh_port
@cfy.options.ssl_state
@cfy.options.rest_certificate
@cfy.options.rest_port
@cfy.options.kerberos_env
@cfy.options.skip_credentials_validation
@cfy.options.common_options
@cfy.pass_logger
def set_cmd(profile_name,
            manager_username,
            manager_password,
            manager_tenant,
            ssh_user,
            ssh_key,
            ssh_port,
            ssl,
            rest_certificate,
            rest_port,
            kerberos_env,
            skip_credentials_validation,
            logger):
    return set_profile(profile_name,
                       manager_username,
                       manager_password,
                       manager_tenant,
                       ssh_user,
                       ssh_key,
                       ssh_port,
                       _get_ssl_indication(ssl),
                       rest_certificate,
                       rest_port,
                       get_kerberos_indication(kerberos_env),
                       skip_credentials_validation,
                       logger)
@profiles.command(
    name='set-cluster',
    short_help='Set connection options for a cluster node')
@cfy.argument('cluster-node-name')
@cfy.options.ssh_user
@cfy.options.ssh_key
@cfy.options.ssh_port
@cfy.options.rest_certificate
@cfy.pass_logger
def set_cluster(cluster_node_name,
                ssh_user,
                ssh_key,
                ssh_port,
                rest_certificate,
                logger):
    """Set connection options for a Manager cluster node.
    `CLUSTER_NODE_NAME` is the Manager cluster node name to set options for.
    """
    manager_cluster = env.profile.cluster.get(CloudifyNodeType.MANAGER)
    if not manager_cluster:
        err = CloudifyCliError('The current profile is not a cluster profile!')
        err.possible_solutions = [
            "Select a different profile using `cfy profiles use`",
            "Run `cfy cluster update-profile`"
        ]
        raise err
    # for/else: either a node matches and we break, or the else raises.
    for node in manager_cluster:
        if node['hostname'] == cluster_node_name:
            changed_node = node
            break
    else:
        raise CloudifyCliError(
            'Node {0} not found in the cluster'.format(cluster_node_name))
    # Copy over each ssh option the user actually provided.
    for source, target, label in [
        (ssh_user, 'ssh_user', 'ssh user'),
        (ssh_key, 'ssh_key', 'ssh key'),
        (ssh_port, 'ssh_port', 'ssh port'),
    ]:
        if source:
            changed_node[target] = source
            logger.info('Node {0}: setting {1} to `{2}`'
                        .format(cluster_node_name, label, source))
    if rest_certificate:
        changed_node['cert'] = rest_certificate
        changed_node['trust_all'] = False
        changed_node['rest_protocol'] = 'https'
        # Bug fix: this log previously formatted the stale loop variable
        # `source` (the last ssh option) instead of the certificate path.
        logger.info('Node {0}: setting rest-certificate to `{1}` and enabling '
                    'certificate verification'
                    .format(cluster_node_name, rest_certificate))
    env.profile.save()
    logger.info('Settings saved successfully')
@profiles.command(
    name='unset',
    short_help='Clear manager username/password/tenant from current profile')
@cfy.options.manager_username_flag
@cfy.options.manager_password_flag
@cfy.options.manager_tenant_flag
@cfy.options.ssh_user_flag
@cfy.options.ssh_key_flag
@cfy.options.rest_certificate_flag
@cfy.options.kerberos_env_flag
@cfy.options.skip_credentials_validation
@cfy.options.common_options
@cfy.pass_logger
def unset(manager_username,
          manager_password,
          manager_tenant,
          ssh_user,
          ssh_key,
          rest_certificate,
          kerberos_env,
          skip_credentials_validation,
          logger):
    """Clear the manager username and/or password and/or tenant
    from the *current* profile
    """
    if not any([manager_username, manager_password, manager_tenant,
                rest_certificate, ssh_user, ssh_key, kerberos_env]):
        raise CloudifyCliError("You must choose at least one of the following:"
                               " username, password, tenant, kerberos_env, "
                               "rest certificate, ssh user, ssh key")
    # Compute the values that will remain in effect after clearing: flagged
    # fields fall back to environment variables, others keep their stored
    # profile values.
    if manager_username:
        username = os.environ.get(constants.CLOUDIFY_USERNAME_ENV)
    else:
        username = env.profile.manager_username
    if manager_password:
        password = os.environ.get(constants.CLOUDIFY_PASSWORD_ENV)
    else:
        password = env.profile.manager_password
    if manager_tenant:
        tenant = os.environ.get(constants.CLOUDIFY_TENANT_ENV)
    else:
        tenant = env.profile.manager_tenant
    if rest_certificate:
        cert = os.environ.get(constants.LOCAL_REST_CERT_FILE) \
            or env.get_default_rest_cert_local_path()
    else:
        cert = None
    # Make sure the manager is still usable with the remaining values
    # before actually clearing anything.
    if not skip_credentials_validation:
        _validate_credentials(username,
                              password,
                              tenant,
                              cert,
                              env.profile.rest_protocol,
                              env.profile.rest_port,
                              None)
    if manager_username:
        logger.info('Clearing manager username')
        env.profile.manager_username = None
    if manager_password:
        logger.info('Clearing manager password')
        env.profile.manager_password = None
    if manager_tenant:
        logger.info('Clearing manager tenant')
        env.profile.manager_tenant = None
    if rest_certificate:
        logger.info('Clearing rest certificate')
        env.profile.rest_certificate = None
    if ssh_user:
        logger.info('Clearing ssh user')
        env.profile.ssh_user = None
    if ssh_key:
        logger.info('Clearing ssh key')
        env.profile.ssh_key = None
    if kerberos_env:
        logger.info('Clearing kerberos_env')
        env.profile.kerberos_env = None
    env.profile.save()
    logger.info('Settings saved successfully')
@profiles.command(name='export',
                  short_help='Export all profiles to an archive')
@cfy.options.include_keys(helptexts.EXPORT_SSH_KEYS)
@cfy.options.optional_output_path
@cfy.options.common_options
@cfy.pass_logger
def export_profiles(include_keys, output_path, logger):
    """Export all profiles to a file
    WARNING: Including the ssh keys of your profiles in the archive means
    that once the profiles are imported, the ssh keys will be put back
    in their original locations!
    If `-o / --output-path` is omitted, the archive's name will be
    `cfy-profiles.tar.gz`.
    """
    _assert_profiles_exist()
    destination = output_path or \
        os.path.join(os.getcwd(), 'cfy-profiles.tar.gz')
    # TODO: Copy exported ssh keys to each profile's directory
    logger.info('Exporting profiles to {0}...'.format(destination))
    if include_keys:
        # Copy each profile's ssh key into the profiles dir so the tar
        # below picks them up; the copies are removed right after.
        for profile in env.get_profile_names():
            _backup_ssh_key(profile)
    utils.tar(env.PROFILES_DIR, destination)
    if include_keys:
        shutil.rmtree(EXPORTED_SSH_KEYS_DIR)
    logger.info('Export complete!')
    logger.info(
        'You can import the profiles by running '
        '`cfy profiles import PROFILES_ARCHIVE`')
@profiles.command(name='import',
                  short_help='Import profiles from an archive')
@cfy.argument('archive-path')
@cfy.options.include_keys(helptexts.IMPORT_SSH_KEYS)
@cfy.options.common_options
@cfy.pass_logger
def import_profiles(archive_path, include_keys, logger):
    """Import profiles from a profiles archive
    WARNING: If a profile exists both in the archive and locally
    it will be overwritten (any other profiles will be left intact).
    `ARCHIVE_PATH` is the path to the profiles archive to import.
    """
    _assert_is_tarfile(archive_path)
    _assert_profiles_archive(archive_path)
    logger.info('Importing profiles from {0}...'.format(archive_path))
    utils.untar(archive_path, os.path.dirname(env.PROFILES_DIR))
    if include_keys:
        for profile in env.get_profile_names():
            _restore_ssh_key(profile)
    elif EXPORTED_KEYS_DIRNAME in os.listdir(env.PROFILES_DIR):
        # The archive contains exported keys but the user did not ask to
        # restore them.  (Bug fix: the message previously lacked the
        # closing backtick after `--include-keys`.)
        logger.info("The profiles archive you provided contains ssh keys "
                    "for one or more profiles. To restore those keys to "
                    "their original locations, you can use the "
                    "`--include-keys` flag or copy them manually from {0} "
                    .format(EXPORTED_SSH_KEYS_DIR))
    logger.info('Import complete!')
    logger.info('You can list profiles using `cfy profiles list`')
def _assert_profiles_exist():
    """Raise a CLI error when there are no stored profiles to export."""
    profile_names = env.get_profile_names()
    if not profile_names:
        raise CloudifyCliError('No profiles to export')
def _assert_profiles_archive(archive_path):
with closing(tarfile.open(name=archive_path)) as tar:
if not tar.getmembers()[0].name == 'profiles':
raise CloudifyCliError(
'The archive provided does not seem to be a valid '
'Cloudify profiles archive')
def _assert_is_tarfile(archive_path):
if not tarfile.is_tarfile(archive_path):
raise CloudifyCliError('The archive provided must be a tar.gz archive')
def _backup_ssh_key(profile):
    """Copy the profile's ssh key into the exported-keys directory."""
    return _move_ssh_key(profile, is_backup=True)
def _restore_ssh_key(profile):
    """Move the profile's exported ssh key back to its original location."""
    return _move_ssh_key(profile, is_backup=False)
@cfy.pass_logger
def _move_ssh_key(profile, logger, is_backup):
    """Iterate through all profiles and move their ssh keys
    This is how we backup and restore ssh keys.
    """
    context = env.get_profile_context(profile)
    key_filepath = context.ssh_key
    if key_filepath:
        # Keys are stored under the exported-keys dir with the profile name
        # appended, so keys from different profiles cannot collide.
        backup_path = os.path.join(
            EXPORTED_SSH_KEYS_DIR, os.path.basename(key_filepath)) + \
            '.{0}.profile'.format(profile)
        if is_backup:
            if not os.path.isdir(EXPORTED_SSH_KEYS_DIR):
                os.makedirs(EXPORTED_SSH_KEYS_DIR)
            logger.info('Copying ssh key {0} to {1}...'.format(
                key_filepath, backup_path))
            shutil.copy2(key_filepath, backup_path)
        else:
            # Restore only when a backed-up key for this profile exists.
            if os.path.isfile(backup_path):
                logger.info(
                    'Restoring ssh key for profile {0} to {1}...'.format(
                        profile, key_filepath))
                shutil.move(backup_path, key_filepath)
def _get_profile(profile_name):
    """Return the profile context of `profile_name` as a dict.

    Temporarily activates the profile to read its context, and always
    restores the previously active profile afterwards.
    """
    current_profile = env.get_active_profile()
    env.set_active_profile(profile_name)
    try:
        context = env.get_profile_context(profile_name)
    finally:
        # Restore the original active profile even when reading fails, so
        # a broken profile cannot leave the CLI pointing elsewhere.
        env.set_active_profile(current_profile)
    return context.to_dict()
def _assert_manager_available(client, profile_name):
try:
return client.manager.get_status()
except UserUnauthorizedError, e:
raise CloudifyCliError(
"Can't use manager {0}\n{1}.".format(
profile_name,
str(e)
)
)
# The problem here is that, for instance,
# any problem raised by the rest client will trigger this.
# Triggering a CloudifyClientError only doesn't actually deal
# with situations like No route to host and the likes.
except Exception as ex:
raise CloudifyCliError(
"Can't use manager {0}. {1}".format(profile_name, str(ex.message)))
def _get_provider_context(profile_name,
                          manager_ip,
                          rest_port,
                          rest_protocol,
                          rest_certificate,
                          manager_username,
                          manager_password,
                          manager_tenant,
                          kerberos_env,
                          skip_credentials_validation):
    """Fetch the provider context from the manager.

    Returns None when credentials validation fails but was asked to be
    skipped, or when the manager has no context; re-raises connection or
    credential errors otherwise.
    """
    try:
        client = _get_client_and_assert_manager(
            profile_name,
            manager_ip,
            rest_port,
            rest_protocol,
            rest_certificate,
            manager_username,
            manager_password,
            manager_tenant,
            kerberos_env
        )
    except CloudifyCliError:
        if skip_credentials_validation:
            return None
        raise
    try:
        response = client.manager.get_context()
        return response['context']
    except CloudifyClientError:
        return None
def _get_client_and_assert_manager(profile_name,
                                   manager_ip=None,
                                   rest_port=None,
                                   rest_protocol=None,
                                   rest_certificate=None,
                                   manager_username=None,
                                   manager_password=None,
                                   manager_tenant=None,
                                   kerberos_env=None):
    """Build a rest client for the given connection details and verify the
    manager is usable.

    Returns the client; raises CloudifyCliError (via
    _assert_manager_available) when the manager cannot be reached or the
    credentials are rejected.
    """
    # Attempt to update the profile with an existing profile context, if one
    # is available. This is relevant in case the user didn't pass a username
    # or a password, and was expecting them to be taken from the old profile
    env.profile = env.get_profile_context(profile_name, suppress_error=True)
    client = env.get_rest_client(
        rest_host=manager_ip,
        rest_port=rest_port,
        rest_protocol=rest_protocol,
        rest_cert=rest_certificate,
        username=manager_username,
        password=manager_password,
        tenant_name=manager_tenant,
        kerberos_env=kerberos_env
    )
    _assert_manager_available(client, profile_name)
    return client
def _set_profile_context(profile_name,
                         provider_context,
                         manager_ip,
                         ssh_key,
                         ssh_user,
                         ssh_port,
                         manager_username,
                         manager_password,
                         manager_tenant,
                         rest_port,
                         rest_protocol,
                         rest_certificate,
                         kerberos_env):
    """Persist the given connection details into the named profile.

    Falsy arguments leave the corresponding stored values untouched,
    except ssh_port (which gets the default remote-execution port) and
    the rest protocol/certificate/kerberos fields, which are always
    written.
    """
    profile = env.get_profile_context(profile_name)
    profile.provider_context = provider_context
    if profile_name:
        profile.profile_name = profile_name
    if manager_ip:
        profile.manager_ip = manager_ip
    if ssh_key:
        profile.ssh_key = ssh_key
    if ssh_user:
        profile.ssh_user = ssh_user
    if rest_port:
        profile.rest_port = rest_port
    if manager_username:
        profile.manager_username = manager_username
    if manager_password:
        profile.manager_password = manager_password
    if manager_tenant:
        profile.manager_tenant = manager_tenant
    profile.ssh_port = ssh_port or constants.REMOTE_EXECUTION_PORT
    profile.rest_protocol = rest_protocol
    profile.rest_certificate = rest_certificate
    profile.kerberos_env = kerberos_env
    profile.save()
def _is_manager_secured(response_history):
""" Checks if the manager is secured (ssl enabled)
The manager is secured if the request was redirected to https
"""
if response_history:
first_response = response_history[0]
return first_response.is_redirect \
and first_response.headers['location'].startswith('https')
return False
def _get_ssl_indication(ssl):
if ssl is None:
return None
return str(ssl).lower() == 'on'
def _get_ssl_protocol_and_port(ssl):
if ssl is not None:
protocol, port = (constants.SECURED_REST_PROTOCOL,
constants.SECURED_REST_PORT) if ssl else \
(constants.DEFAULT_REST_PROTOCOL, constants.DEFAULT_REST_PORT)
else:
protocol, port = None, None
return protocol, port
@cfy.pass_logger
def _validate_credentials(username, password, tenant, certificate, protocol,
                          rest_port, kerberos_env, logger):
    """Check the given credentials against the current profile's manager.

    Raises CloudifyCliError (via _get_client_and_assert_manager) when the
    manager rejects the credentials or cannot be reached.
    """
    logger.info('Validating credentials...')
    _get_client_and_assert_manager(
        profile_name=env.profile.profile_name,
        manager_username=username,
        manager_password=password,
        manager_tenant=tenant,
        rest_certificate=certificate,
        rest_protocol=protocol,
        rest_port=rest_port,
        kerberos_env=kerberos_env
    )
    logger.info('Credentials validated')
# CY-2225 fix update to dict in profiles set (#1042)
########
# Copyright (c) 2014 GigaSpaces Technologies Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
############
import os
import shutil
import tarfile
from contextlib import closing
from cloudify.utils import get_kerberos_indication
from cloudify.cluster_status import CloudifyNodeType
from cloudify_rest_client.exceptions import (CloudifyClientError,
UserUnauthorizedError)
from . import init
from .. import env
from .. import utils
from ..cli import cfy
from .. import constants
from ..cli import helptexts
from ..env import get_rest_client
from ..exceptions import CloudifyCliError
from ..table import print_data, print_single
from ..commands.cluster import _all_in_one_manager
from ..commands.cluster import update_profile_logic as update_cluster_profile
EXPORTED_KEYS_DIRNAME = '.exported-ssh-keys'
EXPORTED_SSH_KEYS_DIR = os.path.join(env.PROFILES_DIR, EXPORTED_KEYS_DIRNAME)
PROFILE_COLUMNS = ['name', 'manager_ip', 'manager_username', 'manager_tenant',
'ssh_user', 'ssh_key_path', 'ssh_port', 'kerberos_env',
'rest_port', 'rest_protocol', 'rest_certificate']
CLUSTER_PROFILE_COLUMNS = PROFILE_COLUMNS[:1] + ['hostname', 'host_ip'] \
+ PROFILE_COLUMNS[2:]
@cfy.group(name='profiles')
@cfy.options.common_options
def profiles():
    """
    Handle Cloudify CLI profiles
    Each profile can manage a single Cloudify manager.
    A profile is automatically created when using the `cfy profiles use`
    command.
    Profiles are named according to the IP of the manager they manage.
    """
    # Make sure at least a local profile exists before any subcommand runs.
    if not env.is_initialized():
        init.init_local_profile()
def _format_cluster_profile(profile):
    """
    Format the list of cluster nodes for display in `cfy cluster show`,
    we show the profile details of every stored cluster node.
    """
    common_attributes = {k: profile.get(k) for k in CLUSTER_PROFILE_COLUMNS}
    # Merge the shared profile attributes into each node's own data; node
    # values win on conflict.  (The previous no-op line that popped and
    # re-assigned 'hostname' - described by a stale comment about renaming
    # 'name' - has been removed.)
    return [dict(common_attributes, **node)
            for node in profile['cluster'][CloudifyNodeType.MANAGER]]
@profiles.command(name='show-current',
                  short_help='Retrieve current profile information')
@cfy.options.common_options
@cfy.pass_logger
def show(logger):
    """
    Shows your current active profile and it's properties
    """
    active_profile_name = env.get_active_profile()
    if active_profile_name == 'local':
        logger.info("You're currently working in local mode. "
                    "To use a manager run `cfy profiles use MANAGER_IP`")
        return
    active_profile = _get_profile(env.get_active_profile())
    # Cluster profiles are rendered as one row per stored manager node.
    if active_profile.get('cluster'):
        print_data(CLUSTER_PROFILE_COLUMNS,
                   _format_cluster_profile(active_profile),
                   'Cluster nodes in profile {0}:'
                   .format(active_profile['name']),
                   labels={
                       'profile_name': 'Name',
                       'hostname': 'Manager hostname',
                       'host_ip': 'Manager ip'})
    else:
        print_single(PROFILE_COLUMNS, active_profile, 'Active profile:')
# NOTE: the command function name shadows the builtin `list` inside this
# module; it is kept for the CLI command name.
@profiles.command(name='list',
                  short_help='List profiles')
@cfy.options.common_options
@cfy.pass_logger
def list(logger):
    """
    List all profiles
    """
    current_profile = env.get_active_profile()
    profiles = []
    profile_names = env.get_profile_names()
    for profile in profile_names:
        profile_data = _get_profile(profile)
        if profile == current_profile:
            # Show the currently active profile by appending *
            profile_data['name'] = '*' + profile_data['name']
        profiles.append(profile_data)
    if profiles:
        logger.info('Listing all profiles...')
        print_data(PROFILE_COLUMNS, profiles, 'Profiles:')
    if not profile_names:
        logger.info(
            'No profiles found. You can create a new profile '
            'by using an existing manager via the `cfy profiles use` command')
@profiles.command(name='use',
                  short_help='Control a specific manager')
@cfy.argument('manager-ip')
@cfy.options.profile_name
@cfy.options.ssh_user
@cfy.options.ssh_key
@cfy.options.ssh_port
@cfy.options.manager_username
@cfy.options.manager_password
@cfy.options.manager_tenant
@cfy.options.rest_port
@cfy.options.ssl_rest
@cfy.options.rest_certificate
@cfy.options.kerberos_env
@cfy.options.skip_credentials_validation
@cfy.options.common_options
@cfy.pass_logger
def use(manager_ip,
        profile_name,
        skip_credentials_validation,
        logger,
        **kwargs):
    """Control a specific manager
    `PROFILE_NAME` can be either a manager IP or `local`.
    Additional CLI commands will be added after a manager is used.
    To stop using a manager, you can run `cfy init -r`.
    """
    # Profiles are named after the manager IP unless --profile-name is given.
    if not profile_name:
        profile_name = manager_ip
    # `local` is a special profile that has no manager attached.
    if profile_name == 'local':
        logger.info('Using local environment...')
        if not env.is_profile_exists(profile_name):
            init.init_local_profile()
        env.set_active_profile('local')
        return
    # Existing profiles are only switched to; new ones are fully created.
    if env.is_profile_exists(profile_name):
        _switch_profile(
            manager_ip=manager_ip,
            profile_name=profile_name,
            logger=logger,
            **kwargs)
    else:
        _create_profile(
            manager_ip=manager_ip,
            profile_name=profile_name,
            skip_credentials_validation=skip_credentials_validation,
            logger=logger,
            **kwargs)
    # Without credentials we cannot query the manager for cluster details.
    if not env.profile.manager_username:
        return
    _update_cluster_profile_to_dict(logger)
def _update_cluster_profile_to_dict(logger):
    """Migrate a legacy list-based cluster profile to the dict format.

    Older profiles stored the cluster as a list; current code expects a
    dict. After migrating, refresh the stored cluster nodes from the
    manager unless it is an all-in-one deployment.
    """
    # isinstance is the idiomatic type check (the old `type(...) == type([])`
    # comparison was flagged with `# noqa`).
    if isinstance(env.profile.cluster, list):
        env.profile.cluster = dict()
        env.profile.save()
    client = get_rest_client()
    if not _all_in_one_manager(client):
        update_cluster_profile(client, logger)
def _switch_profile(manager_ip, profile_name, logger, **kwargs):
    """Activate an already-existing profile.

    Existing profiles are updated via `cfy profiles set`, so any extra
    --options passed here are ignored (with a warning).
    """
    ignored = [name for name, value in kwargs.items() if value]
    if ignored:
        logger.warning('Profile {0} already exists. '
                       'The passed in options are ignored: {1}. '
                       'To update the profile, use `cfy profiles set`'
                       .format(profile_name, ', '.join(ignored)))
    env.set_active_profile(profile_name)
    logger.info('Using manager {0}'.format(profile_name))
def _create_profile(
        manager_ip,
        profile_name,
        ssh_user,
        ssh_key,
        ssh_port,
        manager_username,
        manager_password,
        manager_tenant,
        rest_port,
        ssl,
        rest_certificate,
        kerberos_env,
        skip_credentials_validation,
        logger):
    """Create a new manager profile and make it the active one.

    Connectivity (and, unless skipped, credentials) are verified against
    the manager before the profile directory is created on disk.
    """
    # If REST certificate is provided, then automatically
    # assume SSL.
    if rest_certificate:
        ssl = True
    rest_protocol, default_rest_port = _get_ssl_protocol_and_port(ssl)
    if not rest_port:
        rest_port = default_rest_port
    # kerberos_env default is `False` and not `None`
    kerberos_env = get_kerberos_indication(kerberos_env) or False
    logger.info('Attempting to connect to {0} through port {1}, using {2} '
                '(SSL mode: {3})...'.format(manager_ip, rest_port,
                                            rest_protocol, ssl))
    # First, attempt to get the provider from the manager - should it fail,
    # the manager's profile directory won't be created
    provider_context = _get_provider_context(
        profile_name,
        manager_ip,
        rest_port,
        rest_protocol,
        rest_certificate,
        manager_username,
        manager_password,
        manager_tenant,
        kerberos_env,
        skip_credentials_validation
    )
    init.init_manager_profile(profile_name=profile_name)
    logger.info('Using manager {0} with port {1}'.format(
        manager_ip, rest_port))
    # Persist all connection details into the freshly created profile.
    _set_profile_context(
        profile_name,
        provider_context,
        manager_ip,
        ssh_key,
        ssh_user,
        ssh_port,
        manager_username,
        manager_password,
        manager_tenant,
        rest_port,
        rest_protocol,
        rest_certificate,
        kerberos_env
    )
    env.set_active_profile(profile_name)
@profiles.command(name='delete',
                  short_help='Delete a profile')
@cfy.argument('profile-name')
@cfy.options.common_options
@cfy.pass_logger
def delete(profile_name, logger):
    """Delete a profile
    `PROFILE_NAME` is the IP of the manager the profile manages.
    """
    logger.info('Deleting profile {0}...'.format(profile_name))
    # A failed deletion (e.g. unknown profile) is reported, not raised.
    try:
        env.delete_profile(profile_name)
    except CloudifyCliError as ex:
        logger.info(str(ex))
    else:
        logger.info('Profile deleted')
def set_profile(profile_name,
                manager_username,
                manager_password,
                manager_tenant,
                ssh_user,
                ssh_key,
                ssh_port,
                ssl,
                rest_certificate,
                rest_port,
                kerberos_env,
                skip_credentials_validation,
                logger):
    """Set the profile name, manager username and/or password and/or tenant
    and/or ssl state (on/off) in the *current* profile
    """
    if not any([profile_name, ssh_user, ssh_key, ssh_port, manager_username,
                manager_password, manager_tenant, ssl is not None,
                rest_certificate, kerberos_env is not None]):
        raise CloudifyCliError(
            "You must supply at least one of the following: "
            "profile name, username, password, tenant, "
            "ssl, rest certificate, ssh user, ssh key, ssh port, kerberos env")
    # Validate with the values that will be in effect after this call:
    # explicitly passed values win, otherwise keep the stored ones.
    username = manager_username or env.get_username()
    password = manager_password or env.get_password()
    tenant = manager_tenant or env.get_tenant_name()
    protocol, port = _get_ssl_protocol_and_port(ssl)
    if rest_port is not None:
        port = rest_port
    if not skip_credentials_validation:
        _validate_credentials(username,
                              password,
                              tenant,
                              rest_certificate,
                              protocol,
                              port,
                              kerberos_env)
    # Renaming is done by saving under the new name, then deleting the old
    # profile directory at the very end.
    old_name = None
    if profile_name:
        if profile_name == 'local':
            raise CloudifyCliError('Cannot use the reserved name "local"')
        if env.is_profile_exists(profile_name):
            raise CloudifyCliError('Profile {0} already exists'
                                   .format(profile_name))
        old_name = env.profile.profile_name
        env.profile.profile_name = profile_name
    if manager_username:
        logger.info('Setting username to `{0}`'.format(manager_username))
        env.profile.manager_username = manager_username
    if manager_password:
        # NOTE(review): this logs the password in clear text - consider
        # masking it.
        logger.info('Setting password to `{0}`'.format(manager_password))
        env.profile.manager_password = manager_password
    if manager_tenant:
        logger.info('Setting tenant to `{0}`'.format(manager_tenant))
        env.profile.manager_tenant = manager_tenant
    if rest_certificate:
        logger.info(
            'Setting rest certificate to `{0}`'.format(rest_certificate))
        env.profile.rest_certificate = rest_certificate
    if rest_port:
        # Bug fix: the message previously lacked the closing backtick.
        logger.info('Setting rest port to `{0}`'.format(rest_port))
        env.profile.rest_port = rest_port
    if ssh_user:
        logger.info('Setting ssh user to `{0}`'.format(ssh_user))
        env.profile.ssh_user = ssh_user
    if ssh_key:
        logger.info('Setting ssh key to `{0}`'.format(ssh_key))
        env.profile.ssh_key = ssh_key
    if ssh_port:
        logger.info('Setting ssh port to `{0}`'.format(ssh_port))
        env.profile.ssh_port = ssh_port
    if kerberos_env is not None:
        logger.info('Setting kerberos_env to `{0}`'.format(kerberos_env))
        env.profile.kerberos_env = kerberos_env
    # SSL is applied after all other fields; _set_profile_ssl may itself
    # save and refresh the profile.
    if ssl is not None:
        _set_profile_ssl(ssl, rest_port, logger)
    env.profile.save()
    if old_name is not None:
        env.set_active_profile(profile_name)
        env.delete_profile(old_name)
    logger.info('Settings saved successfully')
def _set_profile_ssl(ssl, rest_port, logger):
    """Enable or disable SSL in the current profile.

    Updates the profile's rest port/protocol and propagates the change to
    every stored manager cluster node, warning about nodes that have no
    certificate configured.
    """
    if ssl is None:
        raise CloudifyCliError('Internal error: SSL must be either `on` or '
                               '`off`')
    protocol, port = _get_ssl_protocol_and_port(ssl)
    # An explicitly given --rest-port wins over the protocol's default port.
    if rest_port is not None:
        port = rest_port
    if protocol == constants.SECURED_REST_PROTOCOL:
        logger.info('Enabling SSL in the local profile')
    else:
        logger.info('Disabling SSL in the local profile')
    env.profile.rest_port = port
    env.profile.rest_protocol = protocol
    _update_cluster_profile_to_dict(logger)
    # Apply the same port/protocol to every stored manager cluster node.
    manager_cluster = env.profile.cluster.get(CloudifyNodeType.MANAGER)
    if manager_cluster:
        missing_certs = []
        for node in manager_cluster:
            node['rest_port'] = port
            node['rest_protocol'] = protocol
            logger.info('Enabling SSL for {0}'.format(node['host_ip']))
            if not node.get('cert'):
                missing_certs.append(node['hostname'])
        if missing_certs:
            logger.warning('The following cluster nodes have no certificate '
                           'set: {0}'.format(', '.join(missing_certs)))
            logger.warning('If required, set the certificates for those '
                           'nodes using `cfy profiles set-cluster`')
# Thin CLI wrapper around set_profile(): converts the raw `on`/`off` ssl
# flag and the kerberos flag into booleans before delegating.
@profiles.command(
    name='set',
    short_help='Set name/manager username/password/tenant in current profile')
@cfy.options.profile_name
@cfy.options.manager_username
@cfy.options.manager_password
@cfy.options.manager_tenant
@cfy.options.ssh_user
@cfy.options.ssh_key
@cfy.options.ssh_port
@cfy.options.ssl_state
@cfy.options.rest_certificate
@cfy.options.rest_port
@cfy.options.kerberos_env
@cfy.options.skip_credentials_validation
@cfy.options.common_options
@cfy.pass_logger
def set_cmd(profile_name,
            manager_username,
            manager_password,
            manager_tenant,
            ssh_user,
            ssh_key,
            ssh_port,
            ssl,
            rest_certificate,
            rest_port,
            kerberos_env,
            skip_credentials_validation,
            logger):
    return set_profile(profile_name,
                       manager_username,
                       manager_password,
                       manager_tenant,
                       ssh_user,
                       ssh_key,
                       ssh_port,
                       _get_ssl_indication(ssl),
                       rest_certificate,
                       rest_port,
                       get_kerberos_indication(kerberos_env),
                       skip_credentials_validation,
                       logger)
@profiles.command(
    name='set-cluster',
    short_help='Set connection options for a cluster node')
@cfy.argument('cluster-node-name')
@cfy.options.ssh_user
@cfy.options.ssh_key
@cfy.options.ssh_port
@cfy.options.rest_certificate
@cfy.pass_logger
def set_cluster(cluster_node_name,
                ssh_user,
                ssh_key,
                ssh_port,
                rest_certificate,
                logger):
    """Set connection options for a Manager cluster node.
    `CLUSTER_NODE_NAME` is the Manager cluster node name to set options for.
    """
    manager_cluster = env.profile.cluster.get(CloudifyNodeType.MANAGER)
    if not manager_cluster:
        err = CloudifyCliError('The current profile is not a cluster profile!')
        err.possible_solutions = [
            "Select a different profile using `cfy profiles use`",
            "Run `cfy cluster update-profile`"
        ]
        raise err
    # for/else: either a node matches and we break, or the else raises.
    for node in manager_cluster:
        if node['hostname'] == cluster_node_name:
            changed_node = node
            break
    else:
        raise CloudifyCliError(
            'Node {0} not found in the cluster'.format(cluster_node_name))
    # Copy over each ssh option the user actually provided.
    for source, target, label in [
        (ssh_user, 'ssh_user', 'ssh user'),
        (ssh_key, 'ssh_key', 'ssh key'),
        (ssh_port, 'ssh_port', 'ssh port'),
    ]:
        if source:
            changed_node[target] = source
            logger.info('Node {0}: setting {1} to `{2}`'
                        .format(cluster_node_name, label, source))
    if rest_certificate:
        changed_node['cert'] = rest_certificate
        changed_node['trust_all'] = False
        changed_node['rest_protocol'] = 'https'
        # Bug fix: this log previously formatted the stale loop variable
        # `source` (the last ssh option) instead of the certificate path.
        logger.info('Node {0}: setting rest-certificate to `{1}` and enabling '
                    'certificate verification'
                    .format(cluster_node_name, rest_certificate))
    env.profile.save()
    logger.info('Settings saved successfully')
@profiles.command(
    name='unset',
    short_help='Clear manager username/password/tenant from current profile')
@cfy.options.manager_username_flag
@cfy.options.manager_password_flag
@cfy.options.manager_tenant_flag
@cfy.options.ssh_user_flag
@cfy.options.ssh_key_flag
@cfy.options.rest_certificate_flag
@cfy.options.kerberos_env_flag
@cfy.options.skip_credentials_validation
@cfy.options.common_options
@cfy.pass_logger
def unset(manager_username,
          manager_password,
          manager_tenant,
          ssh_user,
          ssh_key,
          rest_certificate,
          kerberos_env,
          skip_credentials_validation,
          logger):
    """Clear the manager username and/or password and/or tenant
    from the *current* profile
    """
    # Each parameter is a boolean flag meaning "clear this setting".
    if not any([manager_username, manager_password, manager_tenant,
                rest_certificate, ssh_user, ssh_key, kerberos_env]):
        raise CloudifyCliError("You must choose at least one of the following:"
                               " username, password, tenant, kerberos_env, "
                               "rest certificate, ssh user, ssh key")
    # Work out the credentials that would remain in effect after clearing:
    # a setting being cleared falls back to its environment variable
    # (possibly None); otherwise the current profile value is kept.
    if manager_username:
        username = os.environ.get(constants.CLOUDIFY_USERNAME_ENV)
    else:
        username = env.profile.manager_username
    if manager_password:
        password = os.environ.get(constants.CLOUDIFY_PASSWORD_ENV)
    else:
        password = env.profile.manager_password
    if manager_tenant:
        tenant = os.environ.get(constants.CLOUDIFY_TENANT_ENV)
    else:
        tenant = env.profile.manager_tenant
    if rest_certificate:
        cert = os.environ.get(constants.LOCAL_REST_CERT_FILE) \
            or env.get_default_rest_cert_local_path()
    else:
        cert = None
    # Validate the post-clear credentials BEFORE mutating the profile, so a
    # failed validation leaves the profile untouched.
    if not skip_credentials_validation:
        _validate_credentials(username,
                              password,
                              tenant,
                              cert,
                              env.profile.rest_protocol,
                              env.profile.rest_port,
                              None)
    # Clear only the settings the user flagged; save once at the end.
    if manager_username:
        logger.info('Clearing manager username')
        env.profile.manager_username = None
    if manager_password:
        logger.info('Clearing manager password')
        env.profile.manager_password = None
    if manager_tenant:
        logger.info('Clearing manager tenant')
        env.profile.manager_tenant = None
    if rest_certificate:
        logger.info('Clearing rest certificate')
        env.profile.rest_certificate = None
    if ssh_user:
        logger.info('Clearing ssh user')
        env.profile.ssh_user = None
    if ssh_key:
        logger.info('Clearing ssh key')
        env.profile.ssh_key = None
    if kerberos_env:
        logger.info('Clearing kerberos_env')
        env.profile.kerberos_env = None
    env.profile.save()
    logger.info('Settings saved successfully')
@profiles.command(name='export',
                  short_help='Export all profiles to an archive')
@cfy.options.include_keys(helptexts.EXPORT_SSH_KEYS)
@cfy.options.optional_output_path
@cfy.options.common_options
@cfy.pass_logger
def export_profiles(include_keys, output_path, logger):
    """Export all profiles to a file
    WARNING: Including the ssh keys of your profiles in the archive means
    that once the profiles are imported, the ssh keys will be put back
    in their original locations!
    If `-o / --output-path` is omitted, the archive's name will be
    `cfy-profiles.tar.gz`.
    """
    _assert_profiles_exist()
    if output_path:
        destination = output_path
    else:
        destination = os.path.join(os.getcwd(), 'cfy-profiles.tar.gz')
    # TODO: Copy exported ssh keys to each profile's directory
    logger.info('Exporting profiles to {0}...'.format(destination))
    if include_keys:
        # Stage a copy of each profile's ssh key so it lands in the archive.
        for profile_name in env.get_profile_names():
            _backup_ssh_key(profile_name)
    utils.tar(env.PROFILES_DIR, destination)
    if include_keys:
        # The staged copies were only needed for the archive; clean them up.
        shutil.rmtree(EXPORTED_SSH_KEYS_DIR)
    logger.info('Export complete!')
    logger.info(
        'You can import the profiles by running '
        '`cfy profiles import PROFILES_ARCHIVE`')
@profiles.command(name='import',
                  short_help='Import profiles from an archive')
@cfy.argument('archive-path')
@cfy.options.include_keys(helptexts.IMPORT_SSH_KEYS)
@cfy.options.common_options
@cfy.pass_logger
def import_profiles(archive_path, include_keys, logger):
    """Import profiles from a profiles archive
    WARNING: If a profile exists both in the archive and locally
    it will be overwritten (any other profiles will be left intact).
    `ARCHIVE_PATH` is the path to the profiles archive to import.
    """
    # Validate the archive before touching the local profiles directory.
    _assert_is_tarfile(archive_path)
    _assert_profiles_archive(archive_path)
    logger.info('Importing profiles from {0}...'.format(archive_path))
    utils.untar(archive_path, os.path.dirname(env.PROFILES_DIR))
    if include_keys:
        for profile in env.get_profile_names():
            _restore_ssh_key(profile)
    else:
        if EXPORTED_KEYS_DIRNAME in os.listdir(env.PROFILES_DIR):
            # Bug fix: the closing backtick after `--include-keys` was
            # missing, leaving an unbalanced quote in the hint message.
            logger.info("The profiles archive you provided contains ssh keys "
                        "for one or more profiles. To restore those keys to "
                        "their original locations, you can use the "
                        "`--include-keys` flag or copy them manually from {0} "
                        .format(EXPORTED_SSH_KEYS_DIR))
    logger.info('Import complete!')
    logger.info('You can list profiles using `cfy profiles list`')
def _assert_profiles_exist():
    """Raise a CLI error when there are no local profiles to export."""
    profile_names = env.get_profile_names()
    if profile_names:
        return
    raise CloudifyCliError('No profiles to export')
def _assert_profiles_archive(archive_path):
with closing(tarfile.open(name=archive_path)) as tar:
if not tar.getmembers()[0].name == 'profiles':
raise CloudifyCliError(
'The archive provided does not seem to be a valid '
'Cloudify profiles archive')
def _assert_is_tarfile(archive_path):
if not tarfile.is_tarfile(archive_path):
raise CloudifyCliError('The archive provided must be a tar.gz archive')
def _backup_ssh_key(profile):
    """Copy the profile's ssh key into the shared export staging directory."""
    return _move_ssh_key(profile, is_backup=True)
def _restore_ssh_key(profile):
    """Move the profile's staged ssh key back to its original location."""
    return _move_ssh_key(profile, is_backup=False)
@cfy.pass_logger
def _move_ssh_key(profile, logger, is_backup):
    """Copy a profile's ssh key to the export dir, or move it back.

    This is how we backup and restore ssh keys.
    """
    context = env.get_profile_context(profile)
    key_filepath = context.ssh_key
    if not key_filepath:
        # Nothing to do for profiles without an ssh key.
        return
    # Staged keys are disambiguated by appending the profile name.
    backup_path = '{0}.{1}.profile'.format(
        os.path.join(EXPORTED_SSH_KEYS_DIR, os.path.basename(key_filepath)),
        profile)
    if is_backup:
        if not os.path.isdir(EXPORTED_SSH_KEYS_DIR):
            os.makedirs(EXPORTED_SSH_KEYS_DIR)
        logger.info('Copying ssh key {0} to {1}...'.format(
            key_filepath, backup_path))
        shutil.copy2(key_filepath, backup_path)
    elif os.path.isfile(backup_path):
        logger.info(
            'Restoring ssh key for profile {0} to {1}...'.format(
                profile, key_filepath))
        shutil.move(backup_path, key_filepath)
def _get_profile(profile_name):
    """Return ``profile_name``'s context as a dict.

    Temporarily switches the active profile to ``profile_name`` while the
    context is loaded, then restores the previously-active profile.
    """
    current_profile = env.get_active_profile()
    env.set_active_profile(profile_name)
    try:
        context = env.get_profile_context(profile_name)
    finally:
        # Robustness fix: always restore the previously-active profile, even
        # when loading the requested profile context raises.
        env.set_active_profile(current_profile)
    return context.to_dict()
def _assert_manager_available(client, profile_name):
    """Verify that the manager behind ``client`` is reachable and usable.

    Returns the manager status on success; raises CloudifyCliError with a
    profile-specific message on authorization or connectivity problems.
    """
    try:
        return client.manager.get_status()
    # Bug fix: `except UserUnauthorizedError, e` is Python-2-only syntax
    # (a SyntaxError on Python 3); `as e` works on both.
    except UserUnauthorizedError as e:
        raise CloudifyCliError(
            "Can't use manager {0}\n{1}.".format(
                profile_name,
                str(e)
            )
        )
    # The problem here is that, for instance,
    # any problem raised by the rest client will trigger this.
    # Triggering a CloudifyClientError only doesn't actually deal
    # with situations like No route to host and the likes.
    except Exception as ex:
        # `ex.message` does not exist on Python 3; str(ex) works everywhere.
        raise CloudifyCliError(
            "Can't use manager {0}. {1}".format(profile_name, str(ex)))
def _get_provider_context(profile_name,
                          manager_ip,
                          rest_port,
                          rest_protocol,
                          rest_certificate,
                          manager_username,
                          manager_password,
                          manager_tenant,
                          kerberos_env,
                          skip_credentials_validation):
    """Fetch the provider context from the manager.

    Returns the manager's provider context dict, or None when it cannot be
    retrieved. With ``skip_credentials_validation`` set, a manager that
    cannot be reached is tolerated and None is returned instead of raising.
    """
    try:
        client = _get_client_and_assert_manager(
            profile_name, manager_ip, rest_port, rest_protocol,
            rest_certificate, manager_username, manager_password,
            manager_tenant, kerberos_env)
    except CloudifyCliError:
        if skip_credentials_validation:
            return None
        raise
    try:
        return client.manager.get_context()['context']
    except CloudifyClientError:
        return None
def _get_client_and_assert_manager(profile_name,
                                   manager_ip=None,
                                   rest_port=None,
                                   rest_protocol=None,
                                   rest_certificate=None,
                                   manager_username=None,
                                   manager_password=None,
                                   manager_tenant=None,
                                   kerberos_env=None):
    """Build a REST client for ``profile_name`` and verify the manager.

    Explicit arguments override values from the stored profile context.
    Returns the client; raises CloudifyCliError (via
    _assert_manager_available) when the manager is unreachable or rejects
    the credentials.
    """
    # Attempt to update the profile with an existing profile context, if one
    # is available. This is relevant in case the user didn't pass a username
    # or a password, and was expecting them to be taken from the old profile
    env.profile = env.get_profile_context(profile_name, suppress_error=True)
    client = env.get_rest_client(
        rest_host=manager_ip,
        rest_port=rest_port,
        rest_protocol=rest_protocol,
        rest_cert=rest_certificate,
        username=manager_username,
        password=manager_password,
        tenant_name=manager_tenant,
        kerberos_env=kerberos_env
    )
    _assert_manager_available(client, profile_name)
    return client
def _set_profile_context(profile_name,
                         provider_context,
                         manager_ip,
                         ssh_key,
                         ssh_user,
                         ssh_port,
                         manager_username,
                         manager_password,
                         manager_tenant,
                         rest_port,
                         rest_protocol,
                         rest_certificate,
                         kerberos_env):
    """Persist the given settings into ``profile_name``'s profile context."""
    profile = env.get_profile_context(profile_name)
    profile.provider_context = provider_context
    # These attributes are only overwritten when a truthy value was given.
    optional_attrs = [
        ('profile_name', profile_name),
        ('manager_ip', manager_ip),
        ('ssh_key', ssh_key),
        ('ssh_user', ssh_user),
        ('rest_port', rest_port),
        ('manager_username', manager_username),
        ('manager_password', manager_password),
        ('manager_tenant', manager_tenant),
    ]
    for attr, value in optional_attrs:
        if value:
            setattr(profile, attr, value)
    # These are always written, even when the value is None.
    profile.ssh_port = ssh_port or constants.REMOTE_EXECUTION_PORT
    profile.rest_protocol = rest_protocol
    profile.rest_certificate = rest_certificate
    profile.kerberos_env = kerberos_env
    profile.save()
def _is_manager_secured(response_history):
""" Checks if the manager is secured (ssl enabled)
The manager is secured if the request was redirected to https
"""
if response_history:
first_response = response_history[0]
return first_response.is_redirect \
and first_response.headers['location'].startswith('https')
return False
def _get_ssl_indication(ssl):
if ssl is None:
return None
return str(ssl).lower() == 'on'
def _get_ssl_protocol_and_port(ssl):
    """Map the ssl flag to a (protocol, port) pair.

    Returns (None, None) when ssl itself is None (i.e. not specified).
    """
    if ssl is None:
        return None, None
    if ssl:
        return constants.SECURED_REST_PROTOCOL, constants.SECURED_REST_PORT
    return constants.DEFAULT_REST_PROTOCOL, constants.DEFAULT_REST_PORT
@cfy.pass_logger
def _validate_credentials(username, password, tenant, certificate, protocol,
                          rest_port, kerberos_env, logger):
    """Assert the given credentials work against the current profile's manager.

    Builds a REST client with the candidate settings and raises (via
    _get_client_and_assert_manager) if the manager is unreachable or
    rejects them.
    """
    logger.info('Validating credentials...')
    _get_client_and_assert_manager(
        profile_name=env.profile.profile_name,
        manager_username=username,
        manager_password=password,
        manager_tenant=tenant,
        rest_certificate=certificate,
        rest_protocol=protocol,
        rest_port=rest_port,
        kerberos_env=kerberos_env
    )
    logger.info('Credentials validated')
|
#!/usr/bin/env python3
import unittest
import numpy as np
from pgmpy.models import BayesianModel
from pgmpy.models import MarkovModel
from pgmpy.factors import Factor
from pgmpy.factors import TabularCPD
from pgmpy.inference import Inference
from collections import defaultdict
class TestInferenceBase(unittest.TestCase):
    """Tests for pgmpy.inference.Inference initialization on Bayesian and
    Markov models."""

    def setUp(self):
        # Bug fix: each conditional CPD column must sum to 1, otherwise
        # BayesianModel.check_model() rejects the model. The second row of
        # every conditional CPD is now the complement of the first row.
        self.bayesian = BayesianModel([('a', 'b'), ('b', 'c'), ('c', 'd'), ('d', 'e')])
        a_cpd = TabularCPD('a', 2, [[0.4, 0.6]])
        b_cpd = TabularCPD('b', 2, [[0.2, 0.4], [0.8, 0.6]], evidence='a',
                           evidence_card=[2])
        c_cpd = TabularCPD('c', 2, [[0.1, 0.2], [0.9, 0.8]], evidence='b',
                           evidence_card=[2])
        d_cpd = TabularCPD('d', 2, [[0.4, 0.3], [0.6, 0.7]], evidence='c',
                           evidence_card=[2])
        e_cpd = TabularCPD('e', 2, [[0.3, 0.2], [0.7, 0.8]], evidence='d',
                           evidence_card=[2])
        self.bayesian.add_cpds(a_cpd, b_cpd, c_cpd, d_cpd, e_cpd)
        # A 4-node Markov network with one pairwise factor per edge.
        self.markov = MarkovModel([('a', 'b'), ('b', 'd'), ('a', 'c'), ('c', 'd')])
        factor_1 = Factor(['a', 'b'], [2, 2], np.array([100, 1, 1, 100]))
        factor_2 = Factor(['a', 'c'], [2, 2], np.array([40, 30, 100, 20]))
        factor_3 = Factor(['b', 'd'], [2, 2], np.array([1, 100, 100, 1]))
        factor_4 = Factor(['c', 'd'], [2, 2], np.array([60, 60, 40, 40]))
        self.markov.add_factors(factor_1, factor_2, factor_3, factor_4)

    def test_bayesian_inference_init(self):
        """Inference() should collect the variables, cardinalities and the
        factors touching each variable from a Bayesian model."""
        infer_bayesian = Inference(self.bayesian)
        self.assertEqual(set(infer_bayesian.variables), {'a', 'b', 'c', 'd', 'e'})
        self.assertEqual(infer_bayesian.cardinality, {'a': 2, 'b': 2, 'c': 2,
                                                      'd': 2, 'e': 2})
        self.assertIsInstance(infer_bayesian.factors, defaultdict)
        self.assertEqual(set(infer_bayesian.factors['a']),
                         set([self.bayesian.get_cpds('a').to_factor(),
                              self.bayesian.get_cpds('b').to_factor()]))
        self.assertEqual(set(infer_bayesian.factors['b']),
                         set([self.bayesian.get_cpds('b').to_factor(),
                              self.bayesian.get_cpds('c').to_factor()]))
        self.assertEqual(set(infer_bayesian.factors['c']),
                         set([self.bayesian.get_cpds('c').to_factor(),
                              self.bayesian.get_cpds('d').to_factor()]))
        self.assertEqual(set(infer_bayesian.factors['d']),
                         set([self.bayesian.get_cpds('d').to_factor(),
                              self.bayesian.get_cpds('e').to_factor()]))
        self.assertEqual(set(infer_bayesian.factors['e']),
                         set([self.bayesian.get_cpds('e').to_factor()]))

    def test_markov_inference_init(self):
        """Inference() should collect the variables, cardinalities and
        factors from a Markov model."""
        infer_markov = Inference(self.markov)
        self.assertEqual(set(infer_markov.variables), {'a', 'b', 'c', 'd'})
        self.assertEqual(infer_markov.cardinality, {'a': 2, 'b': 2, 'c': 2, 'd': 2})
        self.assertEqual(infer_markov.factors, {'a': [Factor(['a', 'b'], [2, 2],
                                                             np.array([100, 1, 1, 100])),
                                                      Factor(['a', 'c'], [2, 2],
                                                             np.array([40, 30, 100, 20]))],
                                                'b': [Factor(['a', 'b'], [2, 2],
                                                             np.array([100, 1, 1, 100])),
                                                      Factor(['b', 'd'], [2, 2],
                                                             np.array([1, 100, 100, 1]))],
                                                'c': [Factor(['a', 'c'], [2, 2],
                                                             np.array([40, 30, 100, 20])),
                                                      Factor(['c', 'd'], [2, 2],
                                                             np.array([60, 60, 40, 40]))],
                                                'd': [Factor(['b', 'd'], [2, 2],
                                                             np.array([1, 100, 100, 1])),
                                                      Factor(['c', 'd'], [2, 2],
                                                             np.array([60, 60, 40, 40]))]})
Fixed a bug in test_inference related to check_model: the test CPD columns now each sum to 1.
#!/usr/bin/env python3
import unittest
import numpy as np
from pgmpy.models import BayesianModel
from pgmpy.models import MarkovModel
from pgmpy.factors import Factor
from pgmpy.factors import TabularCPD
from pgmpy.inference import Inference
from collections import defaultdict
class TestInferenceBase(unittest.TestCase):
    """Tests for pgmpy.inference.Inference initialization on Bayesian and
    Markov models."""

    def setUp(self):
        # A chain Bayesian network a -> b -> c -> d -> e. Each CPD's columns
        # sum to 1, as required by model validation.
        self.bayesian = BayesianModel([('a', 'b'), ('b', 'c'), ('c', 'd'), ('d', 'e')])
        a_cpd = TabularCPD('a', 2, [[0.4, 0.6]])
        b_cpd = TabularCPD('b', 2, [[0.2, 0.4], [0.8, 0.6]], evidence='a',
                           evidence_card=[2])
        c_cpd = TabularCPD('c', 2, [[0.1, 0.2], [0.9, 0.8]], evidence='b',
                           evidence_card=[2])
        d_cpd = TabularCPD('d', 2, [[0.4, 0.3], [0.6, 0.7]], evidence='c',
                           evidence_card=[2])
        e_cpd = TabularCPD('e', 2, [[0.3, 0.2], [0.7, 0.8]], evidence='d',
                           evidence_card=[2])
        self.bayesian.add_cpds(a_cpd, b_cpd, c_cpd, d_cpd, e_cpd)
        # A 4-node Markov network with one pairwise factor per edge.
        self.markov = MarkovModel([('a', 'b'), ('b', 'd'), ('a', 'c'), ('c', 'd')])
        factor_1 = Factor(['a', 'b'], [2, 2], np.array([100, 1, 1, 100]))
        factor_2 = Factor(['a', 'c'], [2, 2], np.array([40, 30, 100, 20]))
        factor_3 = Factor(['b', 'd'], [2, 2], np.array([1, 100, 100, 1]))
        factor_4 = Factor(['c', 'd'], [2, 2], np.array([60, 60, 40, 40]))
        self.markov.add_factors(factor_1, factor_2, factor_3, factor_4)

    def test_bayesian_inference_init(self):
        """Inference() should collect variables, cardinalities and the
        factors touching each variable from a Bayesian model."""
        infer_bayesian = Inference(self.bayesian)
        self.assertEqual(set(infer_bayesian.variables), {'a', 'b', 'c', 'd', 'e'})
        self.assertEqual(infer_bayesian.cardinality, {'a': 2, 'b': 2, 'c': 2,
                                                      'd': 2, 'e': 2})
        self.assertIsInstance(infer_bayesian.factors, defaultdict)
        self.assertEqual(set(infer_bayesian.factors['a']),
                         set([self.bayesian.get_cpds('a').to_factor(),
                              self.bayesian.get_cpds('b').to_factor()]))
        self.assertEqual(set(infer_bayesian.factors['b']),
                         set([self.bayesian.get_cpds('b').to_factor(),
                              self.bayesian.get_cpds('c').to_factor()]))
        self.assertEqual(set(infer_bayesian.factors['c']),
                         set([self.bayesian.get_cpds('c').to_factor(),
                              self.bayesian.get_cpds('d').to_factor()]))
        self.assertEqual(set(infer_bayesian.factors['d']),
                         set([self.bayesian.get_cpds('d').to_factor(),
                              self.bayesian.get_cpds('e').to_factor()]))
        self.assertEqual(set(infer_bayesian.factors['e']),
                         set([self.bayesian.get_cpds('e').to_factor()]))

    def test_markov_inference_init(self):
        """Inference() should collect variables, cardinalities and factors
        from a Markov model."""
        infer_markov = Inference(self.markov)
        self.assertEqual(set(infer_markov.variables), {'a', 'b', 'c', 'd'})
        self.assertEqual(infer_markov.cardinality, {'a': 2, 'b': 2, 'c': 2, 'd': 2})
        self.assertEqual(infer_markov.factors, {'a': [Factor(['a', 'b'], [2, 2],
                                                             np.array([100, 1, 1, 100])),
                                                      Factor(['a', 'c'], [2, 2],
                                                             np.array([40, 30, 100, 20]))],
                                                'b': [Factor(['a', 'b'], [2, 2],
                                                             np.array([100, 1, 1, 100])),
                                                      Factor(['b', 'd'], [2, 2],
                                                             np.array([1, 100, 100, 1]))],
                                                'c': [Factor(['a', 'c'], [2, 2],
                                                             np.array([40, 30, 100, 20])),
                                                      Factor(['c', 'd'], [2, 2],
                                                             np.array([60, 60, 40, 40]))],
                                                'd': [Factor(['b', 'd'], [2, 2],
                                                             np.array([1, 100, 100, 1])),
                                                      Factor(['c', 'd'], [2, 2],
                                                             np.array([60, 60, 40, 40]))]})
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Monitors git repositories and Cloud Builds.
Periodically polls git repositories to detect changes and kick off Cloud Build
workflows.
"""
import asyncio
import datetime
import logging
import os
import re
import uuid
# Extract just the git tag from the output of 'git ls-remote --refs --tags'.
GIT_TAG_REGEX = r'refs/tags/(r[a-z0-9_\.]+)'
# Extract the commit hash and the reference name from the output of
# 'git ls-remote --refs'. The exact regex for a reference name is tricky as seen
# on StackOverflow (https://stackoverflow.com/questions/12093748). Since this
# regex is parsing the output of the git command, we will assume it is well
# formatted and just limit the length.
GIT_HASH_REFNAME_REGEX = r'^([0-9a-f]{40})\s+(refs/[^\s]{1,64})$'
# Extract the Cloud Build UUID from the text sent to stdout when a build is
# started with "gcloud builds submit ... --async".
# Example: 16fd2706-8baf-433b-82eb-8c7fada847da
# See https://docs.python.org/3/library/uuid.html for more UUID info.
GCB_ASYNC_BUILD_ID_REGEX = (
r'^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}')
# Limit on the total number of ref filters.
MAX_REF_FILTERS = 5
# Route logs to StackDriver when running in the Cloud. The Google Cloud logging
# library enables logs for INFO level by default.
# Adapted from the "Setting up StackDriver Logging for Python" page at
# https://cloud.google.com/logging/docs/setup/python
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
try:
import google.auth.exceptions
import google.cloud.logging
client = google.cloud.logging.Client()
logger.addHandler(client.get_default_handler())
except (ImportError, google.auth.exceptions.GoogleAuthError) as ex:
logger.addHandler(logging.StreamHandler())
def make_subprocess_cmd(cmd):
    """Creates a function that returns an async subprocess.

    Args:
      cmd: Command to run in the subprocess. Arguments should be provided
        when calling the returned function.

    Returns:
      A function that creates an asyncio.subprocess.Process instance.
    """
    def run_cmd(*args, cwd=None):
        logger.info('Running "%s %s"', cmd, ' '.join(args))
        return asyncio.create_subprocess_exec(
            cmd, *args,
            stdout=asyncio.subprocess.PIPE,
            stderr=asyncio.subprocess.PIPE,
            cwd=cwd)
    return run_cmd
class GitPatrolCommands:
    """Holds async subprocess launchers for the external commands we run."""

    def __init__(self):
        # Each attribute is a callable returning an asyncio subprocess for
        # the named binary (see make_subprocess_cmd).
        self.git = make_subprocess_cmd('git')
        self.gcloud = make_subprocess_cmd('gcloud')
async def git_check_ref_filter(commands, ref_filter):
    """Use the git command to validate a ref filter.

    Args:
      commands: GitPatrolCommands object used to execute external commands.
      ref_filter: The git ref filter to validate.

    Returns:
      True for a valid ref filter. False otherwise.
    """
    check_proc = await commands.git(
        'check-ref-format', '--allow-onelevel', '--refspec-pattern',
        ref_filter)
    await check_proc.communicate()
    # git exits 0 for well-formed ref patterns.
    return await check_proc.wait() == 0
async def fetch_git_refs(commands, url, ref_filters):
    """Fetch tags and HEADs from the provided git repository URL.

    Runs 'git ls-remote --refs' against the repository and parses its
    output into a dictionary keyed by full reference name.

    Example:
      {
        'refs/heads/master': '039de508998f3676871ed8cc00e3b33f0f95f7cb',
        'refs/tags/tag0': 'aaa2aa362047ec750359ccf42eee159db5f62726'
      }

    Args:
      commands: GitPatrolCommands object used to execute external commands.
      url: URL of git repo to retrieve refs from.
      ref_filters: A (possibly empty) list of ref filters to pass to the
        'git ls-remote' command to filter the returned refs.

    Returns:
      A dict mapping git reference names to commit hashes on success.
      Returns None when the underlying git command fails.
    """
    ls_remote = await commands.git('ls-remote', '--refs', url, *ref_filters)
    stdout, _ = await ls_remote.communicate()
    returncode = await ls_remote.wait()
    if returncode:
        logger.warning('git ls-remote returned %d', returncode)
        return None
    decoded = stdout.decode('utf-8', 'ignore')
    # re.findall returns (commit, refname) tuples per GIT_HASH_REFNAME_REGEX.
    return {
        refname: commit
        for commit, refname in re.findall(
            GIT_HASH_REFNAME_REGEX, decoded, re.MULTILINE)
    }
async def cloud_build_start(commands, config_path, config, git_ref):
    """Submit a new workflow to Google Cloud Build.

    Args:
      commands: GitPatrolCommands object used to execute external commands.
      config_path: Path to the Cloud Build configuration sources.
      config: Configuration object to read Cloud Build config from.
      git_ref: The git ref (ex: refs/heads/master, refs/tags/v0.0.1) that
        triggered this workflow execution.

    Returns:
      The UUID of the newly created Cloud Build workflow if successful.
      Otherwise returns None.
    """
    arg_config = '--config={}'.format(os.path.join(config_path, config['config']))
    # Provide a few default substitutions that Google Cloud Build would fill
    # in if it was launching a triggered workflow. See link for details...
    # https://cloud.google.com/cloud-build/docs/configuring-builds/substitute-variable-values
    substitutions = []
    if git_ref.startswith('refs/tags/'):
        substitutions.append(
            'TAG_NAME={}'.format(git_ref[len('refs/tags/'):]))
    elif git_ref.startswith('refs/heads/'):
        substitutions.append(
            'BRANCH_NAME={}'.format(git_ref[len('refs/heads/'):]))
    # Bug fix (resolves the old TODO): tolerate a target config without a
    # 'substitutions' key instead of raising KeyError. Building the flag by
    # joining pieces also avoids emitting a stray trailing comma, and the
    # flag is omitted entirely when there is nothing to substitute.
    substitutions.extend(
        '{!s}={!s}'.format(k, v)
        for k, v in config.get('substitutions', {}).items())
    arg_sources = os.path.join(config_path, config['sources'])
    gcloud_args = ['builds', 'submit', '--async', arg_config]
    if substitutions:
        gcloud_args.append('--substitutions=' + ','.join(substitutions))
    gcloud_args.append(arg_sources)
    gcloud_subproc = await commands.gcloud(*gcloud_args)
    stdout_bytes, _ = await gcloud_subproc.communicate()
    returncode = await gcloud_subproc.wait()
    if returncode:
        logger.warning('gcloud builds submit returned %d', returncode)
        return None
    stdout_lines = stdout_bytes.decode('utf-8', 'ignore').splitlines()
    if not stdout_lines:
        logger.warning('gcloud builds submit produced no output')
        return None
    # The build ID appears on the last line of the command's output.
    build_info_line = stdout_lines[-1]
    build_id_list = re.findall(GCB_ASYNC_BUILD_ID_REGEX, build_info_line)
    if not build_id_list or not build_id_list[0]:
        logger.fatal('gcloud builds submit output format has changed')
        return None
    cloud_build_uuid = build_id_list[0]
    logger.info('Cloud Build started [ID=%s]', cloud_build_uuid)
    return uuid.UUID(hex=cloud_build_uuid)
async def cloud_build_wait(commands, cloud_build_uuid):
    """Wait for a Google Cloud Build workflow to complete.

    Streams the build log with all output disabled purely to block until
    the workflow finishes (collecting the log text could blow up the heap),
    then fetches the final build state.

    Args:
      commands: MetaMonitorCommands object used to execute external commands.
      cloud_build_uuid: UUID of the Cloud Build workflow to wait for.

    Returns:
      The final Cloud Build workflow state as a JSON string if successful.
      Otherwise returns None.
    """
    build_id = str(cloud_build_uuid)
    logger.info('Waiting for Cloud Build [ID=%s]', cloud_build_uuid)
    log_proc = await commands.gcloud(
        'builds', 'log', '--stream', '--no-user-output-enabled', build_id)
    await log_proc.communicate()
    returncode = await log_proc.wait()
    if returncode:
        logger.warning('gcloud builds log returned %d', returncode)
        return None
    logger.info('Cloud Build finished [ID=%s]', cloud_build_uuid)
    describe_proc = await commands.gcloud(
        'builds', 'describe', '--format=json', build_id)
    stdout_bytes, _ = await describe_proc.communicate()
    returncode = await describe_proc.wait()
    if returncode:
        logger.warning('gcloud builds describe returned %d', returncode)
        return None
    return stdout_bytes.decode('utf-8', 'ignore')
def git_refs_find_deltas(previous_refs, current_refs):
    """Finds new or updated git refs.

    A ref counts as a delta when it is absent from previous_refs or its
    commit hash changed. Refs present only in previous_refs are ignored.

    Args:
      previous_refs: Dictionary of git refs to compare against.
      current_refs: Dictionary of git refs possibly containing new entries
        or updates.

    Returns:
      A dictionary of the new and updated git refs found in current_refs,
      otherwise an empty dictionary.
    """
    return {
        ref: commit
        for ref, commit in current_refs.items()
        if ref not in previous_refs or previous_refs[ref] != commit
    }
async def run_workflow_triggers(
    commands, db, alias, url, ref_filters, previous_refs):
    """Evaluates workflow trigger conditions.
    Poll the remote repository for a list of its git refs. The workflow trigger
    will be satisfied if any git refs were added or changed since the last time
    the repository was polled.
    Args:
      commands: GitPatrolCommands object used to execute external commands.
      db: A GitPatrolDb object used for database operations.
      alias: Human friendly alias of the configuration for this repository.
      url: URL of the repository to patrol.
      ref_filters: A (possibly empty) list of ref filters to pass to the
        'git ls-remote' command to filter the returned refs.
      previous_refs: List of git refs to expect in the cloned repository. Any refs
        that are now in the repository and not in this list, or any altered refs
        will satisfy the workflow trigger.
    Returns:
      Returns a (dict, dict) tuple. The first item contains a dictionary of the
      current git tags in the remote repository. The second item contains a
      dictionary of git refs that should trigger a workflow execution.
    """
    # Retrieve current refs from the remote repo.
    current_refs = await fetch_git_refs(commands, url, ref_filters)
    if not current_refs:
        # On failure (or when nothing matched), keep the previous refs so no
        # change is silently skipped on the next poll.
        return previous_refs, {}
    # Add a new journal entry with these git refs.
    update_time = datetime.datetime.utcnow()
    git_refs_uuid = await db.record_git_poll(
        update_time, url, alias, current_refs, ref_filters)
    if not git_refs_uuid:
        logger.warning('%s: failed to record git refs', alias)
        return previous_refs, {}
    # See if the repository was updated since the last check.
    new_refs = git_refs_find_deltas(previous_refs, current_refs)
    if not new_refs:
        logger.info('%s: no new refs', alias)
        return previous_refs, {}
    logger.info('%s: new refs: %s', alias, new_refs)
    return current_refs, new_refs
async def run_workflow_body(
    commands, config_path, config, git_ref):
    """Runs the actual workflow logic.

    Submits each configured Cloud Build workflow in sequence and waits for
    it to finish; stops at the first failure.

    Args:
      commands: GitPatrolCommands object used to execute external commands.
      config_path: Path to the Cloud Build configuration sources.
      config: Target configuration object.
      git_ref: The git ref (ex: refs/heads/master, refs/tags/v0.0.1) that
        triggered this workflow execution.

    Returns:
      True when the workflow completes successfully. False otherwise.
    """
    for workflow in config['workflows']:
        build_id = await cloud_build_start(commands, config_path, workflow, git_ref)
        if not build_id:
            return False
        if not await cloud_build_wait(commands, build_id):
            return False
    return True
async def target_loop(
    commands, loop, db, config_path, target_config, offset, interval):
    """Main loop to manage periodic workflow execution.
    Args:
      commands: GitPatrolCommands object used to execute external commands.
      loop: A reference to the asyncio event loop in use.
      db: A GitPatrolDb object used for database operations.
      config_path: Path to files referenced by the target configuration.
      target_config: Git Patrol config target information.
      offset: Starting offset time in seconds.
      interval: Time in seconds to wait between poll attempts.
    Returns:
      Nothing. Loops forever.
    """
    alias = target_config['alias']
    url = target_config['url']
    ref_filters = []
    if 'ref_filters' in target_config:
        ref_filters = target_config['ref_filters']
    # Validate target configuration.
    if len(ref_filters) > MAX_REF_FILTERS:
        logger.error('%s: too many ref filters provided', alias)
        return
    # Validate all filters concurrently; any invalid filter aborts the loop.
    validate_tasks = [git_check_ref_filter(commands, f) for f in ref_filters]
    ref_filters_ok = await asyncio.gather(*validate_tasks)
    if not all(ref_filters_ok):
        logger.error('%s: error in ref filter', alias)
        return
    # Fetch latest git tags from the database.
    current_refs = await db.fetch_latest_refs_by_alias(alias)
    logger.info('%s: current refs %s', alias, current_refs)
    # Stagger the wakeup time of the target loops to avoid hammering the remote
    # server with requests all at once.
    next_wakeup_time = loop.time() + offset + 1
    while True:
        # Calculate the polling loop's next wake-up time. To stay on schedule we
        # keep incrementing next_wakeup_time by the polling interval until we
        # arrive at a time in the future.
        while next_wakeup_time < loop.time():
            next_wakeup_time += interval
        sleep_time = max(0, next_wakeup_time - loop.time())
        logger.info('%s: sleeping for %f', alias, sleep_time)
        await asyncio.sleep(sleep_time)
        # Evaluate workflow triggers to see if the workflow needs to run again.
        current_refs, new_refs = await run_workflow_triggers(
            commands, db, alias, url, ref_filters, current_refs)
        # Launch one workflow per new/changed ref and wait for all of them.
        workflow_tasks = [
            run_workflow_body(commands, config_path, target_config, ref)
            for ref in new_refs.keys() ]
        await asyncio.gather(*workflow_tasks)
Fix typo
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Monitors git repositories and Cloud Builds.
Periodically polls git repositories to detect changes and kick off Cloud Build
workflows.
"""
import asyncio
import datetime
import logging
import os
import re
import uuid
# Extract just the git tag from the output of 'git ls-remote --refs --tags'.
GIT_TAG_REGEX = r'refs/tags/(r[a-z0-9_\.]+)'
# Extract the commit hash and the reference name from the output of
# 'git ls-remote --refs'. The exact regex for a reference name is tricky as seen
# on StackOverflow (https://stackoverflow.com/questions/12093748). Since this
# regex is parsing the output of the git command, we will assume it is well
# formatted and just limit the length.
GIT_HASH_REFNAME_REGEX = r'^([0-9a-f]{40})\s+(refs/[^\s]{1,64})$'
# Extract the Cloud Build UUID from the text sent to stdout when a build is
# started with "gcloud builds submit ... --async".
# Example: 16fd2706-8baf-433b-82eb-8c7fada847da
# See https://docs.python.org/3/library/uuid.html for more UUID info.
GCB_ASYNC_BUILD_ID_REGEX = (
    r'^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}')
# Limit on the total number of ref filters.
MAX_REF_FILTERS = 5
# Route logs to StackDriver when running in the Cloud. The Google Cloud logging
# library enables logs for INFO level by default.
# Adapted from the "Setting up StackDriver Logging for Python" page at
# https://cloud.google.com/logging/docs/setup/python
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
# FIX: previously both imports and Client() were guarded by a single
# 'except (ImportError, google.auth.exceptions.GoogleAuthError)'. If the very
# first import failed, the name 'google' was never bound, so evaluating the
# except tuple itself raised NameError instead of falling back to stderr.
# Import errors and auth errors are now handled in separate stages.
try:
  import google.auth.exceptions
  import google.cloud.logging
except ImportError:
  # Google Cloud libraries are unavailable; log to the console instead.
  logger.addHandler(logging.StreamHandler())
else:
  try:
    client = google.cloud.logging.Client()
    logger.addHandler(client.get_default_handler())
  except google.auth.exceptions.GoogleAuthError:
    # No valid Cloud credentials; log to the console instead.
    logger.addHandler(logging.StreamHandler())
def make_subprocess_cmd(cmd):
  """Builds a launcher that runs *cmd* as an asyncio subprocess.

  Args:
    cmd: Executable to run. Its arguments are supplied later, when the
      returned launcher is invoked.

  Returns:
    A function accepting (*args, cwd=None) that starts the process with
    stdout and stderr captured and returns an asyncio.subprocess.Process.
  """
  def launch(*args, cwd=None):
    logger.info('Running "%s %s"', cmd, ' '.join(args))
    return asyncio.create_subprocess_exec(
        cmd, *args, stdout=asyncio.subprocess.PIPE,
        stderr=asyncio.subprocess.PIPE, cwd=cwd)
  return launch
class GitPatrolCommands:
  """Bundles the external CLI launchers used by Git Patrol.

  Each attribute is a launcher built by make_subprocess_cmd(); calling it
  starts the tool as an asyncio subprocess with stdout/stderr captured.
  """

  def __init__(self):
    # Launchers for the gcloud and git command-line tools.
    self.gcloud = make_subprocess_cmd('gcloud')
    self.git = make_subprocess_cmd('git')
async def git_check_ref_filter(commands, ref_filter):
  """Validates a single ref filter with 'git check-ref-format'.

  Args:
    commands: GitPatrolCommands object used to execute external commands.
    ref_filter: The git ref filter to validate.

  Returns:
    True when git accepts the filter; False otherwise.
  """
  proc = await commands.git(
      'check-ref-format', '--allow-onelevel', '--refspec-pattern', ref_filter)
  # Drain the pipes before waiting so the child cannot block on a full pipe.
  await proc.communicate()
  return await proc.wait() == 0
async def fetch_git_refs(commands, url, ref_filters):
  """Fetch tags and HEADs from the provided git repository URL.

  Runs 'git ls-remote --refs' against the repository and parses its output
  into a dictionary mapping full reference names to commit hashes, e.g.

    {
      'refs/heads/master': '039de508998f3676871ed8cc00e3b33f0f95f7cb',
      'refs/tags/tag0': 'aaa2aa362047ec750359ccf42eee159db5f62726',
    }

  Args:
    commands: GitPatrolCommands object used to execute external commands.
    url: URL of git repo to retrieve refs from.
    ref_filters: A (possibly empty) list of ref filters to pass to the
      'git ls-remote' command to filter the returned refs.

  Returns:
    Dictionary of git references to commit hashes on success; None when the
    underlying git command fails.
  """
  proc = await commands.git('ls-remote', '--refs', url, *ref_filters)
  stdout, _ = await proc.communicate()
  status = await proc.wait()
  if status:
    logger.warning('git ls-remote returned %d', status)
    return None
  listing = stdout.decode('utf-8', 'ignore')
  # findall() yields (commit, refname) tuples; invert them into a mapping
  # keyed by reference name.
  matches = re.findall(GIT_HASH_REFNAME_REGEX, listing, re.MULTILINE)
  return dict((refname, commit) for commit, refname in matches)
async def cloud_build_start(commands, config_path, config, git_ref):
  """Submit a new workflow to Google Cloud Build.

  Args:
    commands: GitPatrolCommands object used to execute external commands.
    config_path: Path to the Cloud Build configuration sources.
    config: Configuration object to read Cloud Build config from; the
      'config', 'substitutions' and 'sources' keys are read here.
    git_ref: The git ref (ex: refs/heads/master, refs/tags/v0.0.1) that
      triggered this workflow execution.

  Returns:
    The UUID of the newly created Cloud Build workflow if successful. Otherwise
    returns None.
  """
  arg_config = '--config={}'.format(os.path.join(config_path, config['config']))
  # Provide a few default substitutions that Google Cloud Build would fill in
  # if it was launching a triggered workflow. See link for details...
  # https://cloud.google.com/cloud-build/docs/configuring-builds/substitute-variable-values
  arg_substitutions = '--substitutions='
  if git_ref.startswith('refs/tags/'):
    arg_substitutions += 'TAG_NAME={},'.format(
        git_ref.replace('refs/tags/', ''))
  elif git_ref.startswith('refs/heads/'):
    arg_substitutions += 'BRANCH_NAME={},'.format(
        git_ref.replace('refs/heads/', ''))
  # Generate a substitution string from the target config.
  # TODO(brian): Handle no substitutions.
  # NOTE(review): a config without a 'substitutions' key raises KeyError
  # here — confirm all target configs define it.
  subs_config = (
      ','.join(
          '{!s}={!s}'.format(
              k, v) for (k, v) in config['substitutions'].items()))
  arg_substitutions += subs_config
  arg_sources = os.path.join(config_path, config['sources'])
  # Submit asynchronously; completion is tracked later by cloud_build_wait().
  gcloud_subproc = await commands.gcloud(
      'builds', 'submit', '--async', arg_config, arg_substitutions, arg_sources)
  stdout_bytes, _ = await gcloud_subproc.communicate()
  returncode = await gcloud_subproc.wait()
  if returncode:
    logger.warning('gcloud builds submit returned %d', returncode)
    return None
  stdout_lines = stdout_bytes.decode('utf-8', 'ignore').splitlines()
  if not stdout_lines:
    logger.warning('gcloud builds submit produced no output')
    return None
  # The build ID is expected at the start of the last line of output.
  build_info_line = stdout_lines[-1]
  build_id_list = re.findall(GCB_ASYNC_BUILD_ID_REGEX, build_info_line)
  if not build_id_list or not build_id_list[0]:
    logger.fatal('gcloud builds submit output format has changed')
    return None
  cloud_build_uuid = build_id_list[0]
  logger.info('Cloud Build started [ID=%s]', cloud_build_uuid)
  return uuid.UUID(hex=cloud_build_uuid)
async def cloud_build_wait(commands, cloud_build_uuid):
  """Blocks until the given Google Cloud Build workflow completes.

  Args:
    commands: MetaMonitorCommands object used to execute external commands.
    cloud_build_uuid: UUID of the Cloud Build workflow to wait for.

  Returns:
    The final Cloud Build workflow state as a JSON string if successful.
    Otherwise returns None.
  """
  build_id = str(cloud_build_uuid)
  # "Stream" the logs with output disabled because we only care about
  # blocking until the workflow completes; a chatty build would otherwise
  # bloat this process's heap with collected stdout.
  logger.info('Waiting for Cloud Build [ID=%s]', cloud_build_uuid)
  log_proc = await commands.gcloud(
      'builds', 'log', '--stream', '--no-user-output-enabled', build_id)
  await log_proc.communicate()
  status = await log_proc.wait()
  if status:
    logger.warning('gcloud builds log returned %d', status)
    return None
  logger.info('Cloud Build finished [ID=%s]', cloud_build_uuid)
  # Fetch the final workflow description as JSON.
  describe_proc = await commands.gcloud(
      'builds', 'describe', '--format=json', build_id)
  stdout_bytes, _ = await describe_proc.communicate()
  status = await describe_proc.wait()
  if status:
    logger.warning('gcloud builds describe returned %d', status)
    return None
  return stdout_bytes.decode('utf-8', 'ignore')
def git_refs_find_deltas(previous_refs, current_refs):
  """Finds new or updated git refs.

  A ref counts as a delta when it is absent from previous_refs or maps to a
  different commit hash there. Refs present only in previous_refs (i.e.
  deletions) are ignored.

  Args:
    previous_refs: Dictionary of git refs to compare against.
    current_refs: Dictionary of git refs possibly containing new entries or
      updates.

  Returns:
    A dictionary of the new and updated git refs found in current_refs,
    otherwise an empty dictionary.
  """
  return {
      refname: commit for refname, commit in current_refs.items()
      if refname not in previous_refs or previous_refs[refname] != commit}
async def run_workflow_triggers(
    commands, db, alias, url, ref_filters, previous_refs):
  """Evaluates workflow trigger conditions.

  Poll the remote repository for a list of its git refs. The workflow trigger
  will be satisfied if any git refs were added or changed since the last time
  the repository was polled.

  Args:
    commands: GitPatrolCommands object used to execute external commands.
    db: A GitPatrolDb object used for database operations.
    alias: Human friendly alias of the configuration for this repository.
    url: URL of the repository to patrol.
    ref_filters: A (possibly empty) list of ref filters to pass to the
      'git ls-remote' command to filter the returned refs.
    previous_refs: List of git refs to expect in the cloned repository. Any refs
      that are now in the repository and not in this list, or any altered refs
      will satisfy the workflow trigger.

  Returns:
    Returns a (dict, dict) tuple. The first item contains a dictionary of the
    current git tags in the remote repository. The second item contains a
    dictionary of git refs that should trigger a workflow execution.
  """
  # Retrieve current refs from the remote repo.
  # NOTE(review): a repository with no refs at all is indistinguishable from a
  # fetch failure here; both leave previous_refs unchanged.
  current_refs = await fetch_git_refs(commands, url, ref_filters)
  if not current_refs:
    return previous_refs, {}
  # Add a new journal entry with these git refs.
  update_time = datetime.datetime.utcnow()
  git_refs_uuid = await db.record_git_poll(
      update_time, url, alias, current_refs, ref_filters)
  if not git_refs_uuid:
    # Keep the previous view of the refs when journaling fails so the same
    # deltas are detected again on the next poll.
    logger.warning('%s: failed to record git refs', alias)
    return previous_refs, {}
  # See if the repository was updated since the last check.
  new_refs = git_refs_find_deltas(previous_refs, current_refs)
  if not new_refs:
    logger.info('%s: no new refs', alias)
    return previous_refs, {}
  logger.info('%s: new refs: %s', alias, new_refs)
  return current_refs, new_refs
async def run_workflow_body(
    commands, config_path, config, git_ref):
  """Runs each configured Cloud Build workflow in sequence.

  Args:
    commands: GitPatrolCommands object used to execute external commands.
    config_path: Path to the Cloud Build configuration sources.
    config: Target configuration object; its 'workflows' list is executed.
    git_ref: The git ref (ex: refs/heads/master, refs/tags/v0.0.1) that
      triggered this workflow execution.

  Returns:
    True when every workflow completes successfully. False at the first
    failure to start or finish a build.
  """
  for workflow in config['workflows']:
    build_id = await cloud_build_start(commands, config_path, workflow, git_ref)
    if not build_id:
      return False
    # A falsy status means the build could not be tracked to completion.
    if not await cloud_build_wait(commands, build_id):
      return False
  return True
async def target_loop(
    commands, loop, db, config_path, target_config, offset, interval):
  """Main loop to manage periodic workflow execution.

  Args:
    commands: GitPatrolCommands object used to execute external commands.
    loop: A reference to the asyncio event loop in use.
    db: A GitPatrolDb object used for database operations.
    config_path: Path to files referenced by the target configuration.
    target_config: Git Patrol config target information; must contain 'alias'
      and 'url', and may contain 'ref_filters'.
    offset: Starting offset time in seconds.
    interval: Time in seconds to wait between poll attempts.

  Returns:
    Nothing. Loops forever; returns early only on configuration errors.
  """
  alias = target_config['alias']
  url = target_config['url']
  ref_filters = []
  if 'ref_filters' in target_config:
    ref_filters = target_config['ref_filters']
  # Validate target configuration.
  if len(ref_filters) > MAX_REF_FILTERS:
    logger.error('%s: too many ref filters provided', alias)
    return
  # Validate all ref filters concurrently via 'git check-ref-format'.
  validate_tasks = [git_check_ref_filter(commands, f) for f in ref_filters]
  ref_filters_ok = await asyncio.gather(*validate_tasks)
  if not all(ref_filters_ok):
    logger.error('%s: error in ref filter', alias)
    return
  # Fetch latest git tags from the database.
  current_refs = await db.fetch_latest_refs_by_alias(alias)
  logger.info('%s: current refs %s', alias, current_refs)
  # Stagger the wakeup time of the target loops to avoid hammering the remote
  # server with requests all at once.
  next_wakeup_time = loop.time() + offset + 1
  while True:
    # Calculate the polling loop's next wake-up time. To stay on schedule we
    # keep incrementing next_wakeup_time by the polling interval until we
    # arrive at a time in the future.
    while next_wakeup_time < loop.time():
      next_wakeup_time += interval
    sleep_time = max(0, next_wakeup_time - loop.time())
    logger.info('%s: sleeping for %f', alias, sleep_time)
    await asyncio.sleep(sleep_time)
    # Evaluate workflow triggers to see if the workflow needs to run again.
    current_refs, new_refs = await run_workflow_triggers(
        commands, db, alias, url, ref_filters, current_refs)
    # Launch one workflow run per new/changed ref, all concurrently.
    # NOTE(review): the boolean results of run_workflow_body are discarded —
    # workflow failures surface only through logs.
    workflow_tasks = [
        run_workflow_body(commands, config_path, target_config, ref)
        for ref in new_refs.keys() ]
    await asyncio.gather(*workflow_tasks)
|
from django import template
from metashare.repository.models import corpusInfoType_model, \
lexicalConceptualResourceInfoType_model, \
languageDescriptionInfoType_model
# toolServiceInfoType_model,
from metashare.settings import MEDIA_URL
register = template.Library()
class ResourceMediaTypes(template.Node):
    """
    Template tag that allows to display media types in result page template.
    """
    def __init__(self, context_var):
        """
        Initialises this template tag.
        """
        super(ResourceMediaTypes, self).__init__()
        # Deferred template variable; resolved against the context in render().
        self.context_var = template.Variable(context_var)

    def render(self, context):
        """
        Renders media types.

        Collects the media types of the resolved resource (corpus, lexical
        conceptual resource or language description) and returns a string of
        <img> icon tags, one per distinct media type, in a fixed order.
        """
        result = []
        corpus_media = self.context_var.resolve(context)
        if isinstance(corpus_media, corpusInfoType_model):
            media_type = corpus_media.corpusMediaType
            for corpus_info in media_type.corpustextinfotype_model_set.all():
                result.append(corpus_info.mediaType)
            # The remaining corpus media components are intentionally not
            # rendered; their lookups are kept here for reference.
            # if media_type.corpusAudioInfo:
            #     result.append(media_type.corpusAudioInfo.mediaType)
            for corpus_info in media_type.corpusvideoinfotype_model_set.all():
                result.append(corpus_info.mediaType)
            # if media_type.corpusTextNgramInfo:
            #     result.append(media_type.corpusTextNgramInfo.mediaType)
            # if media_type.corpusImageInfo:
            #     result.append(media_type.corpusImageInfo.mediaType)
            # if media_type.corpusTextNumericalInfo:
            #     result.append(media_type.corpusTextNumericalInfo.mediaType)
        elif isinstance(corpus_media, lexicalConceptualResourceInfoType_model):
            lcr_media_type = corpus_media.lexicalConceptualResourceMediaType
            if lcr_media_type.lexicalConceptualResourceTextInfo:
                result.append(lcr_media_type.lexicalConceptualResourceTextInfo.mediaType)
            # if lcr_media_type.lexicalConceptualResourceAudioInfo:
            #     result.append(lcr_media_type \
            #         .lexicalConceptualResourceAudioInfo.mediaType)
            # if lcr_media_type.lexicalConceptualResourceVideoInfo:
            #     result.append(lcr_media_type \
            #         .lexicalConceptualResourceVideoInfo.mediaType)
            # if lcr_media_type.lexicalConceptualResourceImageInfo:
            #     result.append(lcr_media_type \
            #         .lexicalConceptualResourceImageInfo.mediaType)
        elif isinstance(corpus_media, languageDescriptionInfoType_model):
            ld_media_type = corpus_media.languageDescriptionMediaType
            if ld_media_type.languageDescriptionTextInfo:
                result.append(ld_media_type.languageDescriptionTextInfo.mediaType)
            # if ld_media_type.languageDescriptionVideoInfo:
            #     result.append(ld_media_type.languageDescriptionVideoInfo.mediaType)
            # if ld_media_type.languageDescriptionImageInfo:
            #     result.append(ld_media_type.languageDescriptionImageInfo.mediaType)
        # elif isinstance(corpus_media, toolServiceInfoType_model):
        #     if corpus_media.inputInfo:
        #         result.extend(corpus_media.inputInfo \
        #             .get_mediaType_display_list())
        #     if corpus_media.outputInfo:
        #         result.extend(corpus_media.outputInfo \
        #             .get_mediaType_display_list())
        # Deduplicate and sort so icon order below is deterministic.
        result = list(set(result))
        result.sort()
        # use images instead of plain text when displaying media types
        image_tag = ""
        if "text" in result:
            image_tag = ' <img title="text" src="{}css/sexybuttons/images/icons/silk/page' \
                '_white_text_media_type.png" /> ' \
                .format(MEDIA_URL)
        if "audio" in result:
            image_tag = image_tag + ' <img title="audio" src="{}css/sexybuttons/images/' \
                'icons/silk/sound_none.png" /> ' \
                .format(MEDIA_URL)
        if "image" in result:
            image_tag = image_tag + ' <img title="image" src="{}css/sexybuttons/images/' \
                'icons/silk/picture.png" /> ' \
                .format(MEDIA_URL)
        if "video" in result:
            image_tag = image_tag + ' <img title="video" src="{}css/sexybuttons/images/' \
                'icons/silk/film.png" />' \
                .format(MEDIA_URL)
        if "textNumerical" in result:
            image_tag = image_tag + ' <img title="textNumerical" src="{}css/sexybuttons/images/' \
                'icons/silk/eye.png" />' \
                .format(MEDIA_URL)
        if "textNgram" in result:
            image_tag = image_tag + ' <img title="textNgram" src="{}css/sexybuttons/images/' \
                'icons/silk/text_align_left.png" />' \
                .format(MEDIA_URL)
        return image_tag
def resource_media_types(parser, token):
    """
    Compilation function for the ``resource_media_types`` template tag.

    Use it like this:
    {% resource_media_types object.resourceComponentType.as_subclass %}
    """
    bits = token.contents.split()
    if len(bits) != 2:
        raise template.TemplateSyntaxError(
            "%r tag accepts exactly two arguments" % bits[0])
    return ResourceMediaTypes(bits[1])


register.tag('resource_media_types', resource_media_types)
Remove references to unused media components.
from django import template
from metashare.repository.models import corpusInfoType_model, \
lexicalConceptualResourceInfoType_model, \
languageDescriptionInfoType_model
# toolServiceInfoType_model,
from metashare.settings import MEDIA_URL
register = template.Library()
class ResourceMediaTypes(template.Node):
    """
    Template tag that allows to display media types in result page template.
    """
    def __init__(self, context_var):
        """
        Initialises this template tag.
        """
        super(ResourceMediaTypes, self).__init__()
        # Deferred template variable; resolved against the context in render().
        self.context_var = template.Variable(context_var)

    def render(self, context):
        """
        Renders media types.

        Collects the media types of the resolved resource and returns a string
        of <img> icon tags. Only the "text" media type is currently rendered;
        the other media components are deliberately disabled below.
        """
        result = []
        corpus_media = self.context_var.resolve(context)
        if isinstance(corpus_media, corpusInfoType_model):
            media_type = corpus_media.corpusMediaType
            for corpus_info in media_type.corpustextinfotype_model_set.all():
                result.append(corpus_info.mediaType)
            # Unused media components are kept commented out for reference.
            # if media_type.corpusAudioInfo:
            #     result.append(media_type.corpusAudioInfo.mediaType)
            # for corpus_info in media_type.corpusvideoinfotype_model_set.all():
            #     result.append(corpus_info.mediaType)
            # if media_type.corpusTextNgramInfo:
            #     result.append(media_type.corpusTextNgramInfo.mediaType)
            # if media_type.corpusImageInfo:
            #     result.append(media_type.corpusImageInfo.mediaType)
            # if media_type.corpusTextNumericalInfo:
            #     result.append(media_type.corpusTextNumericalInfo.mediaType)
        elif isinstance(corpus_media, lexicalConceptualResourceInfoType_model):
            lcr_media_type = corpus_media.lexicalConceptualResourceMediaType
            if lcr_media_type.lexicalConceptualResourceTextInfo:
                result.append(lcr_media_type.lexicalConceptualResourceTextInfo.mediaType)
            # if lcr_media_type.lexicalConceptualResourceAudioInfo:
            #     result.append(lcr_media_type \
            #         .lexicalConceptualResourceAudioInfo.mediaType)
            # if lcr_media_type.lexicalConceptualResourceVideoInfo:
            #     result.append(lcr_media_type \
            #         .lexicalConceptualResourceVideoInfo.mediaType)
            # if lcr_media_type.lexicalConceptualResourceImageInfo:
            #     result.append(lcr_media_type \
            #         .lexicalConceptualResourceImageInfo.mediaType)
        elif isinstance(corpus_media, languageDescriptionInfoType_model):
            ld_media_type = corpus_media.languageDescriptionMediaType
            if ld_media_type.languageDescriptionTextInfo:
                result.append(ld_media_type.languageDescriptionTextInfo.mediaType)
            # if ld_media_type.languageDescriptionVideoInfo:
            #     result.append(ld_media_type.languageDescriptionVideoInfo.mediaType)
            # if ld_media_type.languageDescriptionImageInfo:
            #     result.append(ld_media_type.languageDescriptionImageInfo.mediaType)
        # elif isinstance(corpus_media, toolServiceInfoType_model):
        #     if corpus_media.inputInfo:
        #         result.extend(corpus_media.inputInfo \
        #             .get_mediaType_display_list())
        #     if corpus_media.outputInfo:
        #         result.extend(corpus_media.outputInfo \
        #             .get_mediaType_display_list())
        # Deduplicate and sort for deterministic output.
        result = list(set(result))
        result.sort()
        # use images instead of plain text when displaying media types
        image_tag = ""
        if "text" in result:
            image_tag = ' <img title="text" src="{}css/sexybuttons/images/icons/silk/page' \
                '_white_text_media_type.png" /> ' \
                .format(MEDIA_URL)
        # Icons for the other media types are disabled alongside their
        # collection code above.
        # if "audio" in result:
        #     image_tag = image_tag + ' <img title="audio" src="{}css/sexybuttons/images/' \
        #         'icons/silk/sound_none.png" /> ' \
        #         .format(MEDIA_URL)
        # if "image" in result:
        #     image_tag = image_tag + ' <img title="image" src="{}css/sexybuttons/images/' \
        #         'icons/silk/picture.png" /> ' \
        #         .format(MEDIA_URL)
        # if "video" in result:
        #     image_tag = image_tag + ' <img title="video" src="{}css/sexybuttons/images/' \
        #         'icons/silk/film.png" />' \
        #         .format(MEDIA_URL)
        # if "textNumerical" in result:
        #     image_tag = image_tag + ' <img title="textNumerical" src="{}css/sexybuttons/images/' \
        #         'icons/silk/eye.png" />' \
        #         .format(MEDIA_URL)
        # if "textNgram" in result:
        #     image_tag = image_tag + ' <img title="textNgram" src="{}css/sexybuttons/images/' \
        #         'icons/silk/text_align_left.png" />' \
        #         .format(MEDIA_URL)
        return image_tag
def resource_media_types(parser, token):
    """
    Compiles the ``resource_media_types`` template tag.

    Use it like this:
    {% resource_media_types object.resourceComponentType.as_subclass %}
    """
    args = token.contents.split()
    if len(args) != 2:
        raise template.TemplateSyntaxError(
            "%r tag accepts exactly two arguments" % args[0])
    return ResourceMediaTypes(args[1])


register.tag('resource_media_types', resource_media_types)
|
import base64
import shutil
import tempfile
import urllib2
import json
from celery import task
from django.conf import settings
from django.utils.timezone import now
from github.GithubObject import NotSet
from github import Github, GithubException, InputGitTreeElement
from ide.git import git_auth_check, get_github
from ide.models.build import BuildResult
from ide.models.project import Project
from ide.tasks import do_import_archive, run_compile
from ide.utils.git import git_sha, git_blob
from ide.utils.project import find_project_root
from ide.utils.sdk import generate_resource_dict, generate_v2_manifest_dict, dict_to_pretty_json, generate_v2_manifest
from utils.keen_helper import send_keen_event
__author__ = 'katharine'
@task(acks_late=True)
def do_import_github(project_id, github_user, github_project, github_branch, delete_project=False):
    """Imports a GitHub branch archive into the given project.

    Downloads the zipball for the branch and hands it to do_import_archive.
    On failure, the error is reported to keen and, when delete_project is set,
    the half-created project is removed before the exception is re-raised.
    """
    try:
        url = "https://github.com/%s/%s/archive/%s.zip" % (github_user, github_project, github_branch)
        if file_exists(url):
            u = urllib2.urlopen(url)
            with tempfile.NamedTemporaryFile(suffix='.zip') as temp:
                shutil.copyfileobj(u, temp)
                temp.flush()
                return do_import_archive(project_id, temp.name)
        else:
            raise Exception("The branch '%s' does not exist." % github_branch)
    except Exception as e:
        try:
            project = Project.objects.get(pk=project_id)
            user = project.owner
        # Narrowed from a bare 'except:' so SystemExit/KeyboardInterrupt are
        # not swallowed while handling the import failure.
        except Exception:
            project = None
            user = None
        if delete_project and project is not None:
            try:
                project.delete()
            except Exception:
                # Best-effort cleanup; the original import error matters more.
                pass
        send_keen_event('cloudpebble', 'cloudpebble_github_import_failed', user=user, data={
            'data': {
                'reason': e.message,
                'github_user': github_user,
                'github_project': github_project,
                'github_branch': github_branch
            }
        })
        raise
def file_exists(url):
    """Returns True when an HTTP HEAD request for *url* succeeds."""
    request = urllib2.Request(url)
    # urllib2 has no direct HEAD support; override the method resolver.
    request.get_method = lambda: 'HEAD'
    try:
        urllib2.urlopen(request)
    # Narrowed from a bare 'except:' (which also caught SystemExit and
    # KeyboardInterrupt); any request failure means "does not exist".
    except Exception:
        return False
    else:
        return True
# SDK2 support has made this function a huge, unmaintainable mess.
@git_auth_check
def github_push(user, commit_message, repo_name, project):
g = Github(user.github.token, client_id=settings.GITHUB_CLIENT_ID, client_secret=settings.GITHUB_CLIENT_SECRET)
repo = g.get_repo(repo_name)
try:
branch = repo.get_branch(project.github_branch or repo.master_branch)
except GithubException:
raise Exception("Unable to get branch.")
commit = repo.get_git_commit(branch.commit.sha)
tree = repo.get_git_tree(commit.tree.sha, recursive=True)
paths = [x.path for x in tree.tree]
next_tree = {x.path: InputGitTreeElement(path=x.path, mode=x.mode, type=x.type, sha=x.sha) for x in tree.tree}
try:
remote_version, root = find_project_root(paths)
except:
remote_version, root = project.sdk_version, ''
src_root = root + 'src/'
project_sources = project.source_files.all()
has_changed = False
for source in project_sources:
repo_path = src_root + source.file_name
if repo_path not in next_tree:
has_changed = True
next_tree[repo_path] = InputGitTreeElement(path=repo_path, mode='100644', type='blob',
content=source.get_contents())
print "New file: %s" % repo_path
else:
sha = next_tree[repo_path]._InputGitTreeElement__sha
our_content = source.get_contents()
expected_sha = git_sha(our_content)
if expected_sha != sha:
print "Updated file: %s" % repo_path
next_tree[repo_path]._InputGitTreeElement__sha = NotSet
next_tree[repo_path]._InputGitTreeElement__content = our_content
has_changed = True
expected_source_files = [src_root + x.file_name for x in project_sources]
for path in next_tree.keys():
if not path.startswith(src_root):
continue
if path not in expected_source_files:
del next_tree[path]
print "Deleted file: %s" % path
has_changed = True
# Now try handling resource files.
resources = project.resources.all()
old_resource_root = root + ("resources/src/" if remote_version == '1' else 'resources/')
new_resource_root = root + ("resources/src/" if project.sdk_version == '1' else 'resources/')
# Migrate all the resources so we can subsequently ignore the issue.
if old_resource_root != new_resource_root:
print "moving resources"
new_next_tree = next_tree.copy()
for path in next_tree:
if path.startswith(old_resource_root) and not path.endswith('resource_map.json'):
new_path = new_resource_root + path[len(old_resource_root):]
print "moving %s to %s" % (path, new_path)
next_tree[path]._InputGitTreeElement__path = new_path
new_next_tree[new_path] = next_tree[path]
del new_next_tree[path]
next_tree = new_next_tree
for res in resources:
repo_path = new_resource_root + res.path
if repo_path in next_tree:
content = res.get_contents()
if git_sha(content) != next_tree[repo_path]._InputGitTreeElement__sha:
print "Changed resource: %s" % repo_path
has_changed = True
blob = repo.create_git_blob(base64.b64encode(content), 'base64')
print "Created blob %s" % blob.sha
next_tree[repo_path]._InputGitTreeElement__sha = blob.sha
else:
print "New resource: %s" % repo_path
blob = repo.create_git_blob(base64.b64encode(res.get_contents()), 'base64')
print "Created blob %s" % blob.sha
next_tree[repo_path] = InputGitTreeElement(path=repo_path, mode='100644', type='blob', sha=blob.sha)
# Both of these are used regardless of version
remote_map_path = root + 'resources/src/resource_map.json'
remote_manifest_path = root + 'appinfo.json'
if remote_version == '1':
remote_map_sha = next_tree[remote_map_path]._InputGitTreeElement__sha if remote_map_path in next_tree else None
if remote_map_sha is not None:
their_res_dict = json.loads(git_blob(repo, remote_map_sha))
else:
their_res_dict = {'friendlyVersion': 'VERSION', 'versionDefName': '', 'media': []}
their_manifest_dict = {}
else:
remote_manifest_sha = next_tree[remote_manifest_path]._InputGitTreeElement__sha if remote_map_path in next_tree else None
if remote_manifest_sha is not None:
their_manifest_dict = json.loads(git_blob(repo, remote_manifest_sha))
their_res_dict = their_manifest_dict['resources']
else:
their_manifest_dict = {}
their_res_dict = {'media': []}
if project.sdk_version == '1':
our_res_dict = generate_resource_dict(project, resources)
else:
our_manifest_dict = generate_v2_manifest_dict(project, resources)
our_res_dict = our_manifest_dict['resources']
if our_res_dict != their_res_dict:
print "Resources mismatch."
has_changed = True
# Try removing things that we've deleted, if any
to_remove = set(x['file'] for x in their_res_dict['media']) - set(x['file'] for x in our_res_dict['media'])
for path in to_remove:
repo_path = new_resource_root + path
if repo_path in next_tree:
print "Deleted resource: %s" % repo_path
del next_tree[repo_path]
# Update the stored resource map, if applicable.
if project.sdk_version == '1':
if remote_map_path in next_tree:
next_tree[remote_map_path]._InputGitTreeElement__sha = NotSet
next_tree[remote_map_path]._InputGitTreeElement__content = dict_to_pretty_json(our_res_dict)
else:
next_tree[remote_map_path] = InputGitTreeElement(path=remote_map_path, mode='100644', type='blob',
content=dict_to_pretty_json(our_res_dict))
# Delete the v2 manifest, if one exists
if remote_manifest_path in next_tree:
del next_tree[remote_manifest_path]
# This one is separate because there's more than just the resource map changing.
if project.sdk_version == '2' and their_manifest_dict != our_manifest_dict:
if remote_manifest_path in next_tree:
next_tree[remote_manifest_path]._InputGitTreeElement__sha = NotSet
next_tree[remote_manifest_path]._InputGitTreeElement__content = generate_v2_manifest(project, resources)
else:
next_tree[remote_manifest_path] = InputGitTreeElement(path=remote_manifest_path, mode='100644', type='blob',
content=generate_v2_manifest(project, resources))
# Delete the v1 manifest, if one exists
if remote_map_path in next_tree:
del next_tree[remote_map_path]
# Commit the new tree.
if has_changed:
print "Has changed; committing"
# GitHub seems to choke if we pass the raw directory nodes off to it,
# so we delete those.
for x in next_tree.keys():
if next_tree[x]._InputGitTreeElement__mode == '040000':
del next_tree[x]
print "removing subtree node %s" % x
print [x._InputGitTreeElement__mode for x in next_tree.values()]
git_tree = repo.create_git_tree(next_tree.values())
print "Created tree %s" % git_tree.sha
git_commit = repo.create_git_commit(commit_message, git_tree, [commit])
print "Created commit %s" % git_commit.sha
git_ref = repo.get_git_ref('heads/%s' % (project.github_branch or repo.master_branch))
git_ref.edit(git_commit.sha)
print "Updated ref %s" % git_ref.ref
project.github_last_commit = git_commit.sha
project.github_last_sync = now()
project.save()
return True
send_keen_event('cloudpebble', 'cloudpebble_github_push', user=user, data={
'data': {
'repo': project.github_repo
}
})
return False
@git_auth_check
def github_pull(user, project):
    """Replaces the project's contents with the tip of its GitHub branch.

    Validates the remote layout first, then wipes the project's sources and
    resources and re-imports them from the branch's zipball.

    Returns False when the project is already at the branch tip; otherwise
    returns the result of do_import_archive. Raises on a missing repo,
    branch, manifest or resource.
    """
    g = get_github(user)
    repo_name = project.github_repo
    if repo_name is None:
        raise Exception("No GitHub repo defined.")
    repo = g.get_repo(repo_name)
    # If somehow we don't have a branch set, this will use the "master_branch"
    branch_name = project.github_branch or repo.master_branch
    try:
        branch = repo.get_branch(branch_name)
    except GithubException:
        raise Exception("Unable to get the branch.")
    if project.github_last_commit == branch.commit.sha:
        # Nothing to do.
        return False
    commit = repo.get_git_commit(branch.commit.sha)
    tree = repo.get_git_tree(commit.tree.sha, recursive=True)
    paths = {x.path: x for x in tree.tree}
    version, root = find_project_root(paths)
    # First try finding the resource map so we don't fail out part-done later.
    # TODO: transaction support for file contents would be nice...
    # SDK2
    if version == '2':
        resource_root = root + 'resources/'
        manifest_path = root + 'appinfo.json'
        if manifest_path in paths:
            manifest_sha = paths[manifest_path].sha
            manifest = json.loads(git_blob(repo, manifest_sha))
            media = manifest.get('resources', {}).get('media', [])
        else:
            raise Exception("appinfo.json not found")
    else:
        # SDK1
        resource_root = root + 'resources/src/'
        remote_map_path = resource_root + 'resource_map.json'
        if remote_map_path in paths:
            remote_map_sha = paths[remote_map_path].sha
            remote_map = json.loads(git_blob(repo, remote_map_sha))
            media = remote_map['media']
        else:
            raise Exception("resource_map.json not found.")
    # Every resource listed in the manifest/map must exist in the tree.
    for resource in media:
        path = resource_root + resource['file']
        if path not in paths:
            raise Exception("Resource %s not found in repo." % path)
    # Now we grab the zip.
    zip_url = repo.get_archive_link('zipball', branch_name)
    u = urllib2.urlopen(zip_url)
    with tempfile.NamedTemporaryFile(suffix='.zip') as temp:
        shutil.copyfileobj(u, temp)
        temp.flush()
        # And wipe the project!
        project.source_files.all().delete()
        project.resources.all().delete()
        # This must happen before do_import_archive or we'll stamp on its results.
        project.github_last_commit = branch.commit.sha
        project.github_last_sync = now()
        project.save()
        import_result = do_import_archive(project.id, temp.name)
        send_keen_event('cloudpebble', 'cloudpebble_github_pull', user=user, data={
            'data': {
                'repo': project.github_repo
            }
        })
        return import_result
@task
def do_github_push(project_id, commit_message):
    """Celery entry point: push a project's current state to its GitHub repo."""
    # select_related avoids a second query for the owner's GitHub credentials.
    proj = Project.objects.select_related('owner__github').get(pk=project_id)
    owner = proj.owner
    return github_push(owner, commit_message, proj.github_repo, proj)
@task
def do_github_pull(project_id):
    """Celery entry point: pull the project's GitHub branch into CloudPebble."""
    proj = Project.objects.select_related('owner__github').get(pk=project_id)
    owner = proj.owner
    return github_pull(owner, proj)
@task
def hooked_commit(project_id, target_commit):
    """Celery entry point for GitHub webhooks: sync the project and, if
    configured, kick off a build. Returns True if any work was done."""
    proj = Project.objects.select_related('owner__github').get(pk=project_id)
    print("Comparing %s versus %s" % (proj.github_last_commit, target_commit))
    # Pull only when the webhook's commit differs from what we last saw.
    needs_pull = proj.github_last_commit != target_commit
    if needs_pull:
        github_pull(proj.owner, proj)
    wants_build = bool(proj.github_hook_build)
    if wants_build:
        build = BuildResult.objects.create(project=proj)
        run_compile(build.id)
    return needs_pull or wants_build
The wscript file should be pushed to GitHub.
import base64
import shutil
import tempfile
import urllib2
import json
from celery import task
from django.conf import settings
from django.utils.timezone import now
from github.GithubObject import NotSet
from github import Github, GithubException, InputGitTreeElement
from ide.git import git_auth_check, get_github
from ide.models.build import BuildResult
from ide.models.project import Project
from ide.tasks import do_import_archive, run_compile
from ide.utils.git import git_sha, git_blob
from ide.utils.project import find_project_root
from ide.utils.sdk import generate_resource_dict, generate_v2_manifest_dict, dict_to_pretty_json, generate_v2_manifest,\
generate_wscript_file
from utils.keen_helper import send_keen_event
__author__ = 'katharine'
@task(acks_late=True)
def do_import_github(project_id, github_user, github_project, github_branch, delete_project=False):
    """Import a GitHub branch archive into the given project.

    Downloads https://github.com/<user>/<project>/archive/<branch>.zip and
    hands it to do_import_archive. On any failure a keen event is recorded,
    the project is optionally deleted, and the exception is re-raised.
    """
    try:
        url = "https://github.com/%s/%s/archive/%s.zip" % (github_user, github_project, github_branch)
        if file_exists(url):
            u = urllib2.urlopen(url)
            try:
                with tempfile.NamedTemporaryFile(suffix='.zip') as temp:
                    shutil.copyfileobj(u, temp)
                    temp.flush()
                    return do_import_archive(project_id, temp.name)
            finally:
                # urllib2 handles are not context managers; close explicitly
                # (previously leaked).
                u.close()
        else:
            raise Exception("The branch '%s' does not exist." % github_branch)
    except Exception as e:
        # Best-effort lookup: the project may already be gone.
        # Was a bare `except:`; narrowed so ^C/SystemExit still propagate.
        try:
            project = Project.objects.get(pk=project_id)
            user = project.owner
        except Exception:
            project = None
            user = None
        if delete_project and project is not None:
            try:
                project.delete()
            except Exception:
                pass  # deliberate best-effort cleanup
        send_keen_event('cloudpebble', 'cloudpebble_github_import_failed', user=user, data={
            'data': {
                # NOTE(review): e.message is py2-only and empty for some
                # exceptions; str(e) would be more reliable — confirm payload
                # consumers before changing.
                'reason': e.message,
                'github_user': github_user,
                'github_project': github_project,
                'github_branch': github_branch
            }
        })
        raise
def file_exists(url):
    """Return True if an HTTP HEAD request to *url* succeeds, else False."""
    request = urllib2.Request(url)
    # urllib2 has no native HEAD support; override the method on the request.
    request.get_method = lambda: 'HEAD'
    try:
        response = urllib2.urlopen(request)
    except Exception:
        # Was a bare `except:`, which also swallowed KeyboardInterrupt and
        # SystemExit; narrowed to Exception.
        return False
    else:
        response.close()  # previously leaked the connection
        return True
# SDK2 support has made this function a huge, unmaintainable mess.
@git_auth_check
def github_push(user, commit_message, repo_name, project):
    """Push the project's sources, resources and manifest to GitHub.

    Builds a new git tree mirroring the project's state, creates a commit on
    the project's branch and advances the branch ref. Returns True if a
    commit was created, False if nothing changed.
    """
    g = Github(user.github.token, client_id=settings.GITHUB_CLIENT_ID, client_secret=settings.GITHUB_CLIENT_SECRET)
    repo = g.get_repo(repo_name)
    try:
        branch = repo.get_branch(project.github_branch or repo.master_branch)
    except GithubException:
        raise Exception("Unable to get branch.")
    commit = repo.get_git_commit(branch.commit.sha)
    tree = repo.get_git_tree(commit.tree.sha, recursive=True)
    paths = [x.path for x in tree.tree]
    # Start from the remote tree and mutate it into the desired state.
    next_tree = {x.path: InputGitTreeElement(path=x.path, mode=x.mode, type=x.type, sha=x.sha) for x in tree.tree}
    try:
        remote_version, root = find_project_root(paths)
    except Exception:
        # Was a bare `except:`; narrowed. No recognisable project layout —
        # assume the repo root and the project's own SDK version.
        remote_version, root = project.sdk_version, ''
    src_root = root + 'src/'
    project_sources = project.source_files.all()
    has_changed = False
    for source in project_sources:
        repo_path = src_root + source.file_name
        if repo_path not in next_tree:
            has_changed = True
            next_tree[repo_path] = InputGitTreeElement(path=repo_path, mode='100644', type='blob',
                                                       content=source.get_contents())
            print("New file: %s" % repo_path)
        else:
            sha = next_tree[repo_path]._InputGitTreeElement__sha
            our_content = source.get_contents()
            expected_sha = git_sha(our_content)
            if expected_sha != sha:
                print("Updated file: %s" % repo_path)
                next_tree[repo_path]._InputGitTreeElement__sha = NotSet
                next_tree[repo_path]._InputGitTreeElement__content = our_content
                has_changed = True
    expected_source_files = [src_root + x.file_name for x in project_sources]
    # .keys() is a list in py2, so deleting while iterating is safe here.
    for path in next_tree.keys():
        if not path.startswith(src_root):
            continue
        if path not in expected_source_files:
            del next_tree[path]
            print("Deleted file: %s" % path)
            has_changed = True
    # Now try handling resource files.
    resources = project.resources.all()
    old_resource_root = root + ("resources/src/" if remote_version == '1' else 'resources/')
    new_resource_root = root + ("resources/src/" if project.sdk_version == '1' else 'resources/')
    # Migrate all the resources so we can subsequently ignore the issue.
    if old_resource_root != new_resource_root:
        print("moving resources")
        new_next_tree = next_tree.copy()
        for path in next_tree:
            if path.startswith(old_resource_root) and not path.endswith('resource_map.json'):
                new_path = new_resource_root + path[len(old_resource_root):]
                print("moving %s to %s" % (path, new_path))
                next_tree[path]._InputGitTreeElement__path = new_path
                new_next_tree[new_path] = next_tree[path]
                del new_next_tree[path]
        next_tree = new_next_tree
    for res in resources:
        repo_path = new_resource_root + res.path
        if repo_path in next_tree:
            content = res.get_contents()
            if git_sha(content) != next_tree[repo_path]._InputGitTreeElement__sha:
                print("Changed resource: %s" % repo_path)
                has_changed = True
                blob = repo.create_git_blob(base64.b64encode(content), 'base64')
                print("Created blob %s" % blob.sha)
                next_tree[repo_path]._InputGitTreeElement__sha = blob.sha
        else:
            print("New resource: %s" % repo_path)
            blob = repo.create_git_blob(base64.b64encode(res.get_contents()), 'base64')
            print("Created blob %s" % blob.sha)
            next_tree[repo_path] = InputGitTreeElement(path=repo_path, mode='100644', type='blob', sha=blob.sha)
    # Both of these are used regardless of version
    remote_map_path = root + 'resources/src/resource_map.json'
    remote_manifest_path = root + 'appinfo.json'
    remote_wscript_path = root + 'wscript'
    if remote_version == '1':
        remote_map_sha = next_tree[remote_map_path]._InputGitTreeElement__sha if remote_map_path in next_tree else None
        if remote_map_sha is not None:
            their_res_dict = json.loads(git_blob(repo, remote_map_sha))
        else:
            their_res_dict = {'friendlyVersion': 'VERSION', 'versionDefName': '', 'media': []}
        their_manifest_dict = {}
    else:
        # BUG FIX: the condition previously tested `remote_map_path in
        # next_tree` (copy-paste from the SDK1 branch) while reading
        # next_tree[remote_manifest_path], which could KeyError or skip a
        # present manifest.
        remote_manifest_sha = next_tree[remote_manifest_path]._InputGitTreeElement__sha if remote_manifest_path in next_tree else None
        if remote_manifest_sha is not None:
            their_manifest_dict = json.loads(git_blob(repo, remote_manifest_sha))
            their_res_dict = their_manifest_dict['resources']
        else:
            their_manifest_dict = {}
            their_res_dict = {'media': []}
    if project.sdk_version == '1':
        our_res_dict = generate_resource_dict(project, resources)
    else:
        our_manifest_dict = generate_v2_manifest_dict(project, resources)
        our_res_dict = our_manifest_dict['resources']
    if our_res_dict != their_res_dict:
        print("Resources mismatch.")
        has_changed = True
        # Try removing things that we've deleted, if any
        to_remove = set(x['file'] for x in their_res_dict['media']) - set(x['file'] for x in our_res_dict['media'])
        for path in to_remove:
            repo_path = new_resource_root + path
            if repo_path in next_tree:
                print("Deleted resource: %s" % repo_path)
                del next_tree[repo_path]
        # Update the stored resource map, if applicable.
        if project.sdk_version == '1':
            if remote_map_path in next_tree:
                next_tree[remote_map_path]._InputGitTreeElement__sha = NotSet
                next_tree[remote_map_path]._InputGitTreeElement__content = dict_to_pretty_json(our_res_dict)
            else:
                next_tree[remote_map_path] = InputGitTreeElement(path=remote_map_path, mode='100644', type='blob',
                                                                 content=dict_to_pretty_json(our_res_dict))
            # Delete the v2 manifest, if one exists
            if remote_manifest_path in next_tree:
                del next_tree[remote_manifest_path]
    # This one is separate because there's more than just the resource map changing.
    if project.sdk_version == '2' and their_manifest_dict != our_manifest_dict:
        if remote_manifest_path in next_tree:
            next_tree[remote_manifest_path]._InputGitTreeElement__sha = NotSet
            next_tree[remote_manifest_path]._InputGitTreeElement__content = generate_v2_manifest(project, resources)
        else:
            next_tree[remote_manifest_path] = InputGitTreeElement(path=remote_manifest_path, mode='100644', type='blob',
                                                                  content=generate_v2_manifest(project, resources))
        # Delete the v1 manifest, if one exists
        if remote_map_path in next_tree:
            del next_tree[remote_map_path]
    if project.sdk_version == '2':
        if remote_wscript_path not in next_tree:
            next_tree[remote_wscript_path] = InputGitTreeElement(path=remote_wscript_path, mode='100644', type='blob',
                                                                 content=generate_wscript_file(project, True))
            has_changed = True
    else:
        # BUG FIX: guard added — an SDK1 project with no wscript in the repo
        # previously raised KeyError here.
        if remote_wscript_path in next_tree:
            del next_tree[remote_wscript_path]
    # Commit the new tree.
    if has_changed:
        print("Has changed; committing")
        # GitHub seems to choke if we pass the raw directory nodes off to it,
        # so we delete those. (.keys() is a list in py2; safe to delete in-loop.)
        for x in next_tree.keys():
            if next_tree[x]._InputGitTreeElement__mode == '040000':
                del next_tree[x]
                print("removing subtree node %s" % x)
        print([x._InputGitTreeElement__mode for x in next_tree.values()])
        git_tree = repo.create_git_tree(next_tree.values())
        print("Created tree %s" % git_tree.sha)
        git_commit = repo.create_git_commit(commit_message, git_tree, [commit])
        print("Created commit %s" % git_commit.sha)
        git_ref = repo.get_git_ref('heads/%s' % (project.github_branch or repo.master_branch))
        git_ref.edit(git_commit.sha)
        print("Updated ref %s" % git_ref.ref)
        project.github_last_commit = git_commit.sha
        project.github_last_sync = now()
        project.save()
        return True
    # NOTE(review): the push keen event is only recorded when NOTHING was
    # committed — this looks inverted; confirm whether it should fire on the
    # has_changed path instead.
    send_keen_event('cloudpebble', 'cloudpebble_github_push', user=user, data={
        'data': {
            'repo': project.github_repo
        }
    })
    return False
@git_auth_check
def github_pull(user, project):
    """Pull the project's GitHub branch and re-import it into CloudPebble.

    Validates the remote layout first, then WIPES the project's sources and
    resources and replaces them with the branch archive. Returns False when
    the branch head matches the last-synced commit (nothing to do),
    otherwise the result of do_import_archive.
    """
    g = get_github(user)
    repo_name = project.github_repo
    if repo_name is None:
        raise Exception("No GitHub repo defined.")
    repo = g.get_repo(repo_name)
    # If somehow we don't have a branch set, this will use the "master_branch"
    branch_name = project.github_branch or repo.master_branch
    try:
        branch = repo.get_branch(branch_name)
    except GithubException:
        raise Exception("Unable to get the branch.")
    if project.github_last_commit == branch.commit.sha:
        # Nothing to do.
        return False
    commit = repo.get_git_commit(branch.commit.sha)
    tree = repo.get_git_tree(commit.tree.sha, recursive=True)
    paths = {x.path: x for x in tree.tree}
    version, root = find_project_root(paths)
    # First try finding the resource map so we don't fail out part-done later.
    # TODO: transaction support for file contents would be nice...
    # SDK2
    if version == '2':
        resource_root = root + 'resources/'
        manifest_path = root + 'appinfo.json'
        if manifest_path in paths:
            manifest_sha = paths[manifest_path].sha
            manifest = json.loads(git_blob(repo, manifest_sha))
            media = manifest.get('resources', {}).get('media', [])
        else:
            raise Exception("appinfo.json not found")
    else:
        # SDK1
        resource_root = root + 'resources/src/'
        remote_map_path = resource_root + 'resource_map.json'
        if remote_map_path in paths:
            remote_map_sha = paths[remote_map_path].sha
            remote_map = json.loads(git_blob(repo, remote_map_sha))
            media = remote_map['media']
        else:
            raise Exception("resource_map.json not found.")
    # Verify every declared resource exists BEFORE we delete anything locally.
    for resource in media:
        path = resource_root + resource['file']
        if path not in paths:
            raise Exception("Resource %s not found in repo." % path)
    # Now we grab the zip.
    zip_url = repo.get_archive_link('zipball', branch_name)
    u = urllib2.urlopen(zip_url)
    with tempfile.NamedTemporaryFile(suffix='.zip') as temp:
        shutil.copyfileobj(u, temp)
        temp.flush()
        # And wipe the project!
        project.source_files.all().delete()
        project.resources.all().delete()
        # This must happen before do_import_archive or we'll stamp on its results.
        project.github_last_commit = branch.commit.sha
        project.github_last_sync = now()
        project.save()
        import_result = do_import_archive(project.id, temp.name)
        send_keen_event('cloudpebble', 'cloudpebble_github_pull', user=user, data={
            'data': {
                'repo': project.github_repo
            }
        })
        return import_result
@task
def do_github_push(project_id, commit_message):
    """Celery task wrapper: push the project identified by *project_id*."""
    # select_related pulls the owner's GitHub credentials in the same query.
    project = Project.objects.select_related('owner__github').get(pk=project_id)
    return github_push(project.owner, commit_message, project.github_repo, project)
@task
def do_github_pull(project_id):
    """Celery task wrapper: pull the project identified by *project_id*."""
    project = Project.objects.select_related('owner__github').get(pk=project_id)
    return github_pull(project.owner, project)
@task
def hooked_commit(project_id, target_commit):
    """Webhook handler task: pull if the repo moved, optionally trigger a
    build. Returns True if either action was taken."""
    project = Project.objects.select_related('owner__github').get(pk=project_id)
    did_something = False
    print "Comparing %s versus %s" % (project.github_last_commit, target_commit)
    if project.github_last_commit != target_commit:
        github_pull(project.owner, project)
        did_something = True
    if project.github_hook_build:
        build = BuildResult.objects.create(project=project)
        run_compile(build.id)
        did_something = True
    return did_something
#!/usr/bin/env python
import os
import sys
import pickle
import time
import traceback
import yahoo.search
from yahoo.search.web import WebSearch
# NOTE(review): hard-coded Yahoo application ID committed to source; should
# live in an environment variable or untracked config file.
APP_ID = 'P5ihFKzV34G69QolFfb3nN7p0rSsYfC9tPGq.IUS.NLWEeJ14SG9Lei0rwFtgwL8cDBrA6Egdw--'
# Exclusions appended to every query to filter IP-directory/whois noise
# out of the search results.
QUERY_MODIFIERS = '-site:txdns.net -site:sitedossier.com -mx -site:dataopedia.com -site:l0t3k.net -syslog -"4.2.2.1" -site:cqcounter.com -site:flow.nttu.edu.tw -site:websiteoutlook.com -site:ipgeolocator.com -site:tdyndns.org -site:ebrara.com -site:onsamehost.com -site:ipaddresscentral.com -site:quia.jp -inetnum -site:domaintools.com -site:domainbyip.com -site:pdos.csail.mit.edu -statistics -"country name" -"Q_RTT" -site:botsvsbrowsers.com -"ptr record" -site:ip-db.com -site:chaip.com.cn -site:lookup365.com -"IP Country" -site:iptoolboxes.com -"Unknown Country" -"Q_RTT" -amerika -whois -Mozilla -site:domaincrawler.com -site:geek-tools.org -site:visualware.com -site:robtex.com -site:domaintool.se -site:opendns.se -site:ungefiltert-surfen.de -site:datakitteh.org -"SLOVAKIA (SK)" -"IP Search" -site:www.medicore.com.ua -site:dig.similarbase.com -site:ipcorporationwiki.com -site:coolwhois.com'
# Per-IP pickled search results are cached under the user's home directory.
CACHE_DIR = os.getenv('HOME') + '/.ycache'
def CheckPopularity(ip):
    """Return web-search results mentioning *ip*, cached on disk.

    Results are cached as one pickle per IP under CACHE_DIR. Returns a list
    of result dicts, or [] if the search fails.
    """
    cache_path = os.path.join(CACHE_DIR, ip) + '.pickle'
    if os.path.exists(cache_path):
        f = open(cache_path)
        try:
            return pickle.load(f)
        finally:
            f.close()  # previously leaked the handle
    try:
        query = '"%s" %s' % (ip, QUERY_MODIFIERS)
        srch = WebSearch(APP_ID, query=query, results=50)
        results = srch.parse_results()
        # Robustness: first run fails without the cache directory.
        if not os.path.isdir(CACHE_DIR):
            os.makedirs(CACHE_DIR)
        # BUG FIX: previously wrote to cache_path + '.pickle', doubling the
        # extension, so the cache was written but never read back.
        pf = open(cache_path, 'w')
        try:
            pickle.dump(results.results, pf)
        finally:
            pf.close()
        # Return the same shape the cache-hit path returns (the list that was
        # pickled) instead of the raw results object.
        return results.results
    except yahoo.search.SearchError:
        print("%s failed" % (ip))
        return []
if __name__ == "__main__":
    for ip in sys.argv[1:]:
        # BUG FIX: 'results' and 'total' were never assigned, so this loop
        # crashed with NameError; fetch the results for each IP here.
        results = CheckPopularity(ip)
        # CheckPopularity may return either a plain list (cache hit/failure)
        # or a search-results object (fresh query); normalise to a list.
        items = results.results if hasattr(results, 'results') else results
        print('%s = %s' % (ip, len(items)))
        for result in items:
            try:
                print(' - %s: %s' % (result['Url'], result['Title']))
            except UnicodeEncodeError:
                # Titles can contain characters the terminal can't encode.
                print(' - %s' % result['Url'])
        # Throttle to stay under the search API's rate limit.
        time.sleep(0.5)
Add corporationwiki.com and iptool.us to the excluded sites.
#!/usr/bin/env python
import os
import sys
import pickle
import time
import traceback
import yahoo.search
from yahoo.search.web import WebSearch
# NOTE(review): hard-coded Yahoo application ID committed to source; should
# live in an environment variable or untracked config file.
APP_ID = 'P5ihFKzV34G69QolFfb3nN7p0rSsYfC9tPGq.IUS.NLWEeJ14SG9Lei0rwFtgwL8cDBrA6Egdw--'
# Exclusions appended to every query to filter IP-directory/whois noise
# out of the search results.
QUERY_MODIFIERS = '-site:txdns.net -site:sitedossier.com -mx -site:dataopedia.com -site:l0t3k.net -syslog -"4.2.2.1" -site:cqcounter.com -site:flow.nttu.edu.tw -site:websiteoutlook.com -site:ipgeolocator.com -site:tdyndns.org -site:ebrara.com -site:onsamehost.com -site:ipaddresscentral.com -site:quia.jp -inetnum -site:domaintools.com -site:domainbyip.com -site:pdos.csail.mit.edu -statistics -"country name" -"Q_RTT" -site:botsvsbrowsers.com -"ptr record" -site:ip-db.com -site:chaip.com.cn -site:lookup365.com -"IP Country" -site:iptoolboxes.com -"Unknown Country" -"Q_RTT" -amerika -whois -Mozilla -site:domaincrawler.com -site:geek-tools.org -site:visualware.com -site:robtex.com -site:domaintool.se -site:opendns.se -site:ungefiltert-surfen.de -site:datakitteh.org -"SLOVAKIA (SK)" -"IP Search" -site:www.medicore.com.ua -site:dig.similarbase.com -site:ipcorporationwiki.com -site:coolwhois.com -site:corporationwiki.com -site:iptool.us'
# Per-IP pickled search results are cached under the user's home directory.
CACHE_DIR = os.getenv('HOME') + '/.ycache'
def CheckPopularity(ip):
    """Return web-search results mentioning *ip*, cached on disk.

    Results are cached as one pickle per IP under CACHE_DIR. Returns a list
    of result dicts, or [] if the search fails.
    """
    cache_path = os.path.join(CACHE_DIR, ip) + '.pickle'
    if os.path.exists(cache_path):
        f = open(cache_path)
        try:
            return pickle.load(f)
        finally:
            f.close()  # previously leaked the handle
    try:
        query = '"%s" %s' % (ip, QUERY_MODIFIERS)
        srch = WebSearch(APP_ID, query=query, results=50)
        results = srch.parse_results()
        # Robustness: first run fails without the cache directory.
        if not os.path.isdir(CACHE_DIR):
            os.makedirs(CACHE_DIR)
        # BUG FIX: previously wrote to cache_path + '.pickle', doubling the
        # extension, so the cache was written but never read back.
        pf = open(cache_path, 'w')
        try:
            pickle.dump(results.results, pf)
        finally:
            pf.close()
        # Return the same shape the cache-hit path returns (the list that was
        # pickled) instead of the raw results object.
        return results.results
    except yahoo.search.SearchError:
        print("%s failed" % (ip))
        return []
if __name__ == "__main__":
    for ip in sys.argv[1:]:
        # BUG FIX: 'results' and 'total' were never assigned, so this loop
        # crashed with NameError; fetch the results for each IP here.
        results = CheckPopularity(ip)
        # CheckPopularity may return either a plain list (cache hit/failure)
        # or a search-results object (fresh query); normalise to a list.
        items = results.results if hasattr(results, 'results') else results
        print('%s = %s' % (ip, len(items)))
        for result in items:
            try:
                print(' - %s: %s' % (result['Url'], result['Title']))
            except UnicodeEncodeError:
                # Titles can contain characters the terminal can't encode.
                print(' - %s' % result['Url'])
        # Throttle to stay under the search API's rate limit.
        time.sleep(0.5)
|
import unittest
from mock import Mock
import nose.tools as nt
from nbx.nbmanager.gisthub import TaggedGist, GistHub
from nbx.nbmanager.tests.common import hub, require_github, makeFakeGist
def generate_tagged_gists(names):
    """Build a {gist_id: TaggedGist} dict from a list of description strings."""
    def fake_gist(gist_id, description):
        gist = Mock()
        gist.description = description
        gist.id = gist_id
        return gist

    fakes = [fake_gist(i, n) for i, n in enumerate(names, 1)]
    # Gists with a falsy description are skipped, as in the original.
    return {g.id: TaggedGist.from_gist(g) for g in fakes if g.description}
def generate_gisthub(names):
    """Return a GistHub (backed by a mock hub) pre-seeded from *names*."""
    gisthub = GistHub(Mock())
    gisthub._tagged_gists = generate_tagged_gists(names)
    return gisthub
class TestTaggedGist(unittest.TestCase):
    """Unit tests for TaggedGist description parsing and revision helpers."""

    def __init__(self, *args, **kwargs):
        unittest.TestCase.__init__(self, *args, **kwargs)

    def runTest(self):
        # Dummy runTest so the TestCase can be instantiated directly.
        pass

    def setUp(self):
        pass

    def test_from_gist(self):
        # Hash-tags are split out of the description into .tags; the
        # remainder becomes .name.
        gist = Mock()
        gist.description = "Dale Name #notebook #hello"
        tg = TaggedGist.from_gist(gist)
        nt.assert_equals(tg.name, "Dale Name")
        nt.assert_items_equal(tg.tags, ['#notebook', '#hello'])
        nt.assert_true(tg.active)
        # System tags such as #inactive are consumed (they set flags) rather
        # than being kept in .tags.
        gist = Mock()
        gist.description = "Dale Name #notebook #inactive"
        tg = TaggedGist.from_gist(gist)
        nt.assert_equals(tg.name, "Dale Name")
        nt.assert_items_equal(tg.tags, ['#notebook'])
        # explicitly test system tags
        nt.assert_in('#inactive', TaggedGist.system_tags)
        nt.assert_not_in('#inactive', tg.tags)
        nt.assert_false(tg.active)

    def test_files(self):
        # .files should pass straight through to the underlying gist object.
        gist = Mock()
        gist.description = "Dale Name #notebook #hello"
        gist.files = object()
        tg = TaggedGist.from_gist(gist)
        nt.assert_is(tg.files, gist.files)

    def test_revisions_for_file(self):
        # TODO not a huge fan of how I mock github.Gist objects
        gist = makeFakeGist()
        tg = TaggedGist.from_gist(gist)
        a_revs = tg.revisions_for_file('a.ipynb')
        # should only have 2 revisions for a.ipynb
        nt.assert_equal(len(a_revs), 2)
        # make sure we got the right ones
        for state in a_revs:
            nt.assert_in('a.ipynb', state.raw_data['files'])

    def test_get_revision_file(self):
        # The fake gist names revision content "<file>_<id>_revision_content".
        gist = makeFakeGist()
        tg = TaggedGist.from_gist(gist)
        fo = tg.get_revision_file(0, 'a.ipynb')
        correct = "{fn}_{id}_revision_content".format(fn='a.ipynb', id=0)
        nt.assert_equal(fo['content'], correct)
class TestGistHub(unittest.TestCase):
    """Unit tests for GistHub filtering and querying of tagged gists."""

    def __init__(self, *args, **kwargs):
        unittest.TestCase.__init__(self, *args, **kwargs)

    def runTest(self):
        # Dummy runTest so the TestCase can be instantiated directly.
        pass

    def setUp(self):
        pass

    def test_filter_active(self):
        names = [
            "Test gist #frank",
            "Frank bob number 2 #frank #bob",
            "bob inactive #bob #inactive",
            "bob twin #bob #twin",
            "bob twin #bob #twin",
        ]
        gh = generate_gisthub(names)
        gists = gh._tagged_gists.values()
        # None: no filtering at all, everything comes back
        active = gh._filter_active(gists, None)
        nt.assert_equals(len(active), 5)
        test_names = [g.name for g in active]
        valid = ['Test gist', 'Frank bob number 2',
                 'bob twin', 'bob twin', 'bob inactive']
        nt.assert_items_equal(test_names, valid)
        # True: only gists without the #inactive system tag
        active = gh._filter_active(gists, True)
        nt.assert_equals(len(active), 4)
        test_names = [g.name for g in active]
        valid = ['Test gist', 'Frank bob number 2', 'bob twin', 'bob twin']
        nt.assert_items_equal(test_names, valid)
        # False: only the inactive gists
        active = gh._filter_active(gists, False)
        nt.assert_equals(len(active), 1)
        test_names = [g.name for g in active]
        valid = ['bob inactive']
        nt.assert_items_equal(test_names, valid)

    def test_filter_tag(self):
        names = [
            "Test gist #frank",
            "Frank bob number 2 #frank #bob",
            "bob inactive #bob #inactive",
            "bob twin #bob #twin",
            "bob twin #bob #twin",
        ]
        gh = generate_gisthub(names)
        gists = gh._tagged_gists.values()
        # _filter_tag takes the bare tag name (no leading '#')
        twins = gh._filter_tag(gists, 'twin')
        nt.assert_equals(len(twins), 2)
        test_names = [g.name for g in twins]
        valid = ['bob twin', 'bob twin']
        nt.assert_items_equal(test_names, valid)

    def test_query(self):
        names = [
            "Test gist #frank",
            "Frank bob number 2 #frank #bob",
            "bob inactive #bob #inactive",
            "bob twin #bob #twin",
            "bob twin #bob #twin",
        ]
        gh = generate_gisthub(names)
        gists = gh._tagged_gists.values()
        # inactive
        test = gh.query(active=False)
        nt.assert_equals(len(test.keys()), 1)
        nt.assert_equals(len(test['#bob']), 1)
        valid = ['bob inactive']
        test_names = [g.name for g in test['#bob']]
        nt.assert_items_equal(test_names, valid)
        # filtering inactive with bob, which should return same as above
        test = gh.query(active=False, filter_tag='bob', drop_filter=False)
        nt.assert_equals(len(test.keys()), 1)
        nt.assert_equals(len(test['#bob']), 1)
        valid = ['bob inactive']
        test_names = [g.name for g in test['#bob']]
        nt.assert_items_equal(test_names, valid)
        # query filter_tag
        names = [
            "Test gist #frank",
            "Frank bob number 2 #frank #bob",
            "bob inactive #bob #inactive",
            "bob twin #bob #twin",
            "bob twin #bob #twin",
        ]
        gh = generate_gisthub(names)
        # filtering only by #twin should get just the bob twins
        test = gh.query(filter_tag='twin')
        nt.assert_items_equal(test.keys(), ['#bob'])
        bobs = test['#bob']
        nt.assert_equal(len(bobs), 2)
if __name__ == '__main__':
    # Run this module's tests under nose, dropping into pdb on errors and
    # failures; -x stops at the first failure.
    import nose
    nose.runmodule(argv=[__file__,'-vvs','-x','--pdb', '--pdb-failure'],
                   exit=False)
Updated test expectations: query(active=False) now returns both the #inactive and #bob groups.
import unittest
from mock import Mock
import nose.tools as nt
from nbx.nbmanager.gisthub import TaggedGist, GistHub
from nbx.nbmanager.tests.common import hub, require_github, makeFakeGist
def generate_tagged_gists(names):
    """Create mock gists for *names* (ids starting at 1) and wrap each as a
    TaggedGist, keyed by gist id."""
    fakes = []
    for gist_id, description in enumerate(names, 1):
        fake = Mock()
        fake.description = description
        fake.id = gist_id
        fakes.append(fake)
    result = {}
    for fake in fakes:
        # Falsy descriptions are skipped, matching GistHub's behaviour.
        if fake.description:
            result[fake.id] = TaggedGist.from_gist(fake)
    return result
def generate_gisthub(names):
    """Return a GistHub whose tagged-gist cache is seeded from *names*."""
    tagged = generate_tagged_gists(names)
    hub_instance = GistHub(Mock())
    hub_instance._tagged_gists = tagged
    return hub_instance
class TestTaggedGist(unittest.TestCase):
    """Unit tests for TaggedGist description parsing and revision helpers."""

    def __init__(self, *args, **kwargs):
        unittest.TestCase.__init__(self, *args, **kwargs)

    def runTest(self):
        # Dummy runTest so the TestCase can be instantiated directly.
        pass

    def setUp(self):
        pass

    def test_from_gist(self):
        # Hash-tags are split out of the description into .tags; the
        # remainder becomes .name.
        gist = Mock()
        gist.description = "Dale Name #notebook #hello"
        tg = TaggedGist.from_gist(gist)
        nt.assert_equals(tg.name, "Dale Name")
        nt.assert_items_equal(tg.tags, ['#notebook', '#hello'])
        nt.assert_true(tg.active)
        # System tags such as #inactive are consumed (they set flags) rather
        # than being kept in .tags.
        gist = Mock()
        gist.description = "Dale Name #notebook #inactive"
        tg = TaggedGist.from_gist(gist)
        nt.assert_equals(tg.name, "Dale Name")
        nt.assert_items_equal(tg.tags, ['#notebook'])
        # explicitly test system tags
        nt.assert_in('#inactive', TaggedGist.system_tags)
        nt.assert_not_in('#inactive', tg.tags)
        nt.assert_false(tg.active)

    def test_files(self):
        # .files should pass straight through to the underlying gist object.
        gist = Mock()
        gist.description = "Dale Name #notebook #hello"
        gist.files = object()
        tg = TaggedGist.from_gist(gist)
        nt.assert_is(tg.files, gist.files)

    def test_revisions_for_file(self):
        # TODO not a huge fan of how I mock github.Gist objects
        gist = makeFakeGist()
        tg = TaggedGist.from_gist(gist)
        a_revs = tg.revisions_for_file('a.ipynb')
        # should only have 2 revisions for a.ipynb
        nt.assert_equal(len(a_revs), 2)
        # make sure we got the right ones
        for state in a_revs:
            nt.assert_in('a.ipynb', state.raw_data['files'])

    def test_get_revision_file(self):
        # The fake gist names revision content "<file>_<id>_revision_content".
        gist = makeFakeGist()
        tg = TaggedGist.from_gist(gist)
        fo = tg.get_revision_file(0, 'a.ipynb')
        correct = "{fn}_{id}_revision_content".format(fn='a.ipynb', id=0)
        nt.assert_equal(fo['content'], correct)
class TestGistHub(unittest.TestCase):
    """Unit tests for GistHub filtering and querying of tagged gists."""

    def __init__(self, *args, **kwargs):
        unittest.TestCase.__init__(self, *args, **kwargs)

    def runTest(self):
        # Dummy runTest so the TestCase can be instantiated directly.
        pass

    def setUp(self):
        pass

    def test_filter_active(self):
        names = [
            "Test gist #frank",
            "Frank bob number 2 #frank #bob",
            "bob inactive #bob #inactive",
            "bob twin #bob #twin",
            "bob twin #bob #twin",
        ]
        gh = generate_gisthub(names)
        gists = gh._tagged_gists.values()
        # None: no filtering at all, everything comes back
        active = gh._filter_active(gists, None)
        nt.assert_equals(len(active), 5)
        test_names = [g.name for g in active]
        valid = ['Test gist', 'Frank bob number 2',
                 'bob twin', 'bob twin', 'bob inactive']
        nt.assert_items_equal(test_names, valid)
        # True: only gists without the #inactive system tag
        active = gh._filter_active(gists, True)
        nt.assert_equals(len(active), 4)
        test_names = [g.name for g in active]
        valid = ['Test gist', 'Frank bob number 2', 'bob twin', 'bob twin']
        nt.assert_items_equal(test_names, valid)
        # False: only the inactive gists
        active = gh._filter_active(gists, False)
        nt.assert_equals(len(active), 1)
        test_names = [g.name for g in active]
        valid = ['bob inactive']
        nt.assert_items_equal(test_names, valid)

    def test_filter_tag(self):
        names = [
            "Test gist #frank",
            "Frank bob number 2 #frank #bob",
            "bob inactive #bob #inactive",
            "bob twin #bob #twin",
            "bob twin #bob #twin",
        ]
        gh = generate_gisthub(names)
        gists = gh._tagged_gists.values()
        # _filter_tag takes the bare tag name (no leading '#')
        twins = gh._filter_tag(gists, 'twin')
        nt.assert_equals(len(twins), 2)
        test_names = [g.name for g in twins]
        valid = ['bob twin', 'bob twin']
        nt.assert_items_equal(test_names, valid)

    def test_query(self):
        names = [
            "Test gist #frank",
            "Frank bob number 2 #frank #bob",
            "bob inactive #bob #inactive",
            "bob twin #bob #twin",
            "bob twin #bob #twin",
        ]
        gh = generate_gisthub(names)
        gists = gh._tagged_gists.values()
        # inactive
        test = gh.query(active=False)
        # will return both #inactive and #bob
        nt.assert_equals(len(test.keys()), 2)
        nt.assert_equals(len(test['#bob']), 1)
        valid = ['bob inactive']
        test_names = [g.name for g in test['#bob']]
        nt.assert_items_equal(test_names, valid)
        # filtering inactive with bob, which should return same as above
        test = gh.query(active=False, filter_tag='bob', drop_filter=False)
        nt.assert_equals(len(test.keys()), 2)
        nt.assert_equals(len(test['#bob']), 1)
        valid = ['bob inactive']
        test_names = [g.name for g in test['#bob']]
        nt.assert_items_equal(test_names, valid)
        # query filter_tag
        names = [
            "Test gist #frank",
            "Frank bob number 2 #frank #bob",
            "bob inactive #bob #inactive",
            "bob twin #bob #twin",
            "bob twin #bob #twin",
        ]
        gh = generate_gisthub(names)
        # filtering only by #twin should get just the bob twins
        test = gh.query(filter_tag='twin')
        nt.assert_items_equal(test.keys(), ['#bob'])
        bobs = test['#bob']
        nt.assert_equal(len(bobs), 2)
if __name__ == '__main__':
    # Run this module's tests under nose, dropping into pdb on errors and
    # failures; -x stops at the first failure.
    import nose
    nose.runmodule(argv=[__file__,'-vvs','-x','--pdb', '--pdb-failure'],
                   exit=False)
|
from netaddr import IPNetwork
from ryu.base import app_manager
from ryu.controller import ofp_event
from ryu.controller.handler import MAIN_DISPATCHER
from ryu.controller.handler import set_ev_cls
from ryu.ofproto import ofproto_v1_0, ofproto_v1_3
from ryu.lib import hub
from ryu.lib.packet import packet
from ryu.lib.packet import ethernet
from ryu.lib.packet import ether_types
from ryu.topology import api as topo_api
from ryu.services.protocols.bgp.bgpspeaker import BGPSpeaker
from conf_mgr import SDNIPConfigManager
from fwd import Fwd
from hop_db import HopDB
class SDNIP(app_manager.RyuApp):
    """SDN-IP Ryu application.

    Runs a BGP speaker and translates learned best paths into OpenFlow
    shortest-path rules via the Fwd and HopDB context apps.
    """
    OFP_VERSIONS = [ofproto_v1_3.OFP_VERSION]
    _CONTEXTS = {
        'fwd': Fwd,
        'hop_db': HopDB
    }

    def __init__(self, *args, **kwargs):
        super(SDNIP, self).__init__(*args, **kwargs)
        self.fwd = kwargs['fwd']
        self.hop_db = kwargs['hop_db']
        self.cfg_mgr = SDNIPConfigManager('config.json')
        # BUG FIX: the assignment and the BGPSpeaker(...) call were split
        # across lines with no continuation, which is a SyntaxError as
        # written; joined into one statement.
        self.bgp_speaker = BGPSpeaker(
            self.cfg_mgr.as_number,
            self.cfg_mgr.router_id,
            bgp_server_port=self.cfg_mgr.listen_port,
            best_path_change_handler=self.best_path_change_handler,
            peer_down_handler=self.peer_down_handler,
            peer_up_handler=self.peer_up_handler)
        speaker_ids = self.cfg_mgr.get_all_speaker_id()
        for speaker_id in speaker_ids:
            self.bgp_speaker.neighbor_add(speaker_id,
                                          self.cfg_mgr.as_number,
                                          is_next_hop_self=True)
        hub.spawn(self.prefix_check_loop)

    def best_path_change_handler(self, ev):
        """BGP callback: record the new hop and try to install it."""
        self.logger.info('best path changed:')
        self.logger.info('remote_as: %d', ev.remote_as)
        self.logger.info('route_dist: %s', ev.route_dist)
        self.logger.info('prefix: %s', ev.prefix)
        self.logger.info('nexthop: %s', ev.nexthop)
        self.logger.info('label: %s', ev.label)
        self.logger.info('is_withdraw: %s', ev.is_withdraw)
        self.logger.info('')
        # NOTE(review): withdrawals (ev.is_withdraw) are logged but handled
        # the same as announcements — confirm this is intended.
        self.hop_db.add_hop(ev.prefix, ev.nexthop)
        self.install_best_path(ev.prefix, ev.nexthop)

    def peer_down_handler(self, remote_ip, remote_as):
        """BGP callback: log loss of a peering session."""
        self.logger.info('peer down:')
        self.logger.info('remote_as: %d', remote_as)
        self.logger.info('remote ip: %s', remote_ip)
        self.logger.info('')

    def peer_up_handler(self, remote_ip, remote_as):
        """BGP callback: log establishment of a peering session."""
        self.logger.info('peer up:')
        self.logger.info('remote_as: %d', remote_as)
        self.logger.info('remote ip: %s', remote_ip)
        self.logger.info('')

    def get_nexthop_host(self, ip):
        """Return the topology host owning *ip*, or None if not (yet) known."""
        hosts = topo_api.get_all_host(self)
        for host in hosts:
            if ip in host.ipv4:
                return host
        return None

    def prefix_check_loop(self):
        """Green-thread loop: retry prefixes whose next hop was not
        resolvable in the topology when first announced."""
        while True:
            prefixs_to_install = self.hop_db.get_uninstalled_prefix_list()
            self.logger.debug("prefix to install: %s", str(prefixs_to_install))
            for prefix in prefixs_to_install:
                nexthop = self.hop_db.get_nexthop(prefix)
                self.install_best_path(prefix, nexthop)
            hub.sleep(3)

    def install_best_path(self, prefix, nexthop):
        """Install flow rules steering *prefix* toward *nexthop* on every
        datapath; a no-op until the next hop appears in the topology."""
        nexthop_host = self.get_nexthop_host(nexthop)
        self.logger.debug("nexthop host: %s", str(nexthop_host))
        if nexthop_host is None:
            return
        # (removed an unused speaker_ids lookup that was here)
        nexthop_port = nexthop_host.port
        nexthop_mac = nexthop_host.mac
        nexthop_dpid = nexthop_port.dpid
        nexthop_port_no = nexthop_port.port_no
        prefix_ip = str(IPNetwork(prefix).ip)
        prefix_mask = str(IPNetwork(prefix).netmask)
        for dp in self.fwd.get_all_datapaths():
            from_dpid = dp.id
            # BUG FIX: dangling assignment joined with the OFPMatch call
            # (was a SyntaxError). eth_type 2048 == IPv4.
            nexthop_match = dp.ofproto_parser.OFPMatch(
                ipv4_dst=(prefix_ip, prefix_mask),
                eth_type=2048)
            pre_actions = [
                dp.ofproto_parser.OFPActionSetField(eth_dst=nexthop_mac)
            ]
            self.fwd.setup_shortest_path(from_dpid,
                                         nexthop_dpid,
                                         nexthop_port_no,
                                         nexthop_match,
                                         pre_actions)
        self.hop_db.install_prefix(prefix)
Remove unused variable.
from netaddr import IPNetwork
from ryu.base import app_manager
from ryu.controller import ofp_event
from ryu.controller.handler import MAIN_DISPATCHER
from ryu.controller.handler import set_ev_cls
from ryu.ofproto import ofproto_v1_0, ofproto_v1_3
from ryu.lib import hub
from ryu.lib.packet import packet
from ryu.lib.packet import ethernet
from ryu.lib.packet import ether_types
from ryu.topology import api as topo_api
from ryu.services.protocols.bgp.bgpspeaker import BGPSpeaker
from conf_mgr import SDNIPConfigManager
from fwd import Fwd
from hop_db import HopDB
class SDNIP(app_manager.RyuApp):
    """SDN-IP Ryu application.

    Runs a BGP speaker and translates learned best paths into OpenFlow
    shortest-path rules via the Fwd and HopDB context apps.
    """
    OFP_VERSIONS = [ofproto_v1_3.OFP_VERSION]
    _CONTEXTS = {
        'fwd': Fwd,
        'hop_db': HopDB
    }

    def __init__(self, *args, **kwargs):
        super(SDNIP, self).__init__(*args, **kwargs)
        self.fwd = kwargs['fwd']
        self.hop_db = kwargs['hop_db']
        self.cfg_mgr = SDNIPConfigManager('config.json')
        # BUG FIX: the assignment and the BGPSpeaker(...) call were split
        # across lines with no continuation, which is a SyntaxError as
        # written; joined into one statement.
        self.bgp_speaker = BGPSpeaker(
            self.cfg_mgr.as_number,
            self.cfg_mgr.router_id,
            bgp_server_port=self.cfg_mgr.listen_port,
            best_path_change_handler=self.best_path_change_handler,
            peer_down_handler=self.peer_down_handler,
            peer_up_handler=self.peer_up_handler)
        speaker_ids = self.cfg_mgr.get_all_speaker_id()
        for speaker_id in speaker_ids:
            self.bgp_speaker.neighbor_add(speaker_id,
                                          self.cfg_mgr.as_number,
                                          is_next_hop_self=True)
        hub.spawn(self.prefix_check_loop)

    def best_path_change_handler(self, ev):
        """BGP callback: record the new hop and try to install it."""
        self.logger.info('best path changed:')
        self.logger.info('remote_as: %d', ev.remote_as)
        self.logger.info('route_dist: %s', ev.route_dist)
        self.logger.info('prefix: %s', ev.prefix)
        self.logger.info('nexthop: %s', ev.nexthop)
        self.logger.info('label: %s', ev.label)
        self.logger.info('is_withdraw: %s', ev.is_withdraw)
        self.logger.info('')
        # NOTE(review): withdrawals (ev.is_withdraw) are logged but handled
        # the same as announcements — confirm this is intended.
        self.hop_db.add_hop(ev.prefix, ev.nexthop)
        self.install_best_path(ev.prefix, ev.nexthop)

    def peer_down_handler(self, remote_ip, remote_as):
        """BGP callback: log loss of a peering session."""
        self.logger.info('peer down:')
        self.logger.info('remote_as: %d', remote_as)
        self.logger.info('remote ip: %s', remote_ip)
        self.logger.info('')

    def peer_up_handler(self, remote_ip, remote_as):
        """BGP callback: log establishment of a peering session."""
        self.logger.info('peer up:')
        self.logger.info('remote_as: %d', remote_as)
        self.logger.info('remote ip: %s', remote_ip)
        self.logger.info('')

    def get_nexthop_host(self, ip):
        """Return the topology host owning *ip*, or None if not (yet) known."""
        hosts = topo_api.get_all_host(self)
        for host in hosts:
            if ip in host.ipv4:
                return host
        return None

    def prefix_check_loop(self):
        """Green-thread loop: retry prefixes whose next hop was not
        resolvable in the topology when first announced."""
        while True:
            prefixs_to_install = self.hop_db.get_uninstalled_prefix_list()
            self.logger.debug("prefix to install: %s", str(prefixs_to_install))
            for prefix in prefixs_to_install:
                nexthop = self.hop_db.get_nexthop(prefix)
                self.install_best_path(prefix, nexthop)
            hub.sleep(3)

    def install_best_path(self, prefix, nexthop):
        """Install flow rules steering *prefix* toward *nexthop* on every
        datapath; a no-op until the next hop appears in the topology."""
        nexthop_host = self.get_nexthop_host(nexthop)
        self.logger.debug("nexthop host: %s", str(nexthop_host))
        if nexthop_host is None:
            return
        nexthop_port = nexthop_host.port
        nexthop_mac = nexthop_host.mac
        nexthop_dpid = nexthop_port.dpid
        nexthop_port_no = nexthop_port.port_no
        prefix_ip = str(IPNetwork(prefix).ip)
        prefix_mask = str(IPNetwork(prefix).netmask)
        for dp in self.fwd.get_all_datapaths():
            from_dpid = dp.id
            # BUG FIX: dangling assignment joined with the OFPMatch call
            # (was a SyntaxError). eth_type 2048 == IPv4.
            nexthop_match = dp.ofproto_parser.OFPMatch(
                ipv4_dst=(prefix_ip, prefix_mask),
                eth_type=2048)
            pre_actions = [
                dp.ofproto_parser.OFPActionSetField(eth_dst=nexthop_mac)
            ]
            self.fwd.setup_shortest_path(from_dpid,
                                         nexthop_dpid,
                                         nexthop_port_no,
                                         nexthop_match,
                                         pre_actions)
        self.hop_db.install_prefix(prefix)
|
import pandas as pd

# Read in the cosine-similarity matrix. The first CSV column holds the
# row labels (congressman names) and becomes the DataFrame index.
data = pd.read_csv('data/data.csv',index_col = 0)
#get the cosine similarity scores from data
def get_cossim_scores(congressman_name):
    """Return the column of cosine-similarity scores for *congressman_name*.

    Returns None when the name is not a column of ``data`` (same implicit
    behaviour as the original linear scan). Uses a direct membership test
    on the DataFrame's columns instead of iterating over every column.
    """
    if congressman_name in data:
        return data[congressman_name]
    return None
# create nodes and edges data frame
def ndf(congressman_name):
    """Build the nodes data frame: one row per congressman with a numeric
    positional ``id`` and the ``name``.

    NOTE(review): the argument is unused; it is kept so the signature
    mirrors ``edf``.
    """
    names = data.index
    frame = pd.DataFrame(index=names, columns=['id', 'name'])
    frame['id'] = [names.get_loc(n) for n in names]
    frame['name'] = names
    return frame
Edges data frame done.
import pandas as pd

# Read in the cosine-similarity matrix. The first CSV column holds the
# row labels (congressman names) and becomes the DataFrame index.
data = pd.read_csv('data/data.csv',index_col = 0)
#get the cosine similarity scores from data
def get_cossim_scores(congressman_name):
    """Return the similarity-score column for *congressman_name*, or
    None when no matching column exists in ``data``."""
    if congressman_name in data:
        return data[congressman_name]
    return None
# create nodes and edges data frame
def ndf(congressman_name):
    """Build the nodes data frame (columns: id, name) from the index of
    the similarity matrix. The argument is unused; it mirrors ``edf``."""
    names = data.index
    frame = pd.DataFrame(index=names, columns=['id', 'name'])
    frame['id'] = [names.get_loc(n) for n in names]
    frame['name'] = names
    return frame
def edf(congressman_name):
    """Build the edges data frame: one edge from *congressman_name*
    (source) to every congressman (target), weighted by the cosine
    similarity score (value)."""
    names = data.index
    frame = pd.DataFrame(index=names, columns=['source', 'target', 'value'])
    frame['source'] = names.get_loc(congressman_name)
    frame['target'] = [names.get_loc(n) for n in names]
    frame['value'] = list(get_cossim_scores(congressman_name))
    return frame
|
Python Refactor: bert_embeddings
|
#!/usr/bin/env python
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import requests
from marvin.cloudstackTestCase import cloudstackTestCase
from marvin.lib.utils import cleanup_resources
from marvin.lib.base import (
PhysicalNetwork,
NetworkOffering,
NiciraNvp,
ServiceOffering,
Network,
VirtualMachine
)
from marvin.lib.common import (get_domain, get_zone, get_template)
from nose.plugins.attrib import attr
from marvin.codes import (FAILED, PASS)
import time
import logging
class TestNiciraContoller(cloudstackTestCase):
@classmethod
def setUpClass(cls):
test_case = super(TestNiciraContoller, cls)
test_client = test_case.getClsTestClient()
cls.config = test_case.getClsConfig()
cls.api_client = test_client.getApiClient()
cls.physical_networks = cls.config.zones[0].physical_networks
cls.nicira_hosts = cls.config.niciraNvp.hosts
cls.physical_network_id = cls.get_nicira_enabled_physical_network_id(cls.physical_networks)
cls.network_offerring_services = {
'name': 'NiciraEnabledNetwork',
'displaytext': 'NiciraEnabledNetwork',
'guestiptype': 'Isolated',
'supportedservices': 'SourceNat,Dhcp,Dns,Firewall,PortForwarding,Connectivity',
'traffictype': 'GUEST',
'availability': 'Optional',
'serviceProviderList': {
'SourceNat': 'VirtualRouter',
'Dhcp': 'VirtualRouter',
'Dns': 'VirtualRouter',
'Firewall': 'VirtualRouter',
'PortForwarding': 'VirtualRouter',
'Connectivity': 'NiciraNvp'
}
}
cls.network_offering = NetworkOffering.create(cls.api_client, cls.network_offerring_services)
cls.network_offering.update(cls.api_client, state='Enabled')
cls.nicira_credentials = {
'username': 'admin',
'password': 'admin'
}
cls.nicira_master_controller = cls.determine_master_controller(
cls.nicira_hosts,
cls.nicira_credentials
)
cls.transport_zone_uuid = cls.get_transport_zone_from_controller(
cls.nicira_master_controller,
cls.nicira_credentials
)
cls.domain = get_domain(cls.api_client)
cls.zone = get_zone(cls.api_client, test_client.getZoneForTests())
template = get_template(
cls.api_client,
cls.zone.id
)
if template == FAILED:
raise Exception("get_template() failed to return template with description %s" % cls.services['ostype'])
cls.vm_services = {
'mode': cls.zone.networktype,
'small': {
'zoneid': cls.zone.id,
'template': template.id,
'displayname': 'testserver',
'username': cls.config.zones[0].pods[0].clusters[0].hosts[0].username,
'password': cls.config.zones[0].pods[0].clusters[0].hosts[0].password,
'ssh_port': 22,
'hypervisor': cls.config.zones[0].pods[0].clusters[0].hypervisor,
'privateport': 22,
'publicport': 22,
'protocol': 'TCP',
},
'service_offerings': {
'tiny': {
'name': 'Tiny Instance',
'displaytext': 'Tiny Instance',
'cpunumber': 1,
'cpuspeed': 100,
'memory': 64,
}
}
}
if cls.zone.localstorageenabled == True:
cls.vm_services['service_offerings']['tiny']['storagetype'] = 'local'
cls.service_offering = ServiceOffering.create(
cls.api_client,
cls.vm_services['service_offerings']['tiny']
)
cls.cleanup = [
cls.network_offering,
cls.service_offering
]
cls.logger = logging.getLogger('TestNiciraContoller')
cls.stream_handler = logging.StreamHandler()
cls.logger.setLevel(logging.DEBUG)
cls.logger.addHandler(cls.stream_handler)
@classmethod
def tearDownClass(cls):
try:
cleanup_resources(cls.api_client, reversed(cls.cleanup))
except Exception as e:
raise Exception("Warning: Exception during class cleanup : %s" % e)
def setUp(self):
self.test_cleanup = []
def tearDown(self):
try:
cleanup_resources(self.api_client, reversed(self.test_cleanup))
except Exception as e:
raise Exception("Warning: Exception during test cleanup : %s" % e)
@classmethod
def determine_master_controller(cls, hosts, credentials):
for host in hosts:
r1 = requests.post("https://%s/ws.v1/login" % host, credentials, verify=False)
r2 = requests.get("https://%s/ws.v1/control-cluster/status" % host, verify=False, cookies=r1.cookies)
status_code = r2.status_code
if status_code == 401:
continue
elif status_code == 200:
return host
raise Exception("None of the supplied hosts (%s) is a Nicira controller" % hosts)
@classmethod
def get_transport_zone_from_controller(cls, controller_host, credentials):
r1 = requests.post("https://%s/ws.v1/login" % controller_host, credentials, verify=False)
r2 = requests.get("https://%s/ws.v1/transport-zone" % controller_host, verify=False, cookies=r1.cookies)
status_code = r2.status_code
if status_code == 200:
list_transport_zone_response = r2.json()
result_count = list_transport_zone_response['result_count']
if result_count == 0:
raise Exception('Nicira controller did not return any Transport Zones')
elif result_count > 1:
self.logger.debug("Nicira controller returned %s Transport Zones, picking first one" % resultCount)
transport_zone_api_url = list_transport_zone_response['results'][0]['_href']
r3 = requests.get(
"https://%s%s" % (controller_host, transport_zone_api_url),
verify=False,
cookies=r1.cookies
)
return r3.json()['uuid']
else:
raise Exception("Unexpected response from Nicira controller. Status code = %s, content = %s" % status_code)
@classmethod
def get_nicira_enabled_physical_network_id(cls, physical_networks):
nicira_physical_network_name = None
for physical_network in physical_networks:
for provider in physical_network.providers:
if provider.name == 'NiciraNvp':
nicira_physical_network_name = physical_network.name
if nicira_physical_network_name is None:
raise Exception('Did not find a Nicira enabled physical network in configuration')
return PhysicalNetwork.list(cls.api_client, name=nicira_physical_network_name)[0].id
def determine_slave_conroller(self, hosts, master_controller):
slaves = [ s for s in hosts if s != master_controller ]
if len(slaves) > 0:
return slaves[0]
else:
raise Exception("None of the supplied hosts (%s) is a Nicira slave" % hosts)
def add_nicira_device(self, hostname):
nicira_device = NiciraNvp.add(
self.api_client,
None,
self.physical_network_id,
hostname=hostname,
username=self.nicira_credentials['username'],
password=self.nicira_credentials['password'],
transportzoneuuid=self.transport_zone_uuid)
self.test_cleanup.append(nicira_device)
def create_guest_network(self):
network_services = {
'name' : 'nicira_enabled_network',
'displaytext' : 'nicira_enabled_network',
'zoneid' : self.zone.id,
'networkoffering' : self.network_offering.id
}
network = Network.create(
self.api_client,
network_services,
accountid='admin',
domainid=self.domain.id,
)
self.test_cleanup.append(network)
return network
def create_virtual_machine(self, network):
virtual_machine = VirtualMachine.create(
self.api_client,
self.vm_services['small'],
accountid='admin',
domainid=self.domain.id,
serviceofferingid=self.service_offering.id,
networkids=[network.id],
mode=self.vm_services['mode']
)
self.test_cleanup.append(virtual_machine)
return virtual_machine
def get_routers_for_network(self, network):
return list_routers(
self.apiclient,
account='admin',
domainid=self.account.domainid,
networkid=network.id
)
    @attr(tags = ["advanced", "smoke", "nicira"], required_hardware="true")
    def test_01_nicira_controller(self):
        """Add the master Nicira controller as an NVP device, then verify a
        VM can be created (and reaches Running) in a Nicira-backed network."""
        self.add_nicira_device(self.nicira_master_controller)
        network = self.create_guest_network()
        virtual_machine = self.create_virtual_machine(network)
        list_vm_response = VirtualMachine.list(self.api_client, id=virtual_machine.id)
        self.logger.debug("Verify listVirtualMachines response for virtual machine: %s" % virtual_machine.id)
        self.assertEqual(isinstance(list_vm_response, list), True, 'Response did not return a valid list')
        self.assertNotEqual(len(list_vm_response), 0, 'List of VMs is empty')
        vm_response = list_vm_response[0]
        self.assertEqual(vm_response.id, virtual_machine.id, 'Virtual machine in response does not match request')
        self.assertEqual(vm_response.state, 'Running', 'VM is not in Running state')
    @attr(tags = ["advanced", "smoke", "nicira"], required_hardware="true")
    def test_02_nicira_controller_redirect(self):
        """
        Nicira clusters will redirect clients (in this case ACS) to the master node.
        This test assumes that a Nicira cluster is present and configured properly, and
        that it has at least two controller nodes. The test will check that ACS follows
        redirects by:
            - adding a Nicira Nvp device that points to one of the cluster's slave controllers,
            - creating a VM in a Nicira backed network
        If all is well, no matter what controller is specified (slave or master), the vm
        (and respective router VM) should be created without issues.
        """
        nicira_slave = self.determine_slave_conroller(self.nicira_hosts, self.nicira_master_controller)
        self.logger.debug("Nicira slave controller is: %s " % nicira_slave)
        self.add_nicira_device(nicira_slave)
        network = self.create_guest_network()
        virtual_machine = self.create_virtual_machine(network)
        list_vm_response = VirtualMachine.list(self.api_client, id=virtual_machine.id)
        self.logger.debug("Verify listVirtualMachines response for virtual machine: %s" % virtual_machine.id)
        self.assertEqual(isinstance(list_vm_response, list), True, 'Response did not return a valid list')
        self.assertNotEqual(len(list_vm_response), 0, 'List of VMs is empty')
        vm_response = list_vm_response[0]
        self.assertEqual(vm_response.id, virtual_machine.id, 'Virtual machine in response does not match request')
        self.assertEqual(vm_response.state, 'Running', 'VM is not in Running state')
Test NSX tunnel in guest network
#!/usr/bin/env python
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import requests
from marvin.cloudstackTestCase import cloudstackTestCase
from marvin.lib.utils import cleanup_resources
from marvin.lib.base import (
PhysicalNetwork,
NetworkOffering,
NiciraNvp,
ServiceOffering,
NATRule,
PublicIPAddress,
Network,
VirtualMachine
)
from marvin.lib.common import (
get_domain,
get_zone,
get_template,
list_routers,
list_hosts,
findSuitableHostForMigration
)
from nose.plugins.attrib import attr
from marvin.codes import (FAILED, PASS)
import time
import logging
class TestNiciraContoller(cloudstackTestCase):
@classmethod
def setUpClass(cls):
test_case = super(TestNiciraContoller, cls)
test_client = test_case.getClsTestClient()
cls.config = test_case.getClsConfig()
cls.api_client = test_client.getApiClient()
cls.physical_networks = cls.config.zones[0].physical_networks
cls.nicira_hosts = cls.config.niciraNvp.hosts
cls.physical_network_id = cls.get_nicira_enabled_physical_network_id(cls.physical_networks)
cls.network_offerring_services = {
'name': 'NiciraEnabledNetwork',
'displaytext': 'NiciraEnabledNetwork',
'guestiptype': 'Isolated',
'supportedservices': 'SourceNat,Dhcp,Dns,Firewall,PortForwarding,Connectivity',
'traffictype': 'GUEST',
'availability': 'Optional',
'serviceProviderList': {
'SourceNat': 'VirtualRouter',
'Dhcp': 'VirtualRouter',
'Dns': 'VirtualRouter',
'Firewall': 'VirtualRouter',
'PortForwarding': 'VirtualRouter',
'Connectivity': 'NiciraNvp'
}
}
cls.network_offering = NetworkOffering.create(cls.api_client, cls.network_offerring_services)
cls.network_offering.update(cls.api_client, state='Enabled')
cls.nicira_credentials = {
'username': 'admin',
'password': 'admin'
}
cls.nicira_master_controller = cls.determine_master_controller(
cls.nicira_hosts,
cls.nicira_credentials
)
cls.transport_zone_uuid = cls.get_transport_zone_from_controller(
cls.nicira_master_controller,
cls.nicira_credentials
)
cls.domain = get_domain(cls.api_client)
cls.zone = get_zone(cls.api_client, test_client.getZoneForTests())
template = get_template(
cls.api_client,
cls.zone.id
)
if template == FAILED:
raise Exception("get_template() failed to return template with description %s" % cls.services['ostype'])
cls.vm_services = {
'mode': cls.zone.networktype,
'small': {
'zoneid': cls.zone.id,
'template': template.id,
'displayname': 'testserver',
'username': cls.config.zones[0].pods[0].clusters[0].hosts[0].username,
'password': cls.config.zones[0].pods[0].clusters[0].hosts[0].password,
'ssh_port': 22,
'hypervisor': cls.config.zones[0].pods[0].clusters[0].hypervisor,
'privateport': 22,
'publicport': 22,
'protocol': 'TCP',
},
'service_offerings': {
'tiny': {
'name': 'Tiny Instance',
'displaytext': 'Tiny Instance',
'cpunumber': 1,
'cpuspeed': 100,
'memory': 64,
}
}
}
if cls.zone.localstorageenabled == True:
cls.vm_services['service_offerings']['tiny']['storagetype'] = 'local'
cls.service_offering = ServiceOffering.create(
cls.api_client,
cls.vm_services['service_offerings']['tiny']
)
cls.cleanup = [
cls.network_offering,
cls.service_offering
]
cls.logger = logging.getLogger('TestNiciraContoller')
cls.stream_handler = logging.StreamHandler()
cls.logger.setLevel(logging.DEBUG)
cls.logger.addHandler(cls.stream_handler)
@classmethod
def tearDownClass(cls):
try:
cleanup_resources(cls.api_client, reversed(cls.cleanup))
except Exception as e:
raise Exception("Warning: Exception during class cleanup : %s" % e)
def setUp(self):
self.test_cleanup = []
def tearDown(self):
try:
cleanup_resources(self.api_client, reversed(self.test_cleanup))
except Exception as e:
raise Exception("Warning: Exception during test cleanup : %s" % e)
@classmethod
def determine_master_controller(cls, hosts, credentials):
for host in hosts:
r1 = requests.post("https://%s/ws.v1/login" % host, credentials, verify=False)
r2 = requests.get("https://%s/ws.v1/control-cluster/status" % host, verify=False, cookies=r1.cookies)
status_code = r2.status_code
if status_code == 401:
continue
elif status_code == 200:
return host
raise Exception("None of the supplied hosts (%s) is a Nicira controller" % hosts)
@classmethod
def get_transport_zone_from_controller(cls, controller_host, credentials):
r1 = requests.post("https://%s/ws.v1/login" % controller_host, credentials, verify=False)
r2 = requests.get("https://%s/ws.v1/transport-zone" % controller_host, verify=False, cookies=r1.cookies)
status_code = r2.status_code
if status_code == 200:
list_transport_zone_response = r2.json()
result_count = list_transport_zone_response['result_count']
if result_count == 0:
raise Exception('Nicira controller did not return any Transport Zones')
elif result_count > 1:
self.logger.debug("Nicira controller returned %s Transport Zones, picking first one" % resultCount)
transport_zone_api_url = list_transport_zone_response['results'][0]['_href']
r3 = requests.get(
"https://%s%s" % (controller_host, transport_zone_api_url),
verify=False,
cookies=r1.cookies
)
return r3.json()['uuid']
else:
raise Exception("Unexpected response from Nicira controller. Status code = %s, content = %s" % status_code)
@classmethod
def get_nicira_enabled_physical_network_id(cls, physical_networks):
nicira_physical_network_name = None
for physical_network in physical_networks:
for provider in physical_network.providers:
if provider.name == 'NiciraNvp':
nicira_physical_network_name = physical_network.name
if nicira_physical_network_name is None:
raise Exception('Did not find a Nicira enabled physical network in configuration')
return PhysicalNetwork.list(cls.api_client, name=nicira_physical_network_name)[0].id
def determine_slave_conroller(self, hosts, master_controller):
slaves = [ s for s in hosts if s != master_controller ]
if len(slaves) > 0:
return slaves[0]
else:
raise Exception("None of the supplied hosts (%s) is a Nicira slave" % hosts)
def add_nicira_device(self, hostname):
nicira_device = NiciraNvp.add(
self.api_client,
None,
self.physical_network_id,
hostname=hostname,
username=self.nicira_credentials['username'],
password=self.nicira_credentials['password'],
transportzoneuuid=self.transport_zone_uuid)
self.test_cleanup.append(nicira_device)
def create_guest_network(self):
network_services = {
'name' : 'nicira_enabled_network',
'displaytext' : 'nicira_enabled_network',
'zoneid' : self.zone.id,
'networkoffering' : self.network_offering.id
}
network = Network.create(
self.api_client,
network_services,
accountid='admin',
domainid=self.domain.id,
)
self.test_cleanup.append(network)
return network
def create_virtual_machine(self, network):
virtual_machine = VirtualMachine.create(
self.api_client,
self.vm_services['small'],
accountid='admin',
domainid=self.domain.id,
serviceofferingid=self.service_offering.id,
networkids=[network.id],
mode=self.vm_services['mode']
)
self.test_cleanup.append(virtual_machine)
return virtual_machine
    def get_routers_for_network(self, network):
        """List the admin-account routers attached to *network*."""
        return list_routers(
            self.api_client,
            account='admin',
            domainid=self.domain.id,
            networkid=network.id
        )
def get_hosts(self):
return list_hosts(
self.api_client,
account='admin',
domainid=self.domain.id
)
def get_master_router(self, routers):
master = filter(lambda r: r.redundantstate == 'MASTER', routers)
self.logger.debug("Found %s master router(s): %s" % (master.size(), master))
return master[0]
def distribute_vm_and_routers_by_hosts(self, virtual_machine, routers):
if len(routers) > 1:
router = self.get_router(routers)
self.logger.debug("Master Router VM is %s" % router)
else:
router = routers[0]
if router.hostid == virtual_machine.hostid:
self.logger.debug("Master Router VM is on the same host as VM")
host = findSuitableHostForMigration(self.api_client, router.id)
if host is not None:
router.migrate(self.api_client, host)
self.logger.debug("Migrated Master Router VM to host %s" % host)
else:
self.fail('No suitable host to migrate Master Router VM to')
else:
self.logger.debug("Master Router VM is not on the same host as VM: %s, %s" % (router.hostid, virtual_machine.hostid))
def acquire_publicip(self, network):
self.logger.debug("Associating public IP for network: %s" % network.name)
public_ip = PublicIPAddress.create(
self.api_client,
accountid='admin',
zoneid=self.zone.id,
domainid=self.domain.id,
networkid=network.id
)
self.logger.debug("Associated %s with network %s" % (public_ip.ipaddress.ipaddress, network.id))
self.test_cleanup.append(public_ip)
return public_ip
def create_natrule(self, vm, public_ip, network):
self.logger.debug("Creating NAT rule in network for vm with public IP")
nat_rule = NATRule.create(
self.api_client,
vm,
self.vm_services['small'],
ipaddressid=public_ip.ipaddress.id,
openfirewall=True,
networkid=network.id
)
self.test_cleanup.append(nat_rule)
return nat_rule
    @attr(tags = ["advanced", "smoke", "nicira"], required_hardware="true")
    def test_01_nicira_controller(self):
        """Add the master Nicira controller as an NVP device, then verify a
        VM can be created (and reaches Running) in a Nicira-backed network."""
        self.add_nicira_device(self.nicira_master_controller)
        network = self.create_guest_network()
        virtual_machine = self.create_virtual_machine(network)
        list_vm_response = VirtualMachine.list(self.api_client, id=virtual_machine.id)
        self.logger.debug("Verify listVirtualMachines response for virtual machine: %s" % virtual_machine.id)
        self.assertEqual(isinstance(list_vm_response, list), True, 'Response did not return a valid list')
        self.assertNotEqual(len(list_vm_response), 0, 'List of VMs is empty')
        vm_response = list_vm_response[0]
        self.assertEqual(vm_response.id, virtual_machine.id, 'Virtual machine in response does not match request')
        self.assertEqual(vm_response.state, 'Running', 'VM is not in Running state')
    @attr(tags = ["advanced", "smoke", "nicira"], required_hardware="true")
    def test_02_nicira_controller_redirect(self):
        """
        Nicira clusters will redirect clients (in this case ACS) to the master node.
        This test assumes that a Nicira cluster is present and configured properly, and
        that it has at least two controller nodes. The test will check that ACS follows
        redirects by:
            - adding a Nicira Nvp device that points to one of the cluster's slave controllers,
            - creating a VM in a Nicira backed network
        If all is well, no matter what controller is specified (slave or master), the vm
        (and respective router VM) should be created without issues.
        """
        nicira_slave = self.determine_slave_conroller(self.nicira_hosts, self.nicira_master_controller)
        self.logger.debug("Nicira slave controller is: %s " % nicira_slave)
        self.add_nicira_device(nicira_slave)
        network = self.create_guest_network()
        virtual_machine = self.create_virtual_machine(network)
        list_vm_response = VirtualMachine.list(self.api_client, id=virtual_machine.id)
        self.logger.debug("Verify listVirtualMachines response for virtual machine: %s" % virtual_machine.id)
        self.assertEqual(isinstance(list_vm_response, list), True, 'Response did not return a valid list')
        self.assertNotEqual(len(list_vm_response), 0, 'List of VMs is empty')
        vm_response = list_vm_response[0]
        self.assertEqual(vm_response.id, virtual_machine.id, 'Virtual machine in response does not match request')
        self.assertEqual(vm_response.state, 'Running', 'VM is not in Running state')
@attr(tags = ["advanced", "smoke", "nicira"], required_hardware="true")
def test_03_nicira_tunnel_guest_network(self):
self.add_nicira_device(self.nicira_master_controller)
network = self.create_guest_network()
virtual_machine = self.create_virtual_machine(network)
public_ip = self.acquire_publicip(network)
nat_rule = self.create_natrule(virtual_machine, public_ip, network)
list_vm_response = VirtualMachine.list(self.api_client, id=virtual_machine.id)
self.logger.debug("Verify listVirtualMachines response for virtual machine: %s" % virtual_machine.id)
self.assertEqual(isinstance(list_vm_response, list), True, 'Response did not return a valid list')
self.assertNotEqual(len(list_vm_response), 0, 'List of VMs is empty')
vm_response = list_vm_response[0]
self.assertEqual(vm_response.id, virtual_machine.id, 'Virtual machine in response does not match request')
self.assertEqual(vm_response.state, 'Running', 'VM is not in Running state')
routers = self.get_routers_for_network(network)
self.distribute_vm_and_routers_by_hosts(virtual_machine, routers)
ssh_command = 'ping -c 3 google.com'
result = 'failed'
try:
self.logger.debug("SSH into VM: %s" % public_ip.ipaddress.ipaddress)
ssh = virtual_machine.get_ssh_client(ipaddress=public_ip.ipaddress.ipaddress)
self.logger.debug('Ping to google.com from VM')
result = str(ssh.execute(ssh_command))
self.logger.debug("SSH result: %s; COUNT is ==> %s" % (result, result.count("3 packets received")))
except Exception as e:
self.fail("SSH Access failed for %s: %s" % (vmObj.get_ip(), e))
self.assertEqual(result.count('3 packets received'), 1, 'Ping to outside world from VM should be successful')
|
# -*- coding: iso8859-1 -*-
#
# Copyright (C) 2004-2005 Edgewall Software
# Copyright (C) 2004-2005 Christopher Lenz <cmlenz@gmx.de>
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.com/license.html.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://projects.edgewall.com/trac/.
#
# Author: Christopher Lenz <cmlenz@gmx.de>
from __future__ import generators
import re
import time
from trac.core import *
from trac.perm import IPermissionRequestor
from trac.ticket import Ticket, TicketSystem
from trac.util import escape, unescape, format_datetime, http_date, \
shorten_line, sql_escape, CRLF, TRUE
from trac.web import IRequestHandler
from trac.web.chrome import add_link, add_stylesheet, INavigationContributor
from trac.wiki import wiki_to_html, wiki_to_oneliner, IWikiMacroProvider, \
IWikiSyntaxProvider
class QuerySyntaxError(Exception):
    """Exception raised when a ticket query cannot be parsed from a string.

    See Query.from_string for the accepted 'field=value&...' syntax.
    """
class Query(object):
    def __init__(self, env, constraints=None, order=None, desc=0, group=None,
                 groupdesc = 0, verbose=0):
        """Create a ticket query.

        :param env: the Trac environment
        :param constraints: dict mapping field name -> list of constraint
            values (values may carry '!', '~', '^', '$' prefixes)
        :param order: name of the field to sort the results by
        :param desc: sort descending when true
        :param group: name of the field to group results by
        :param groupdesc: group in descending order when true
        :param verbose: include reporter/description columns when true
        """
        self.env = env
        self.constraints = constraints or {}
        self.order = order
        self.desc = desc
        self.group = group
        self.groupdesc = groupdesc
        self.verbose = verbose
        self.fields = TicketSystem(self.env).get_ticket_fields()
        self.cols = []  # lazily initialized by get_columns()
        if self.order != 'id' \
                and not self.order in [f['name'] for f in self.fields]:
            # order by priority by default when the requested order column
            # is neither 'id' nor a known ticket field
            self.order = 'priority'
def from_string(cls, env, string, **kw):
filters = string.split('&')
constraints = {}
for filter in filters:
filter = filter.split('=')
if len(filter) != 2:
raise QuerySyntaxError, 'Query filter requires field and ' \
'constraints separated by a "="'
field,values = filter
if not field:
raise QuerySyntaxError, 'Query filter requires field name'
values = values.split('|')
mode, neg = '', ''
if field[-1] in ('~', '^', '$'):
mode = field[-1]
field = field[:-1]
if field[-1] == '!':
neg = '!'
field = field[:-1]
values = map(lambda x: neg + mode + x, values)
constraints[field] = values
return cls(env, constraints, **kw)
from_string = classmethod(from_string)
    def get_columns(self):
        """Return the list of columns to display for this query.

        The result is computed once and cached in self.cols.
        """
        if self.cols:
            return self.cols
        # FIXME: the user should be able to configure which columns should
        # be displayed
        cols = ['id']
        cols += [f['name'] for f in self.fields if f['type'] != 'textarea']
        # Push these low-interest columns to the end of the list.
        for col in ('reporter', 'keywords', 'cc'):
            if col in cols:
                cols.remove(col)
                cols.append(col)
        # Semi-intelligently remove columns that are restricted to a single
        # value by a query constraint.
        for col in [k for k in self.constraints.keys() if k in cols]:
            constraint = self.constraints[col]
            if len(constraint) == 1 and constraint[0] \
                    and not constraint[0][0] in ('!', '~', '^', '$'):
                if col in cols:
                    cols.remove(col)
                # Resolution is only meaningful for closed tickets, so hide
                # it when closed tickets are filtered out by the status
                # constraint.
                if col == 'status' and not 'closed' in constraint \
                        and 'resolution' in cols:
                    cols.remove('resolution')
        if self.group in cols:
            # The grouping column is rendered as a heading, not as a column.
            cols.remove(self.group)

        def sort_columns(col1, col2):
            # NOTE: Python-2 style cmp() function, consumed by cols.sort
            # below (this module targets Python 2).
            constrained_fields = self.constraints.keys()
            # Ticket ID is always the first column
            if 'id' in [col1, col2]:
                return col1 == 'id' and -1 or 1
            # Ticket summary is always the second column
            elif 'summary' in [col1, col2]:
                return col1 == 'summary' and -1 or 1
            # Constrained columns appear before other columns
            elif col1 in constrained_fields or col2 in constrained_fields:
                return col1 in constrained_fields and -1 or 1
            return 0
        cols.sort(sort_columns)
        # Only display the first eight columns by default
        # FIXME: Make this configurable on a per-user and/or per-query basis
        self.cols = cols[:7]
        if not self.order in self.cols and not self.order == self.group:
            # Make sure the column we order by is visible, if it isn't also
            # the column we group by
            self.cols[-1] = self.order
        return self.cols
    def execute(self, db=None):
        """Run the query and return a list of row dicts.

        Each dict maps column name -> value and always contains 'id' and
        'href'. String values are HTML-escaped except 'description', which
        is rendered as wiki text later.

        :param db: optional database connection; a new one is fetched from
            the environment when omitted
        """
        if not self.cols:
            self.get_columns()
        sql = self.get_sql()
        self.env.log.debug("Query SQL: %s" % sql)
        if not db:
            db = self.env.get_db_cnx()
        cursor = db.cursor()
        cursor.execute(sql)
        columns = cursor.description
        results = []
        for row in cursor:
            # First selected column is always the ticket id (see get_sql).
            id = int(row[0])
            result = {'id': id, 'href': self.env.href.ticket(id)}
            for i in range(1, len(columns)):
                name, val = columns[i][0], row[i]
                if name == self.group:
                    val = escape(val or 'None')
                elif name == 'reporter':
                    val = escape(val or 'anonymous')
                elif name in ['changetime', 'time']:
                    val = int(val)
                elif val is None:
                    val = '--'
                elif name != 'description':
                    # description is deliberately left unescaped here; it is
                    # wiki-formatted by the caller
                    val = escape(val)
                result[name] = val
            results.append(result)
        cursor.close()
        return results
def get_href(self, format=None):
return self.env.href.query(order=self.order,
desc=self.desc and 1 or None,
group=self.group or None,
groupdesc=self.groupdesc and 1 or None,
verbose=self.verbose and 1 or None,
format=format,
**self.constraints)
def get_sql(self):
if not self.cols:
self.get_columns()
# Build the list of actual columns to query
cols = self.cols[:]
def add_cols(*args):
for col in args:
if not col in cols:
cols.append(col)
if self.group and not self.group in cols:
add_cols(self.group)
if self.verbose:
add_cols('reporter', 'description')
add_cols('priority', 'time', 'changetime', self.order)
cols.extend([c for c in self.constraints.keys() if not c in cols])
custom_fields = [f['name'] for f in self.fields if f.has_key('custom')]
sql = []
sql.append("SELECT " + ",".join(['t.%s AS %s' % (c, c) for c in cols
if c not in custom_fields]))
sql.append(",priority.value AS priority_value")
for k in [k for k in cols if k in custom_fields]:
sql.append(",%s.value AS %s" % (k, k))
sql.append("\nFROM ticket AS t")
for k in [k for k in cols if k in custom_fields]:
sql.append("\n LEFT OUTER JOIN ticket_custom AS %s ON " \
"(id=%s.ticket AND %s.name='%s')" % (k, k, k, k))
for col in [c for c in ('status', 'resolution', 'priority', 'severity')
if c == self.order or c == self.group or c == 'priority']:
sql.append("\n LEFT OUTER JOIN enum AS %s ON (%s.type='%s' AND %s.name=%s)"
% (col, col, col, col, col))
for col in [c for c in ['milestone', 'version']
if c == self.order or c == self.group]:
sql.append("\n LEFT OUTER JOIN %s ON (%s.name=%s)" % (col, col, col))
def get_constraint_sql(name, value, mode, neg):
value = sql_escape(value[len(mode and '!' or '' + mode):])
if name not in custom_fields:
name = 't.' + name
else:
name = name + '.value'
if mode == '~' and value:
return "COALESCE(%s,'') %sLIKE '%%%s%%'" % (
name, neg and 'NOT ' or '', value)
elif mode == '^' and value:
return "COALESCE(%s,'') %sLIKE '%s%%'" % (
name, neg and 'NOT ' or '', value)
elif mode == '$' and value:
return "COALESCE(%s,'') %sLIKE '%%%s'" % (
name, neg and 'NOT ' or '', value)
elif mode == '':
return "COALESCE(%s,'')%s='%s'" % (
name, neg and '!' or '', value)
clauses = []
for k, v in self.constraints.items():
# Determine the match mode of the constraint (contains, starts-with,
# negation, etc)
neg = len(v[0]) and v[0][0] == '!'
mode = ''
if len(v[0]) > neg and v[0][neg] in ('~', '^', '$'):
mode = v[0][neg]
# Special case for exact matches on multiple values
if not mode and len(v) > 1:
inlist = ",".join(["'" + sql_escape(val[neg and 1 or 0:]) + "'"
for val in v])
if k not in custom_fields:
col = 't.' + k
else:
col = k + '.value'
clauses.append("COALESCE(%s,'') %sIN (%s)"
% (col, neg and 'NOT ' or '', inlist))
elif len(v) > 1:
constraint_sql = filter(lambda x: x is not None,
[get_constraint_sql(k, val, mode, neg)
for val in v])
if not constraint_sql:
continue
if neg:
clauses.append("(" + " AND ".join(constraint_sql) + ")")
else:
clauses.append("(" + " OR ".join(constraint_sql) + ")")
elif len(v) == 1:
clauses.append(get_constraint_sql(k, v[0][neg and 1 or 0:],
mode, neg))
clauses = filter(None, clauses)
if clauses:
sql.append("\nWHERE " + " AND ".join(clauses))
sql.append("\nORDER BY ")
order_cols = [(self.order, self.desc)]
if self.group and self.group != self.order:
order_cols.insert(0, (self.group, self.groupdesc))
for name, desc in order_cols:
if name not in custom_fields:
col = 't.' + name
else:
col = name + '.value'
if name == 'id':
# FIXME: This is a somewhat ugly hack. Can we also have the
# column type for this? If it's an integer, we do first
# one, if text, we do 'else'
if desc:
sql.append("COALESCE(%s,0)=0 DESC," % col)
else:
sql.append("COALESCE(%s,0)=0," % col)
else:
if desc:
sql.append("COALESCE(%s,'')='' DESC," % col)
else:
sql.append("COALESCE(%s,'')=''," % col)
if name in ['status', 'resolution', 'priority', 'severity']:
if desc:
sql.append("%s.value DESC" % name)
else:
sql.append("%s.value" % name)
elif col in ['t.milestone', 't.version']:
time_col = name == 'milestone' and 'milestone.due' or 'version.time'
if desc:
sql.append("COALESCE(%s,0)=0 DESC,%s DESC,%s DESC"
% (time_col, time_col, col))
else:
sql.append("COALESCE(%s,0)=0,%s,%s"
% (time_col, time_col, col))
else:
if desc:
sql.append("%s DESC" % col)
else:
sql.append("%s" % col)
if name == self.group and not name == self.order:
sql.append(",")
if self.order != 'id':
sql.append(",t.id")
return "".join(sql)
class QueryModule(Component):
    """Web front-end for custom ticket queries (the /query page).

    Renders the query form and results as HTML, CSV, tab-delimited text or
    RSS, and provides the `query:` wiki link resolver.
    """
    implements(IRequestHandler, INavigationContributor, IWikiSyntaxProvider)
    # INavigationContributor methods
    def get_active_navigation_item(self, req):
        # Highlight the "tickets" entry in the main navigation bar.
        return 'tickets'
    def get_navigation_items(self, req):
        # Contribute a "View Tickets" entry only when the report module is
        # disabled; otherwise the report module provides that entry.
        from trac.ticket.report import ReportModule
        if req.perm.has_permission('TICKET_VIEW') and \
           not self.env.is_component_enabled(ReportModule):
            yield 'mainnav', 'tickets', '<a href="%s">View Tickets</a>' \
                  % escape(self.env.href.query())
    # IRequestHandler methods
    def match_request(self, req):
        return req.path_info == '/query'
    def process_request(self, req):
        """Parse the request into a `Query` and dispatch on `format`."""
        req.perm.assert_permission('TICKET_VIEW')
        constraints = self._get_constraints(req)
        if not constraints and not req.args.has_key('order'):
            # avoid displaying all tickets when the query module is invoked
            # with no parameters. Instead show only open tickets, possibly
            # associated with the user
            constraints = {'status': ('new', 'assigned', 'reopened')}
            if req.authname and req.authname != 'anonymous':
                constraints['owner'] = (req.authname,)
            else:
                email = req.session.get('email')
                name = req.session.get('name')
                if email or name:
                    # NOTE(review): parses as ('~%s' % email) or name, so an
                    # empty email falls back to the bare name -- confirm this
                    # is the intended precedence.
                    constraints['cc'] = ('~%s' % email or name,)
        query = Query(self.env, constraints, req.args.get('order'),
                      req.args.has_key('desc'), req.args.get('group'),
                      req.args.has_key('groupdesc'),
                      req.args.has_key('verbose'))
        if req.args.has_key('update'):
            # Reset session vars
            for var in ('query_constraints', 'query_time', 'query_tickets'):
                if req.session.has_key(var):
                    del req.session[var]
            req.redirect(query.get_href())
        add_link(req, 'alternate', query.get_href('rss'), 'RSS Feed',
                 'application/rss+xml', 'rss')
        add_link(req, 'alternate', query.get_href('csv'),
                 'Comma-delimited Text', 'text/plain')
        add_link(req, 'alternate', query.get_href('tab'), 'Tab-delimited Text',
                 'text/plain')
        # Split each stored constraint value back into its mode prefix
        # ('~', '^', '$', with optional '!') and bare value so the filter
        # form can be re-displayed.
        constraints = {}
        for k, v in query.constraints.items():
            constraint = {'values': [], 'mode': ''}
            for val in v:
                neg = val.startswith('!')
                if neg:
                    val = val[1:]
                mode = ''
                if val[:1] in ('~', '^', '$'):
                    mode, val = val[:1], val[1:]
                constraint['mode'] = (neg and '!' or '') + mode
                constraint['values'].append(val)
            constraints[k] = constraint
        req.hdf['query.constraints'] = constraints
        format = req.args.get('format')
        if format == 'rss':
            self.display_rss(req, query)
            return 'query_rss.cs', 'application/rss+xml'
        elif format == 'csv':
            self.display_csv(req, query)
        elif format == 'tab':
            self.display_csv(req, query, '\t')
        else:
            self.display_html(req, query)
            return 'query.cs', None
    # Internal methods
    def _get_constraints(self, req):
        """Extract ticket-field constraints from the request arguments."""
        constraints = {}
        ticket_fields = [f['name'] for f in
                         TicketSystem(self.env).get_ticket_fields()]
        # For clients without JavaScript, we remove constraints here if
        # requested
        remove_constraints = {}
        to_remove = [k[10:] for k in req.args.keys()
                     if k.startswith('rm_filter_')]
        if to_remove: # either empty or containing a single element
            match = re.match(r'(\w+?)_(\d+)$', to_remove[0])
            if match:
                remove_constraints[match.group(1)] = int(match.group(2))
            else:
                # -1 means: drop every value of that field
                remove_constraints[to_remove[0]] = -1
        for field in [k for k in req.args.keys() if k in ticket_fields]:
            vals = req.args[field]
            if not isinstance(vals, (list, tuple)):
                vals = [vals]
            vals = map(lambda x: x.value, vals)
            if vals:
                # Prepend the match mode (e.g. '~', '!~') to each value.
                mode = req.args.get(field + '_mode')
                if mode:
                    vals = map(lambda x: mode + x, vals)
                if remove_constraints.has_key(field):
                    idx = remove_constraints[field]
                    if idx >= 0:
                        del vals[idx]
                        if not vals:
                            continue
                    else:
                        continue
                constraints[field] = vals
        return constraints
    def _get_constraint_modes(self):
        """Return the match modes selectable for text and select fields."""
        modes = {}
        modes['text'] = [
            {'name': "contains", 'value': "~"},
            {'name': "doesn't contain", 'value': "!~"},
            {'name': "begins with", 'value': "^"},
            {'name': "ends with", 'value': "$"},
            {'name': "is", 'value': ""},
            {'name': "is not", 'value': "!"}
        ]
        modes['select'] = [
            {'name': "is", 'value': ""},
            {'name': "is not", 'value': "!"}
        ]
        return modes
    def display_html(self, req, query):
        """Execute the query and fill the HDF for the query.cs template."""
        req.hdf['title'] = 'Custom Query'
        add_stylesheet(req, 'common/css/report.css')
        db = self.env.get_db_cnx()
        for field in query.fields:
            if field['type'] == 'textarea':
                continue
            hdf = {}
            hdf.update(field)
            del hdf['name']
            req.hdf['query.fields.' + field['name']] = hdf
        req.hdf['query.modes'] = self._get_constraint_modes()
        # For clients without JavaScript, we add a new constraint here if
        # requested
        if req.args.has_key('add'):
            field = req.args.get('add_filter')
            if field:
                idx = 0
                if query.constraints.has_key(field):
                    idx = len(query.constraints[field])
                req.hdf['query.constraints.%s.values.%d' % (field, idx)] = ''
        cols = query.get_columns()
        for i in range(len(cols)):
            header = {'name': cols[i]}
            req.hdf['query.headers.%d' % i] = header
        href = self.env.href.query(group=query.group,
                                   groupdesc=query.groupdesc and 1 or None,
                                   verbose=query.verbose and 1 or None,
                                   **query.constraints)
        req.hdf['query.order'] = query.order
        req.hdf['query.href'] = escape(href)
        if query.desc:
            req.hdf['query.desc'] = True
        if query.group:
            req.hdf['query.group'] = query.group
        if query.groupdesc:
            req.hdf['query.groupdesc'] = True
        if query.verbose:
            req.hdf['query.verbose'] = True
        tickets = query.execute(db)
        req.hdf['query.num_matches'] = len(tickets)
        # The most recent query is stored in the user session
        orig_list = rest_list = None
        orig_time = int(time.time())
        if str(query.constraints) != req.session.get('query_constraints'):
            # New query, initialize session vars
            req.session['query_constraints'] = str(query.constraints)
            req.session['query_time'] = int(time.time())
            req.session['query_tickets'] = ' '.join([str(t['id']) for t in tickets])
        else:
            orig_list = [int(id) for id in req.session.get('query_tickets', '').split()]
            rest_list = orig_list[:]
            orig_time = int(req.session.get('query_time', 0))
        req.session['query_href'] = query.get_href()
        # Find out which tickets originally in the query results no longer
        # match the constraints
        if rest_list:
            for tid in [t['id'] for t in tickets if t['id'] in rest_list]:
                rest_list.remove(tid)
            for rest_id in rest_list:
                # Re-insert removed tickets at their original position,
                # flagged with 'removed' so the template can style them.
                ticket = Ticket(self.env, int(rest_id), db=db)
                data = {'id': ticket.id, 'time': ticket.time_created,
                        'changetime': ticket.time_changed, 'removed': True,
                        'href': self.env.href.ticket(ticket.id)}
                data.update(ticket.values)
                tickets.insert(orig_list.index(rest_id), data)
        for ticket in tickets:
            if orig_list:
                # Mark tickets added or changed since the query was first
                # executed
                if int(ticket['time']) > orig_time:
                    ticket['added'] = True
                elif int(ticket['changetime']) > orig_time:
                    ticket['changed'] = True
            ticket['time'] = format_datetime(ticket['time'])
            if ticket.has_key('description'):
                ticket['description'] = wiki_to_html(ticket['description'] or '',
                                                     self.env, req, db)
        req.session['query_tickets'] = ' '.join([str(t['id']) for t in tickets])
        req.hdf['query.results'] = tickets
        # Kludge: only show link to available reports if the report module is
        # actually enabled
        from trac.ticket.report import ReportModule
        if req.perm.has_permission('REPORT_VIEW') and \
           self.env.is_component_enabled(ReportModule):
            req.hdf['query.report_href'] = self.env.href.report()
    def display_csv(self, req, query, sep=','):
        """Send the query results as delimiter-separated plain text."""
        req.send_response(200)
        req.send_header('Content-Type', 'text/plain;charset=utf-8')
        req.end_headers()
        cols = query.get_columns()
        req.write(sep.join([col for col in cols]) + CRLF)
        results = query.execute(self.env.get_db_cnx())
        for result in results:
            # Replace separator and newline characters so each result stays
            # on a single line.
            req.write(sep.join([str(result[col]).replace(sep, '_')
                                                .replace('\n', ' ')
                                                .replace('\r', ' ')
                                for col in cols]) + CRLF)
    def display_rss(self, req, query):
        """Execute the query (verbose) and fill the HDF for the RSS feed."""
        query.verbose = True
        db = self.env.get_db_cnx()
        results = query.execute(db)
        for result in results:
            result['href'] = self.env.abs_href.ticket(result['id'])
            if result['reporter'].find('@') == -1:
                # Blank out reporter names that don't look like an email
                # address.
                result['reporter'] = ''
            if result['description']:
                result['description'] = escape(wiki_to_html(result['description'] or '',
                                                            self.env, req, db,
                                                            absurls=1))
            if result['time']:
                result['time'] = http_date(result['time'])
        req.hdf['query.results'] = results
    # IWikiSyntaxProvider methods
    def get_wiki_syntax(self):
        return []
    def get_link_resolvers(self):
        yield ('query', self._format_link)
    def _format_link(self, formatter, ns, query, label):
        """Render a `query:` wiki link as an HTML anchor."""
        if query[0] == '?':
            # Raw URL-style query string, e.g. query:?status=new
            return '<a class="query" href="%s">%s</a>' \
                   % (escape(formatter.href.query()) + query.replace(' ', '+'),
                      label)
        else:
            from trac.ticket.query import Query, QuerySyntaxError
            try:
                query = Query.from_string(formatter.env, unescape(query))
                return '<a class="query" href="%s">%s</a>' \
                       % (escape(query.get_href()), label)
            except QuerySyntaxError, e:
                return '<em class="error">[Error: %s]</em>' % escape(e)
class QueryWikiMacro(Component):
    """Macro that lists tickets that match certain criteria.
    This macro accepts two parameters, the second of which is optional.
    The first parameter is the query itself, and uses the same syntax as for
    {{{query:}}} wiki links. The second parameter determines how the list of
    tickets is presented: the default presentation is to list the ticket ID next
    to the summary, with each ticket on a separate line. If the second parameter
    is given and set to '''compact''' then the tickets are presented as a
    comma-separated list of ticket IDs.
    """
    implements(IWikiMacroProvider)
    def get_macros(self):
        yield 'TicketQuery'
    def get_macro_description(self, name):
        # The class docstring doubles as the macro documentation.
        import inspect
        return inspect.getdoc(QueryWikiMacro)
    def render_macro(self, req, name, content):
        """Render the TicketQuery macro; `content` is `query[,compact]`."""
        query_string = ''
        compact = 0
        argv = content.split(',')
        if len(argv) > 0:
            query_string = argv[0]
        if len(argv) > 1:
            if argv[1].strip().lower() == 'compact':
                compact = 1
        try:
            from cStringIO import StringIO
        except ImportError:
            # Bug fix: a failed import raises ImportError, not NameError;
            # catching NameError made the pure-Python fallback unreachable.
            from StringIO import StringIO
        buf = StringIO()
        query = Query.from_string(self.env, query_string)
        query.order = 'id'
        tickets = query.execute()
        if tickets:
            if compact:
                # Comma-separated list of ticket links.
                links = []
                for ticket in tickets:
                    href = self.env.href.ticket(int(ticket['id']))
                    summary = escape(shorten_line(ticket['summary']))
                    links.append('<a class="%s ticket" href="%s" '
                                 'title="%s">#%s</a>' % (ticket['status'], href,
                                                         summary, ticket['id']))
                buf.write(', '.join(links))
            else:
                # Definition list: ticket id as term, summary as definition.
                buf.write('<dl class="wiki compact">')
                for ticket in tickets:
                    href = self.env.href.ticket(int(ticket['id']))
                    buf.write('<dt><a href="%s">#%s</a></dt>' % (href,
                                                                 ticket['id']))
                    buf.write('<dd>%s</dd>' % (escape(ticket['summary'])))
                buf.write('</dl>')
        return buf.getvalue()
Escape ticket values in the query view for tickets that no longer match the original criteria. Closes #2243.
git-svn-id: f68c6b3b1dcd5d00a2560c384475aaef3bc99487@2408 af82e41b-90c4-0310-8c96-b1721e28e2e2
# -*- coding: iso8859-1 -*-
#
# Copyright (C) 2004-2005 Edgewall Software
# Copyright (C) 2004-2005 Christopher Lenz <cmlenz@gmx.de>
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.com/license.html.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://projects.edgewall.com/trac/.
#
# Author: Christopher Lenz <cmlenz@gmx.de>
from __future__ import generators
import re
import time
from trac.core import *
from trac.perm import IPermissionRequestor
from trac.ticket import Ticket, TicketSystem
from trac.util import escape, unescape, format_datetime, http_date, \
shorten_line, sql_escape, CRLF, TRUE
from trac.web import IRequestHandler
from trac.web.chrome import add_link, add_stylesheet, INavigationContributor
from trac.wiki import wiki_to_html, wiki_to_oneliner, IWikiMacroProvider, \
IWikiSyntaxProvider
# Raised by Query.from_string() for malformed filter expressions.
class QuerySyntaxError(Exception):
    """Exception raised when a ticket query cannot be parsed from a string."""
class Query(object):
    """A custom ticket query: field constraints plus order/group options.

    Builds the SQL needed to retrieve the matching tickets and executes it.
    """
    def __init__(self, env, constraints=None, order=None, desc=0, group=None,
                 groupdesc = 0, verbose=0):
        self.env = env
        self.constraints = constraints or {}
        self.order = order
        self.desc = desc
        self.group = group
        self.groupdesc = groupdesc
        self.verbose = verbose
        self.fields = TicketSystem(self.env).get_ticket_fields()
        self.cols = [] # lazily initialized
        if self.order != 'id' \
           and not self.order in [f['name'] for f in self.fields]:
            # order by priority by default
            self.order = 'priority'
    def from_string(cls, env, string, **kw):
        """Parse a query string of the form `field=value&field=value|value`.

        A trailing `~`, `^` or `$` on the field name selects the match mode
        and a trailing `!` negates the filter. Raises `QuerySyntaxError` for
        malformed input.
        """
        filters = string.split('&')
        constraints = {}
        for filter in filters:
            filter = filter.split('=')
            if len(filter) != 2:
                raise QuerySyntaxError, 'Query filter requires field and ' \
                                        'constraints separated by a "="'
            field,values = filter
            if not field:
                raise QuerySyntaxError, 'Query filter requires field name'
            values = values.split('|')
            mode, neg = '', ''
            if field[-1] in ('~', '^', '$'):
                mode = field[-1]
                field = field[:-1]
            if field[-1] == '!':
                neg = '!'
                field = field[:-1]
            # Each value is stored with its negation/mode prefix attached.
            values = map(lambda x: neg + mode + x, values)
            constraints[field] = values
        return cls(env, constraints, **kw)
    from_string = classmethod(from_string)
    def get_columns(self):
        """Determine and cache the list of columns to display."""
        if self.cols:
            return self.cols
        # FIXME: the user should be able to configure which columns should
        # be displayed
        cols = ['id']
        cols += [f['name'] for f in self.fields if f['type'] != 'textarea']
        # Move these low-priority columns to the end of the list.
        for col in ('reporter', 'keywords', 'cc'):
            if col in cols:
                cols.remove(col)
                cols.append(col)
        # Semi-intelligently remove columns that are restricted to a single
        # value by a query constraint.
        for col in [k for k in self.constraints.keys() if k in cols]:
            constraint = self.constraints[col]
            if len(constraint) == 1 and constraint[0] \
               and not constraint[0][0] in ('!', '~', '^', '$'):
                if col in cols:
                    cols.remove(col)
            if col == 'status' and not 'closed' in constraint \
               and 'resolution' in cols:
                # Open tickets have no resolution, so hide that column.
                cols.remove('resolution')
        if self.group in cols:
            cols.remove(self.group)
        def sort_columns(col1, col2):
            # cmp-style comparator used to order the visible columns.
            constrained_fields = self.constraints.keys()
            # Ticket ID is always the first column
            if 'id' in [col1, col2]:
                return col1 == 'id' and -1 or 1
            # Ticket summary is always the second column
            elif 'summary' in [col1, col2]:
                return col1 == 'summary' and -1 or 1
            # Constrained columns appear before other columns
            elif col1 in constrained_fields or col2 in constrained_fields:
                return col1 in constrained_fields and -1 or 1
            return 0
        cols.sort(sort_columns)
        # Only display the first few columns by default (cols[:7] keeps
        # seven columns)
        # FIXME: Make this configurable on a per-user and/or per-query basis
        self.cols = cols[:7]
        if not self.order in self.cols and not self.order == self.group:
            # Make sure the column we order by is visible, if it isn't also
            # the column we group by
            self.cols[-1] = self.order
        return self.cols
    def execute(self, db=None):
        """Run the query and return a list of result dicts, one per ticket.

        Values are returned raw (not HTML-escaped); the web layer is
        responsible for escaping them before display.
        """
        if not self.cols:
            self.get_columns()
        sql = self.get_sql()
        self.env.log.debug("Query SQL: %s" % sql)
        if not db:
            db = self.env.get_db_cnx()
        cursor = db.cursor()
        cursor.execute(sql)
        columns = cursor.description
        results = []
        for row in cursor:
            id = int(row[0])
            result = {'id': id, 'href': self.env.href.ticket(id)}
            for i in range(1, len(columns)):
                name, val = columns[i][0], row[i]
                if name == self.group:
                    val = val or 'None'
                elif name == 'reporter':
                    val = val or 'anonymous'
                elif name in ['changetime', 'time']:
                    val = int(val)
                elif val is None:
                    val = '--'
                result[name] = val
            results.append(result)
        cursor.close()
        return results
    def get_href(self, format=None):
        """Return the URL for this query, preserving all current settings."""
        return self.env.href.query(order=self.order,
                                   desc=self.desc and 1 or None,
                                   group=self.group or None,
                                   groupdesc=self.groupdesc and 1 or None,
                                   verbose=self.verbose and 1 or None,
                                   format=format,
                                   **self.constraints)
    def get_sql(self):
        """Build and return the SQL SELECT statement for this query."""
        if not self.cols:
            self.get_columns()
        # Build the list of actual columns to query
        cols = self.cols[:]
        def add_cols(*args):
            # Append the given columns unless already selected.
            for col in args:
                if not col in cols:
                    cols.append(col)
        if self.group and not self.group in cols:
            add_cols(self.group)
        if self.verbose:
            add_cols('reporter', 'description')
        add_cols('priority', 'time', 'changetime', self.order)
        cols.extend([c for c in self.constraints.keys() if not c in cols])
        custom_fields = [f['name'] for f in self.fields if f.has_key('custom')]
        sql = []
        sql.append("SELECT " + ",".join(['t.%s AS %s' % (c, c) for c in cols
                                         if c not in custom_fields]))
        sql.append(",priority.value AS priority_value")
        for k in [k for k in cols if k in custom_fields]:
            sql.append(",%s.value AS %s" % (k, k))
        sql.append("\nFROM ticket AS t")
        # One outer join per custom field, aliased by the field name.
        for k in [k for k in cols if k in custom_fields]:
            sql.append("\n LEFT OUTER JOIN ticket_custom AS %s ON " \
                       "(id=%s.ticket AND %s.name='%s')" % (k, k, k, k))
        for col in [c for c in ('status', 'resolution', 'priority', 'severity')
                    if c == self.order or c == self.group or c == 'priority']:
            sql.append("\n LEFT OUTER JOIN enum AS %s ON (%s.type='%s' AND %s.name=%s)"
                       % (col, col, col, col, col))
        for col in [c for c in ['milestone', 'version']
                    if c == self.order or c == self.group]:
            sql.append("\n LEFT OUTER JOIN %s ON (%s.name=%s)" % (col, col, col))
        def get_constraint_sql(name, value, mode, neg):
            # NOTE(review): `len(mode and '!' or '' + mode)` parses as
            # `(mode and '!') or ('' + mode)` -- it strips exactly one
            # character when a mode is present and none otherwise, so a
            # leading '!' negation marker (multi-value path below) is not
            # stripped. Confirm whether `len(mode) + neg` was intended.
            value = sql_escape(value[len(mode and '!' or '' + mode):])
            if name not in custom_fields:
                name = 't.' + name
            else:
                name = name + '.value'
            if mode == '~' and value:
                return "COALESCE(%s,'') %sLIKE '%%%s%%'" % (
                       name, neg and 'NOT ' or '', value)
            elif mode == '^' and value:
                return "COALESCE(%s,'') %sLIKE '%s%%'" % (
                       name, neg and 'NOT ' or '', value)
            elif mode == '$' and value:
                return "COALESCE(%s,'') %sLIKE '%%%s'" % (
                       name, neg and 'NOT ' or '', value)
            elif mode == '':
                return "COALESCE(%s,'')%s='%s'" % (
                       name, neg and '!' or '', value)
        clauses = []
        for k, v in self.constraints.items():
            # Determine the match mode of the constraint (contains, starts-with,
            # negation, etc)
            neg = len(v[0]) and v[0][0] == '!'
            mode = ''
            if len(v[0]) > neg and v[0][neg] in ('~', '^', '$'):
                mode = v[0][neg]
            # Special case for exact matches on multiple values
            if not mode and len(v) > 1:
                inlist = ",".join(["'" + sql_escape(val[neg and 1 or 0:]) + "'"
                                   for val in v])
                if k not in custom_fields:
                    col = 't.' + k
                else:
                    col = k + '.value'
                clauses.append("COALESCE(%s,'') %sIN (%s)"
                               % (col, neg and 'NOT ' or '', inlist))
            elif len(v) > 1:
                constraint_sql = filter(lambda x: x is not None,
                                        [get_constraint_sql(k, val, mode, neg)
                                         for val in v])
                if not constraint_sql:
                    continue
                if neg:
                    # Negated: the ticket must match none of the values.
                    clauses.append("(" + " AND ".join(constraint_sql) + ")")
                else:
                    clauses.append("(" + " OR ".join(constraint_sql) + ")")
            elif len(v) == 1:
                clauses.append(get_constraint_sql(k, v[0][neg and 1 or 0:],
                                                  mode, neg))
        clauses = filter(None, clauses)
        if clauses:
            sql.append("\nWHERE " + " AND ".join(clauses))
        sql.append("\nORDER BY ")
        order_cols = [(self.order, self.desc)]
        if self.group and self.group != self.order:
            order_cols.insert(0, (self.group, self.groupdesc))
        for name, desc in order_cols:
            if name not in custom_fields:
                col = 't.' + name
            else:
                col = name + '.value'
            if name == 'id':
                # FIXME: This is a somewhat ugly hack. Can we also have the
                # column type for this? If it's an integer, we do first
                # one, if text, we do 'else'
                if desc:
                    sql.append("COALESCE(%s,0)=0 DESC," % col)
                else:
                    sql.append("COALESCE(%s,0)=0," % col)
            else:
                # Sort empty values last.
                if desc:
                    sql.append("COALESCE(%s,'')='' DESC," % col)
                else:
                    sql.append("COALESCE(%s,'')=''," % col)
            if name in ['status', 'resolution', 'priority', 'severity']:
                # Enums sort by their numeric value, not by name.
                if desc:
                    sql.append("%s.value DESC" % name)
                else:
                    sql.append("%s.value" % name)
            elif col in ['t.milestone', 't.version']:
                time_col = name == 'milestone' and 'milestone.due' or 'version.time'
                if desc:
                    sql.append("COALESCE(%s,0)=0 DESC,%s DESC,%s DESC"
                               % (time_col, time_col, col))
                else:
                    sql.append("COALESCE(%s,0)=0,%s,%s"
                               % (time_col, time_col, col))
            else:
                if desc:
                    sql.append("%s DESC" % col)
                else:
                    sql.append("%s" % col)
            if name == self.group and not name == self.order:
                sql.append(",")
        if self.order != 'id':
            sql.append(",t.id")
        return "".join(sql)
class QueryModule(Component):
    """Web front-end for custom ticket queries (the /query page).

    Renders the query form and results as HTML, CSV, tab-delimited text or
    RSS, and provides the `query:` wiki link resolver.
    """
    implements(IRequestHandler, INavigationContributor, IWikiSyntaxProvider)
    # INavigationContributor methods
    def get_active_navigation_item(self, req):
        # Highlight the "tickets" entry in the main navigation bar.
        return 'tickets'
    def get_navigation_items(self, req):
        # Contribute a "View Tickets" entry only when the report module is
        # disabled; otherwise the report module provides that entry.
        from trac.ticket.report import ReportModule
        if req.perm.has_permission('TICKET_VIEW') and \
           not self.env.is_component_enabled(ReportModule):
            yield 'mainnav', 'tickets', '<a href="%s">View Tickets</a>' \
                  % escape(self.env.href.query())
    # IRequestHandler methods
    def match_request(self, req):
        return req.path_info == '/query'
    def process_request(self, req):
        """Parse the request into a `Query` and dispatch on `format`."""
        req.perm.assert_permission('TICKET_VIEW')
        constraints = self._get_constraints(req)
        if not constraints and not req.args.has_key('order'):
            # avoid displaying all tickets when the query module is invoked
            # with no parameters. Instead show only open tickets, possibly
            # associated with the user
            constraints = {'status': ('new', 'assigned', 'reopened')}
            if req.authname and req.authname != 'anonymous':
                constraints['owner'] = (req.authname,)
            else:
                email = req.session.get('email')
                name = req.session.get('name')
                if email or name:
                    # NOTE(review): parses as ('~%s' % email) or name, so an
                    # empty email falls back to the bare name -- confirm this
                    # is the intended precedence.
                    constraints['cc'] = ('~%s' % email or name,)
        query = Query(self.env, constraints, req.args.get('order'),
                      req.args.has_key('desc'), req.args.get('group'),
                      req.args.has_key('groupdesc'),
                      req.args.has_key('verbose'))
        if req.args.has_key('update'):
            # Reset session vars
            for var in ('query_constraints', 'query_time', 'query_tickets'):
                if req.session.has_key(var):
                    del req.session[var]
            req.redirect(query.get_href())
        add_link(req, 'alternate', query.get_href('rss'), 'RSS Feed',
                 'application/rss+xml', 'rss')
        add_link(req, 'alternate', query.get_href('csv'),
                 'Comma-delimited Text', 'text/plain')
        add_link(req, 'alternate', query.get_href('tab'), 'Tab-delimited Text',
                 'text/plain')
        # Split each stored constraint value back into its mode prefix
        # ('~', '^', '$', with optional '!') and bare value so the filter
        # form can be re-displayed.
        constraints = {}
        for k, v in query.constraints.items():
            constraint = {'values': [], 'mode': ''}
            for val in v:
                neg = val.startswith('!')
                if neg:
                    val = val[1:]
                mode = ''
                if val[:1] in ('~', '^', '$'):
                    mode, val = val[:1], val[1:]
                constraint['mode'] = (neg and '!' or '') + mode
                constraint['values'].append(val)
            constraints[k] = constraint
        req.hdf['query.constraints'] = constraints
        format = req.args.get('format')
        if format == 'rss':
            self.display_rss(req, query)
            return 'query_rss.cs', 'application/rss+xml'
        elif format == 'csv':
            self.display_csv(req, query)
        elif format == 'tab':
            self.display_csv(req, query, '\t')
        else:
            self.display_html(req, query)
            return 'query.cs', None
    # Internal methods
    def _get_constraints(self, req):
        """Extract ticket-field constraints from the request arguments."""
        constraints = {}
        ticket_fields = [f['name'] for f in
                         TicketSystem(self.env).get_ticket_fields()]
        # For clients without JavaScript, we remove constraints here if
        # requested
        remove_constraints = {}
        to_remove = [k[10:] for k in req.args.keys()
                     if k.startswith('rm_filter_')]
        if to_remove: # either empty or containing a single element
            match = re.match(r'(\w+?)_(\d+)$', to_remove[0])
            if match:
                remove_constraints[match.group(1)] = int(match.group(2))
            else:
                # -1 means: drop every value of that field
                remove_constraints[to_remove[0]] = -1
        for field in [k for k in req.args.keys() if k in ticket_fields]:
            vals = req.args[field]
            if not isinstance(vals, (list, tuple)):
                vals = [vals]
            vals = map(lambda x: x.value, vals)
            if vals:
                # Prepend the match mode (e.g. '~', '!~') to each value.
                mode = req.args.get(field + '_mode')
                if mode:
                    vals = map(lambda x: mode + x, vals)
                if remove_constraints.has_key(field):
                    idx = remove_constraints[field]
                    if idx >= 0:
                        del vals[idx]
                        if not vals:
                            continue
                    else:
                        continue
                constraints[field] = vals
        return constraints
    def _get_constraint_modes(self):
        """Return the match modes selectable for text and select fields."""
        modes = {}
        modes['text'] = [
            {'name': "contains", 'value': "~"},
            {'name': "doesn't contain", 'value': "!~"},
            {'name': "begins with", 'value': "^"},
            {'name': "ends with", 'value': "$"},
            {'name': "is", 'value': ""},
            {'name': "is not", 'value': "!"}
        ]
        modes['select'] = [
            {'name': "is", 'value': ""},
            {'name': "is not", 'value': "!"}
        ]
        return modes
    def display_html(self, req, query):
        """Execute the query and fill the HDF for the query.cs template."""
        req.hdf['title'] = 'Custom Query'
        add_stylesheet(req, 'common/css/report.css')
        db = self.env.get_db_cnx()
        for field in query.fields:
            if field['type'] == 'textarea':
                continue
            hdf = {}
            hdf.update(field)
            del hdf['name']
            req.hdf['query.fields.' + field['name']] = hdf
        req.hdf['query.modes'] = self._get_constraint_modes()
        # For clients without JavaScript, we add a new constraint here if
        # requested
        if req.args.has_key('add'):
            field = req.args.get('add_filter')
            if field:
                idx = 0
                if query.constraints.has_key(field):
                    idx = len(query.constraints[field])
                req.hdf['query.constraints.%s.values.%d' % (field, idx)] = ''
        cols = query.get_columns()
        for i in range(len(cols)):
            header = {'name': cols[i]}
            req.hdf['query.headers.%d' % i] = header
        href = self.env.href.query(group=query.group,
                                   groupdesc=query.groupdesc and 1 or None,
                                   verbose=query.verbose and 1 or None,
                                   **query.constraints)
        req.hdf['query.order'] = query.order
        req.hdf['query.href'] = escape(href)
        if query.desc:
            req.hdf['query.desc'] = True
        if query.group:
            req.hdf['query.group'] = query.group
        if query.groupdesc:
            req.hdf['query.groupdesc'] = True
        if query.verbose:
            req.hdf['query.verbose'] = True
        tickets = query.execute(db)
        req.hdf['query.num_matches'] = len(tickets)
        # The most recent query is stored in the user session
        orig_list = rest_list = None
        orig_time = int(time.time())
        if str(query.constraints) != req.session.get('query_constraints'):
            # New query, initialize session vars
            req.session['query_constraints'] = str(query.constraints)
            req.session['query_time'] = int(time.time())
            req.session['query_tickets'] = ' '.join([str(t['id']) for t in tickets])
        else:
            orig_list = [int(id) for id in req.session.get('query_tickets', '').split()]
            rest_list = orig_list[:]
            orig_time = int(req.session.get('query_time', 0))
        req.session['query_href'] = query.get_href()
        # Find out which tickets originally in the query results no longer
        # match the constraints
        if rest_list:
            for tid in [t['id'] for t in tickets if t['id'] in rest_list]:
                rest_list.remove(tid)
            for rest_id in rest_list:
                # Re-insert removed tickets at their original position,
                # flagged with 'removed' so the template can style them.
                ticket = Ticket(self.env, int(rest_id), db=db)
                data = {'id': ticket.id, 'time': ticket.time_created,
                        'changetime': ticket.time_changed, 'removed': True,
                        'href': self.env.href.ticket(ticket.id)}
                data.update(ticket.values)
                tickets.insert(orig_list.index(rest_id), data)
        for ticket in tickets:
            if orig_list:
                # Mark tickets added or changed since the query was first
                # executed
                if int(ticket['time']) > orig_time:
                    ticket['added'] = True
                elif int(ticket['changetime']) > orig_time:
                    ticket['changed'] = True
            # Escape every field value for the HTML view; 'description' is
            # rendered as wiki markup and 'time' is formatted first.
            for field, value in ticket.items():
                if field == 'time':
                    ticket[field] = escape(format_datetime(value))
                elif field == 'description':
                    ticket[field] = wiki_to_html(value or '', self.env, req, db)
                else:
                    ticket[field] = escape(value)
        req.hdf['query.results'] = tickets
        req.session['query_tickets'] = ' '.join([str(t['id']) for t in tickets])
        # Kludge: only show link to available reports if the report module is
        # actually enabled
        from trac.ticket.report import ReportModule
        if req.perm.has_permission('REPORT_VIEW') and \
           self.env.is_component_enabled(ReportModule):
            req.hdf['query.report_href'] = self.env.href.report()
    def display_csv(self, req, query, sep=','):
        """Send the query results as delimiter-separated plain text."""
        req.send_response(200)
        req.send_header('Content-Type', 'text/plain;charset=utf-8')
        req.end_headers()
        cols = query.get_columns()
        req.write(sep.join([col for col in cols]) + CRLF)
        results = query.execute(self.env.get_db_cnx())
        for result in results:
            # Replace separator and newline characters so each result stays
            # on a single line.
            req.write(sep.join([str(result[col]).replace(sep, '_')
                                                .replace('\n', ' ')
                                                .replace('\r', ' ')
                                for col in cols]) + CRLF)
    def display_rss(self, req, query):
        """Execute the query (verbose) and fill the HDF for the RSS feed."""
        query.verbose = True
        db = self.env.get_db_cnx()
        results = query.execute(db)
        for result in results:
            result['href'] = self.env.abs_href.ticket(result['id'])
            if result['reporter'].find('@') == -1:
                # Blank out reporter names that don't look like an email
                # address.
                result['reporter'] = ''
            if result['description']:
                result['description'] = escape(wiki_to_html(result['description'] or '',
                                                            self.env, req, db,
                                                            absurls=1))
            if result['time']:
                result['time'] = http_date(result['time'])
        req.hdf['query.results'] = results
    # IWikiSyntaxProvider methods
    def get_wiki_syntax(self):
        return []
    def get_link_resolvers(self):
        yield ('query', self._format_link)
    def _format_link(self, formatter, ns, query, label):
        """Render a `query:` wiki link as an HTML anchor."""
        if query[0] == '?':
            # Raw URL-style query string, e.g. query:?status=new
            return '<a class="query" href="%s">%s</a>' \
                   % (escape(formatter.href.query()) + query.replace(' ', '+'),
                      label)
        else:
            from trac.ticket.query import Query, QuerySyntaxError
            try:
                query = Query.from_string(formatter.env, unescape(query))
                return '<a class="query" href="%s">%s</a>' \
                       % (escape(query.get_href()), label)
            except QuerySyntaxError, e:
                return '<em class="error">[Error: %s]</em>' % escape(e)
class QueryWikiMacro(Component):
    """Macro that lists tickets that match certain criteria.
    This macro accepts two parameters, the second of which is optional.
    The first parameter is the query itself, and uses the same syntax as for
    {{{query:}}} wiki links. The second parameter determines how the list of
    tickets is presented: the default presentation is to list the ticket ID next
    to the summary, with each ticket on a separate line. If the second parameter
    is given and set to '''compact''' then the tickets are presented as a
    comma-separated list of ticket IDs.
    """
    implements(IWikiMacroProvider)
    def get_macros(self):
        yield 'TicketQuery'
    def get_macro_description(self, name):
        # The class docstring doubles as the macro documentation.
        import inspect
        return inspect.getdoc(QueryWikiMacro)
    def render_macro(self, req, name, content):
        """Render the TicketQuery macro; `content` is `query[,compact]`."""
        query_string = ''
        compact = 0
        argv = content.split(',')
        if len(argv) > 0:
            query_string = argv[0]
        if len(argv) > 1:
            if argv[1].strip().lower() == 'compact':
                compact = 1
        try:
            from cStringIO import StringIO
        except ImportError:
            # Bug fix: a failed import raises ImportError, not NameError;
            # catching NameError made the pure-Python fallback unreachable.
            from StringIO import StringIO
        buf = StringIO()
        query = Query.from_string(self.env, query_string)
        query.order = 'id'
        tickets = query.execute()
        if tickets:
            if compact:
                # Comma-separated list of ticket links.
                links = []
                for ticket in tickets:
                    href = self.env.href.ticket(int(ticket['id']))
                    summary = escape(shorten_line(ticket['summary']))
                    links.append('<a class="%s ticket" href="%s" '
                                 'title="%s">#%s</a>' % (ticket['status'], href,
                                                         summary, ticket['id']))
                buf.write(', '.join(links))
            else:
                # Definition list: ticket id as term, summary as definition.
                buf.write('<dl class="wiki compact">')
                for ticket in tickets:
                    href = self.env.href.ticket(int(ticket['id']))
                    buf.write('<dt><a href="%s">#%s</a></dt>' % (href,
                                                                 ticket['id']))
                    buf.write('<dd>%s</dd>' % (escape(ticket['summary'])))
                buf.write('</dl>')
        return buf.getvalue()
# --- file boundary artifact ---
#!/usr/bin/env python
"""
The latest version of this package is available at:
<http://github.com/jantman/biweeklybudget>
################################################################################
Copyright 2016 Jason Antman <jason@jasonantman.com> <http://www.jasonantman.com>
This file is part of biweeklybudget, also known as biweeklybudget.
biweeklybudget is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
biweeklybudget is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with biweeklybudget. If not, see <http://www.gnu.org/licenses/>.
The Copyright and Authors attributions contained herein may not be removed or
otherwise altered, except to add the Author attribution of a contributor to
this work. (Additional Terms pursuant to Section 7b of the AGPL v3)
################################################################################
While not legally required, I sincerely request that anyone who finds
bugs please submit them at <https://github.com/jantman/biweeklybudget> or
to me via email, and that you send any contributions or improvements
either as a pull request on GitHub, or to me via email.
################################################################################
AUTHORS:
Jason Antman <jason@jasonantman.com> <http://www.jasonantman.com>
################################################################################
"""
import sys
import os
import re
from sphinx_js.jsdoc import run_jsdoc
from sphinx_js.renderers import AutoFunctionRenderer
from collections import defaultdict
# rST source for the head of the generated jsdoc index page; one toctree
# entry per documented JS file is appended below this by JSDocumenter.run().
index_head = """UI JavaScript Docs
==================
Files
-----
.. toctree::
"""
class JSDocumenter(object):
    """
    Generate .rst files for javascript documentation. The ``sphinx-jsdoc``
    package does this nicely, but requires ``jsdoc`` to be available at
    build time, which rules out readthedocs.org. So... we hack apart that
    package to get it to generate static rST files on-demand, which we then
    save in the source directory.
    """

    # Matches a top-level ``function name(...) {`` declaration.
    func_re = re.compile(r'^function ([^\(]+)\([^\)]*\) {')

    def __init__(self, toxinidir):
        """
        Initialize class

        :param toxinidir: tox.ini directory
        :type toxinidir: str
        """
        self.toxinidir = toxinidir
        self.jsdir = os.path.join(
            toxinidir, 'biweeklybudget', 'flaskapp', 'static', 'js'
        )
        self.srcdir = os.path.join(
            toxinidir, 'docs', 'source'
        )
        # sphinx_js expects a Sphinx app object; FakeApp fakes just enough.
        self.app = FakeApp()
        self.app.config.js_source_path = self.jsdir

    def run(self):
        """
        Main entry point to build jsdoc: run jsdoc, group function doclets
        per source file, write one .rst per file plus the jsdoc.rst index.
        """
        self._cleanup()
        run_jsdoc(self.app)
        # build a dict of files to the list of function longnames in them
        # (defaultdict(list) replaces the unidiomatic defaultdict(type([])))
        funcs_per_file = defaultdict(list)
        for longname, d in self.app._sphinxjs_doclets_by_longname.items():
            if d['kind'] != 'function':
                continue
            funcs_per_file[d['meta']['filename']].append(longname)
        index = index_head
        for fname in sorted(funcs_per_file.keys()):
            refname = self._docs_for_file(fname, funcs_per_file[fname])
            index += " %s\n" % refname
        with open(os.path.join(self.srcdir, 'jsdoc.rst'), 'w') as fh:
            fh.write(index)
        print('Wrote: jsdoc.rst')

    def _docs_for_file(self, fname, func_longnames):
        """
        Generate and write documentation for a given JS file. Return the
        sphinx reference name for the file.

        :param fname: name of the file
        :type fname: str
        :param func_longnames: list of function longnames to document
        :type func_longnames: list
        :return: sphinx reference name for file
        :rtype: str
        """
        print("Documenting %s" % fname)
        shortname = fname.split('.')[0]
        refname = 'jsdoc.%s' % shortname
        # raw string: the backslash escapes the underscore for rST output
        # (the original '\_' relied on an invalid Python escape sequence)
        refname_esc = refname.replace('_', r'\_')
        doclet = self.app._sphinxjs_doclets_by_longname[func_longnames[0]]
        path = os.path.join(doclet['meta']['path'], fname)
        body = "%s\n" % refname_esc
        body += ('=' * len(refname_esc)) + "\n\n"
        # Show the file path relative to the repository root.
        body += "File: ``%s``\n\n" % path.replace(
            os.path.realpath(self.toxinidir) + '/', ''
        )
        for funcname in sorted(func_longnames):
            r = AutoFunctionRenderer(None, self.app, arguments=[funcname])
            doclet = self.app._sphinxjs_doclets_by_longname.get(funcname)
            body += r.rst(funcname, doclet)
        docpath = os.path.join(self.srcdir, '%s.rst' % refname)
        with open(docpath, 'w') as fh:
            fh.write(body)
        print("\tWritten to: %s" % docpath)
        return refname

    def _cleanup(self):
        """
        Remove existing jsdoc source files.
        """
        print('Cleaning up existing jsdoc*.rst source files...')
        for f in os.listdir(self.srcdir):
            p = os.path.join(self.srcdir, f)
            if not os.path.isfile(p):
                continue
            # Only remove generated files: jsdoc*.rst
            if not f.startswith('jsdoc') or not f.endswith('.rst'):
                continue
            print("\t%s" % p)
            os.unlink(p)
class Config(object):
    # Minimal stand-in for Sphinx's app.config: only the two settings that
    # sphinx_js reads are provided.
    jsdoc_config_path = None
    js_source_path = None
class FakeApp(object):
    # Minimal stand-in for a Sphinx application object: carries the fake
    # config plus the doclet caches that sphinx_js populates and reads.
    config = Config()
    _sphinxjs_doclets_by_class = None
    _sphinxjs_doclets_by_longname = None
if __name__ == "__main__":
    # Expect exactly one argument: the project's tox.ini directory.
    if len(sys.argv) != 2:
        print("USAGE: make_jsdoc.py TOXINIDIR")
        raise SystemExit(1)
    JSDocumenter(sys.argv[1]).run()
# TODO: get jsdoc to handle classes as well
#!/usr/bin/env python
"""
The latest version of this package is available at:
<http://github.com/jantman/biweeklybudget>
################################################################################
Copyright 2016 Jason Antman <jason@jasonantman.com> <http://www.jasonantman.com>
This file is part of biweeklybudget, also known as biweeklybudget.
biweeklybudget is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
biweeklybudget is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with biweeklybudget. If not, see <http://www.gnu.org/licenses/>.
The Copyright and Authors attributions contained herein may not be removed or
otherwise altered, except to add the Author attribution of a contributor to
this work. (Additional Terms pursuant to Section 7b of the AGPL v3)
################################################################################
While not legally required, I sincerely request that anyone who finds
bugs please submit them at <https://github.com/jantman/biweeklybudget> or
to me via email, and that you send any contributions or improvements
either as a pull request on GitHub, or to me via email.
################################################################################
AUTHORS:
Jason Antman <jason@jasonantman.com> <http://www.jasonantman.com>
################################################################################
"""
import sys
import os
import re
from sphinx_js.jsdoc import run_jsdoc
from sphinx_js.renderers import AutoFunctionRenderer
from collections import defaultdict
# rST source for the head of the generated jsdoc index page; one toctree
# entry per documented JS file is appended below this by JSDocumenter.run().
index_head = """UI JavaScript Docs
==================
Files
-----
.. toctree::
"""
class JSDocumenter(object):
    """
    Generate .rst files for javascript documentation. The ``sphinx-jsdoc``
    package does this nicely, but requires ``jsdoc`` to be available at
    build time, which rules out readthedocs.org. So... we hack apart that
    package to get it to generate static rST files on-demand, which we then
    save in the source directory.
    """

    # Matches a top-level ``function name(...) {`` declaration.
    func_re = re.compile(r'^function ([^\(]+)\([^\)]*\) {')

    def __init__(self, toxinidir):
        """
        Initialize class

        :param toxinidir: tox.ini directory
        :type toxinidir: str
        """
        self.toxinidir = toxinidir
        self.jsdir = os.path.join(
            toxinidir, 'biweeklybudget', 'flaskapp', 'static', 'js'
        )
        self.srcdir = os.path.join(
            toxinidir, 'docs', 'source'
        )
        # sphinx_js expects a Sphinx app object; FakeApp fakes just enough.
        self.app = FakeApp()
        self.app.config.js_source_path = self.jsdir

    def run(self):
        """
        Main entry point to build jsdoc: run jsdoc, group function and class
        doclets per source file, write one .rst per file plus jsdoc.rst.
        """
        self._cleanup()
        run_jsdoc(self.app)
        # build a dict of files to the list of function longnames in them
        # (defaultdict(list) replaces the unidiomatic defaultdict(type([])))
        funcs_per_file = defaultdict(list)
        for longname, d in self.app._sphinxjs_doclets_by_longname.items():
            # NOTE(review): class doclets are rendered with
            # AutoFunctionRenderer in _docs_for_file -- confirm the output
            # is acceptable for classes.
            if d['kind'] not in ['function', 'class']:
                continue
            funcs_per_file[d['meta']['filename']].append(longname)
        index = index_head
        for fname in sorted(funcs_per_file.keys()):
            refname = self._docs_for_file(fname, funcs_per_file[fname])
            index += " %s\n" % refname
        with open(os.path.join(self.srcdir, 'jsdoc.rst'), 'w') as fh:
            fh.write(index)
        print('Wrote: jsdoc.rst')

    def _docs_for_file(self, fname, func_longnames):
        """
        Generate and write documentation for a given JS file. Return the
        sphinx reference name for the file.

        :param fname: name of the file
        :type fname: str
        :param func_longnames: list of function longnames to document
        :type func_longnames: list
        :return: sphinx reference name for file
        :rtype: str
        """
        print("Documenting %s" % fname)
        shortname = fname.split('.')[0]
        refname = 'jsdoc.%s' % shortname
        # raw string: the backslash escapes the underscore for rST output
        # (the original '\_' relied on an invalid Python escape sequence)
        refname_esc = refname.replace('_', r'\_')
        doclet = self.app._sphinxjs_doclets_by_longname[func_longnames[0]]
        path = os.path.join(doclet['meta']['path'], fname)
        body = "%s\n" % refname_esc
        body += ('=' * len(refname_esc)) + "\n\n"
        # Show the file path relative to the repository root.
        body += "File: ``%s``\n\n" % path.replace(
            os.path.realpath(self.toxinidir) + '/', ''
        )
        for funcname in sorted(func_longnames):
            r = AutoFunctionRenderer(None, self.app, arguments=[funcname])
            doclet = self.app._sphinxjs_doclets_by_longname.get(funcname)
            body += r.rst(funcname, doclet)
        docpath = os.path.join(self.srcdir, '%s.rst' % refname)
        with open(docpath, 'w') as fh:
            fh.write(body)
        print("\tWritten to: %s" % docpath)
        return refname

    def _cleanup(self):
        """
        Remove existing jsdoc source files.
        """
        print('Cleaning up existing jsdoc*.rst source files...')
        for f in os.listdir(self.srcdir):
            p = os.path.join(self.srcdir, f)
            if not os.path.isfile(p):
                continue
            # Only remove generated files: jsdoc*.rst
            if not f.startswith('jsdoc') or not f.endswith('.rst'):
                continue
            print("\t%s" % p)
            os.unlink(p)
class Config(object):
    # Minimal stand-in for Sphinx's app.config: only the two settings that
    # sphinx_js reads are provided.
    jsdoc_config_path = None
    js_source_path = None
class FakeApp(object):
    # Minimal stand-in for a Sphinx application object: carries the fake
    # config plus the doclet caches that sphinx_js populates and reads.
    config = Config()
    _sphinxjs_doclets_by_class = None
    _sphinxjs_doclets_by_longname = None
if __name__ == "__main__":
    # Expect exactly one argument: the project's tox.ini directory.
    if len(sys.argv) != 2:
        print("USAGE: make_jsdoc.py TOXINIDIR")
        raise SystemExit(1)
    JSDocumenter(sys.argv[1]).run()
# --- file boundary artifact ---
#!/usr/bin/env python
__author__ = 'Bjorn Wouters'
__email__ = "bjorn-wouters@hotmail.com"
"""
Description: Automatic adding and/or analysis of a specific genome in the R RnBeads package.
Version: 1.0.0.
Known bugs:
- stdout fix for Galaxy (not showing in Galaxy except on the end).
Dependencies:
- RnBeads package (edited for this script).
- Template package.
- prepare_analysis script.
- Assembly template folder.
Work to be done:
- Adding of the assembly to the source package isn't stable (can't delete assemblies folder or you'll get an error).
- Adding of more options for the analysis instead of using the default.
- Automatic sample file creation.
"""
import subprocess
import sys
import os
import platform
from shutil import rmtree, move, copyfile
import tarfile
import argparse
import prepare_analysis
import check_file_formats
import time
def main():
    """Entry point: add a genome/assembly to RnBeads and/or run an analysis.

    Dispatches on the argparse sub-command: when the parsed arguments carry
    no ``fasta`` attribute the 'only_analysis' sub-command was chosen and
    only the analysis runs; otherwise the full add-genome pipeline runs and,
    when the args define ``cores`` ('add_and_analysis'), an analysis follows.
    """
    tmp_files = list()
    # Parse all the arguments given by the argparse module.
    args = parse_args()
    try:
        # Means the argparse sub-command "only_analysis" is chosen.
        if "fasta" not in args:
            # Run analysis with (optional) added annotation.
            analysis_script = run_analysis(args)
            tmp_files.append(analysis_script)
            sys.exit(0)
        # Checks the files on the right syntax.
        check_file_formats.check_fasta(args.fasta)
        check_file_formats.check_bed(args.bed)
        check_file_formats.check_sample_file(args.sample_file)
        # Prepare analysis files
        if args.bed:
            prepare_bed_analysis(args)
        # Converts given .fasta file to a 2bit file and writes it to the /tmp/ directory.
        twobit_file = fasta_to_2bit(args)
        tmp_files.append(twobit_file)
        # Makes description file out of the species and genus information given by the argparse
        # arguments and writes it to the /tmp/ directory.
        description = make_rnbeads_description(twobit_file, args)
        tmp_files.append(description.name)
        # Remove all existing BSgenome.* packages from the temp dir before forging
        # (shell glob, expanded by bash in run_subprocess).
        cmd = "rm -rf %s" % os.path.join(args.temp_directory, 'BSgenome*')
        run_subprocess(cmd, "removing all existing BSgenome packages")
        # Creates the new package of the given fasta via the BSgenome.forge method in R.
        forge_script = forge_genome_file(description, args)
        tmp_files.append(forge_script)
        # Installs the genome file to R on your system.
        genome_folder, genome_package = install_genome_file(args)
        tmp_files.extend([genome_folder, genome_package])
        # Append the RnBeads sourcecode with the new assembly.
        append_source_code(args, genome_folder)
        # Append assembly file of the RnBeads package.
        append_script = append_assembly(args)
        tmp_files.append(append_script)
        # Makes the description file for the given assembly.
        make_assembly_description(args, genome_folder)
        # Install the new RnBeads package so appended source code will be used in the analysis.
        install_rnbeads(args)
        # Calculates all the CpG sites for each chromosome and appends it to the assembly package.
        site_data, r_script, region_file = get_cpg_sites(args, genome_folder)
        tmp_files.extend([site_data, r_script, region_file])
        # Installs the given assembly with the annotated site data
        install_assembly(args)
        # Means the "add_and_analysis" sub-command is chosen.
        if "cores" in args:
            # Run analysis with (optional) added annotation.
            analysis_script = run_analysis(args)
            tmp_files.append(analysis_script)
    finally:
        # Clean-up of the temp files is currently disabled; re-enable when safe:
        # clear_tmp(tmp_files)
        pass  # FIX: explicit pass instead of the 'a=1' placeholder statement
def run_subprocess(cmd, log_message):
    """
    Run subprocess under standardized settings.

    :param cmd: shell command line; executed with shell=True under /bin/bash
    :param log_message: human-readable description used in the log output
    :return: 0 on success
    :raises Exception: when the command exits with a non-zero status
    """
    sys.stdout.write("now starting:\t%s\n\n" % log_message)
    sys.stdout.write('running:\t%s\n\n' % cmd)
    # universal_newlines gives text output with '\r' translated to '\n',
    # matching the original manual .replace('\r', '\n') post-processing.
    p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                         shell=True, executable='/bin/bash',
                         universal_newlines=True)
    # communicate() instead of wait()+read(): wait() can deadlock when a
    # pipe buffer fills up before the child exits.
    stdout, stderr = p.communicate()
    exit_code = p.returncode
    if stdout:
        sys.stdout.write('stdout:\n%s\n' % stdout)
    if stderr:
        sys.stdout.write('stderr:\n%s\n' % stderr)
    sys.stdout.write('finished:\t%s\n\n' % log_message)
    if exit_code:
        # BUG FIX: the original *returned* the Exception object instead of
        # raising it, silently swallowing every command failure.
        raise Exception("Call of %s failed with \n %s" % (cmd, stderr))
    return 0
def compress_folder(args, cur_time):
    """
    For Galaxy: compress the finished analysis folder to tar.gz format and
    move it over the Galaxy .dat file given as ``args.output``.

    :param cur_time: timestamp string naming the analysis run folder
    Silently returns when the analysis folder does not exist.
    """
    if args.scriptdir is None:
        script_dir = os.path.dirname(os.path.realpath(__file__))
        script_dir = script_dir.replace(' ', '\\ ')
    else:
        script_dir = args.scriptdir  # Folder destination of the current script.
    analysis_folder = os.path.join(script_dir, "assemblies", args.assembly_code, cur_time)
    if not os.path.isdir(analysis_folder):
        return
    tar_name = os.path.join(args.temp_directory, cur_time + "_analysis.tar.gz")
    # Context manager guarantees the archive is flushed/closed before the move.
    with tarfile.open(tar_name, "w:gz") as tar_file:
        tar_file.add(analysis_folder, arcname=cur_time + "_analysis")
    move(tar_name, args.output)
def prepare_bed_analysis(args):
    """
    :argument: args, all arguments from argparse.
    Preparing analysis which means:
    - Making .bed files with each EPP annotated for every sample.
    - Making chromosomes file with each valid chromosome.
    - Making the analysis folder.
    """
    if args.scriptdir == None:
        script_dir = os.path.dirname(os.path.realpath(__file__))
        script_dir = script_dir.replace(' ', '\ ')
    else:
        script_dir = args.scriptdir
    # script_dir = script_dir.replace(' ','\ ')
    sys.stdout.write("Starting: Preparing analysis files.\n")
    output_dir = os.path.join(script_dir, "assemblies", args.assembly_code, "bs.bed/")
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)  # Create folder if not already exists.
    # Copies the given sample file to the analysis folder.
    copyfile(args.sample_file, os.path.join(script_dir, "assemblies", args.assembly_code, "sample.csv"))
    sample_file = open(os.path.join(script_dir, "assemblies", args.assembly_code, "sample.csv"))
    # First CSV column of every row is the sample name; [1:] skips the header.
    given_samples = list(sample.split(",")[0] for sample in sample_file)[1:]
    sample_file.close()
    input_file, output_dict, samples = prepare_analysis.ParseFiles(args.bed, output_dir)  # Make all .bed files
    # Fill all .bed files with formatted info.
    invalid_samples = prepare_analysis.IgvToRnBeads(input_file, output_dict, samples, output_dir, given_samples,
                                                    args.minimal_reads)
    # If there are invalid samples (that have less than 5% of the reads of the maximum sample`s reads), Then
    # there will be a new samples file created with these filtered out.
    if invalid_samples:
        # Rewrite sample.csv in place, dropping the filtered-out samples.
        original_sample_file = open(sample_file.name)
        lines = original_sample_file.readlines()
        original_sample_file.close()
        new_sample_file = open(sample_file.name, "w")
        for line in lines:
            if line.split(",")[0] not in invalid_samples:
                new_sample_file.write(line)
        new_sample_file.close()
        sys.stdout.write("There are: " + str(len(invalid_samples)) + """ samples filtered out of the sample file because
        they have to few sites.\nSamples that are filtered out: """ + " and ".join(invalid_samples) + "\n")
    sys.stdout.write("Finished: Preparing analysis files.\n")
    # sys.exit(0)
def run_analysis(args):
    """
    :argument: args, all arguments from argparse.
    :return: path of the generated run.analysis.R script (for cleanup).

    Analysis function for the R RnBeads package: fills the run.analysis.R
    template, executes it through R and, when an output path is given
    (Galaxy), compresses the analysis output folder afterwards.
    """
    cur_time = time.strftime("%d_%m_%Y_%H:%M")
    if args.scriptdir is None:
        script_dir = os.path.dirname(os.path.realpath(__file__))
        script_dir = script_dir.replace(' ', '\\ ')
    else:
        script_dir = args.scriptdir  # Folder destination of the current script.
    package_name = "".join(["BSgenome.", args.species_name, args.genus_name, ".NIOO.v", args.version])
    if args.annotation:  # Creates R readable code for the file locations of the analysis.
        annotation_files = list()
        annotation_names = list()
        # TODO: adding multiple annotation files in galaxy does not yet work.
        for annotation_file in args.annotation:
            annotation_files.append('"' + annotation_file + '"')
            annotation_names.append('"' + os.path.basename(os.path.splitext(annotation_file)[0]) + '"')
        annotation_files = ",".join(annotation_files)
        annotation_names = ",".join(annotation_names)
    else:
        annotation_files = "NULL"
        annotation_names = "NULL"
    # Template parameters substituted into run.analysis.R via '%'.
    script_dict = {
        "annotation_files": annotation_files,
        "annotation_names": annotation_names,
        "assembly": args.assembly_code,
        "package": os.path.basename(package_name),
        "species": args.species_name,
        "lib_path": args.lib_path,
        "directory": os.path.join(script_dir, "assemblies", args.assembly_code + "/"),
        "cores": args.cores,
        "time": cur_time}
    # 'with' closes the handles (the original leaked the template handle).
    with open(os.path.join(script_dir, "templates", "run.analysis.R")) as template_fh:
        template_script = template_fh.read() % script_dict
    r_script_path = os.path.join(args.temp_directory, "run.analysis.R")
    with open(r_script_path, "w") as r_script:
        r_script.write(template_script)
    # Load the R script in R and run consecutively without user input.
    command = "R < " + r_script_path + " --no-save"
    log_message = "Running RnBeads analysis, this could take a while."
    run_subprocess(command, log_message)  # Running the main analysis.
    if args.output:
        # Compresses the RnBeads output folder and replaces the Galaxy .dat file.
        compress_folder(args, cur_time)
    return r_script_path
def install_assembly(args):
    """
    Install the template assembly package (script_dir/assembly) with the
    generated data and DESCRIPTION file via ``R CMD INSTALL``.
    """
    if args.scriptdir is None:
        script_dir = os.path.dirname(os.path.realpath(__file__))
        script_dir = script_dir.replace(' ', '\\ ')
    else:
        script_dir = args.scriptdir  # Folder destination of the current script.
        script_dir = script_dir.replace(' ', '\\ ')
    assembly_package = script_dir + "/assembly"
    # "c()" (an empty R vector) means: install into R's default library
    # path; otherwise install into -- and export -- the specific lib path.
    if args.lib_path == "c()":
        specific_path = ""
    else:
        specific_path = " -l " + args.lib_path + " ; R_LIBS=" + args.lib_path + "; export R_LIBS"
    command = "R CMD INSTALL " + assembly_package + specific_path
    log_message = "Installing the new assembly package"
    run_subprocess(command, log_message)  # Installs the assembly package.
def get_cpg_sites(args, package_name):
    """
    :argument: args, all arguments from argparse.
    :param package_name: path/name of the BSgenome package folder.
    :return: [sites_output, r_script_path, regions_output] -- paths the
        caller collects for /tmp cleanup.

    Creates the sites and the region output for the assembly: sites.RData
    holds the information of each CpG position and regions.RData holds the
    data of the optional annotation. Runs the filled-in get.sites.R
    template through R.
    """
    if args.scriptdir is None:
        script_dir = os.path.dirname(os.path.realpath(__file__))
        script_dir = script_dir.replace(' ', '\\ ')
    else:
        script_dir = args.scriptdir  # Folder destination of the current script.
    output_file_name = args.assembly_code + ".CpG.RData"
    output_dir = os.path.join(script_dir, 'assembly', 'data', output_file_name)
    # Make sure assembly/data exists before R tries to write into it.
    if not os.path.exists(os.path.join(script_dir, 'assembly', 'data')):
        os.mkdir(os.path.join(script_dir, 'assembly', 'data'))
    region_output = os.path.join(script_dir.replace(' ', '\\ '), 'assembly', 'data',
                                 args.assembly_code + '.regions.RData')
    # Template parameters substituted into get.sites.R via '%'.
    script_dict = {
        "assembly": args.assembly_code,
        "package": os.path.basename(package_name),
        "species": args.species_name,
        "sites_output": output_dir.replace(' ', '\\ '),
        "regions_output": region_output,
        "lib_path": args.lib_path,
        "chromosomes": os.path.join(script_dir, "assemblies", args.assembly_code + "/bs.bed/chromosomes.txt")}
    # 'with' closes the handle (the original leaked the template handle).
    with open(os.path.join(script_dir, "templates/get.sites.R")) as template_fh:
        template_script = template_fh.read() % script_dict
    r_script_path = os.path.join(args.temp_directory, "get.sites.R")
    with open(r_script_path, "w") as r_script:
        r_script.write(template_script)
    command = "R < " + r_script_path + " --no-save"
    log_message = "Adding CpG sites to assembly annotation."
    run_subprocess(command, log_message)  # Runs the annotation script.
    return [output_dir, r_script_path, region_output]  # For the cleanup of the /tmp/ folder.
def make_assembly_description(args, package_name):
    """
    :argument: args, all arguments from argparse.
    :param package_name: path/name of the BSgenome package folder.

    Makes the description for the new assembly: fills the template with the
    assembly code and package name, then moves the result into the assembly
    package folder.
    """
    if args.scriptdir is None:
        script_dir = os.path.dirname(os.path.realpath(__file__))
        script_dir = script_dir.replace(' ', '\\ ')
    else:
        script_dir = args.scriptdir  # Folder destination of the current script.
    description_dict = {
        "assembly": args.assembly_code,
        "package": os.path.basename(package_name)}
    # 'with' closes the handle (the original leaked the template handle).
    with open(os.path.join(script_dir, "templates/assembly_description.DCF")) as template_fh:
        template_description = template_fh.read() % description_dict
    # BUG FIX: os.path.join instead of plain concatenation -- the original
    # wrote to the wrong path whenever temp_directory lacked a trailing '/'.
    description_path = os.path.join(args.temp_directory, "DESCRIPTION")
    with open(description_path, "w") as new_description:
        new_description.write(template_description)
    move(description_path, script_dir + "/assembly/DESCRIPTION")  # Install the new description file.
def install_rnbeads(args):
    """
    Reinstall the (appended) RnBeads package via ``R CMD INSTALL`` so the
    modified source code is used by subsequent analyses.
    """
    if args.scriptdir is None:
        script_dir = os.path.dirname(os.path.realpath(__file__))
        script_dir = script_dir.replace(' ', '\\ ')
    else:
        script_dir = args.scriptdir
    rnbeads_package = os.path.join(script_dir, "RnBeads")
    # "c()" (an empty R vector) means: install into R's default library
    # path; otherwise install into -- and export -- the specific lib path.
    if args.lib_path == "c()":
        specific_path = ""
    else:
        specific_path = " -l " + args.lib_path + " ; R_LIBS=" + args.lib_path + "; export R_LIBS"
    command = "R CMD INSTALL " + rnbeads_package + specific_path
    log_message = "reinstalling the appended RnBeads package"
    run_subprocess(command, log_message)  # Runs the process.
def append_assembly(args):
    """
    :argument: args, all arguments from argparse.
    :return: path of the generated add.assembly.R script (for cleanup).

    Fills the add.assembly.R template, runs it through R to extend the
    RnBeads annotation data with the new assembly, and moves the updated
    annotations.RData back into the RnBeads package.
    """
    # Consistency fix: honour args.scriptdir like every sibling helper does
    # (the original always used the script's own folder here).
    if args.scriptdir is None:
        script_dir = os.path.dirname(os.path.realpath(__file__))
    else:
        script_dir = args.scriptdir
    annotation_file = "".join(['"', script_dir, "/RnBeads/data/annotations.RData", '"'])
    assembly_dict = {
        "file": annotation_file,
        "assembly": args.assembly_code,
        "tmp": args.temp_directory
    }
    # 'with' closes the handle (the original leaked the template handle).
    with open(os.path.join(script_dir, "templates/add.assembly.R")) as template_fh:
        template_script = template_fh.read() % assembly_dict
    script_path = os.path.join(args.temp_directory, "add.assembly.R")
    with open(script_path, "w") as add_assembly_script:
        add_assembly_script.write(template_script)
    command = "R < " + script_path + " --no-save"
    log_message = "Adding assembly to RnBeads package."
    run_subprocess(command, log_message)  # Executes the command and appends the assembly data.
    move(os.path.join(args.temp_directory, "annotations.RData"),
         os.path.join(script_dir, "RnBeads", "data", "annotations.RData"))
    return script_path
def append_source_code(args, folder_name):
    """
    Appends new genome Rfile to the existing R source code so that Rnbeads can be run on the 'new' assembly.

    :param folder_name: folder name of the installed BSgenome package.

    NOTE(review): assemblies.R acts as its own template -- it contains
    %(chromosomes)s / %(assembly_table)s / %(assembly_package)s markers, and
    each inserted snippet ends by re-adding its marker (the '##%(...)s'
    suffixes below) so the file can be extended again for the next assembly.
    """
    #TODO: investigate safer / better method to check for doubles / existing genome assembly names. Do not add
    #TODO: assemblies that already exist!
    chrom_sizes = prepare_analysis.chrom_sizes(args.fasta, args.temp_directory, args.assembly_code)
    if args.scriptdir == None:
        script_dir = os.path.dirname(os.path.realpath(__file__))
        script_dir = script_dir.replace(' ', '\ ')
    else:
        script_dir = args.scriptdir  # Gets the folder destination of the current script.
    # script_dir = script_dir.replace(' ','\ ')
    # Install the chromosome-sizes file into the RnBeads package data.
    move(chrom_sizes.name, script_dir+"/RnBeads/inst/extdata/chromSizes/"+os.path.basename(chrom_sizes.name))
    # R snippet declaring the chromosomes table for the new assembly.
    chromosome_var = "".join(["\n", args.assembly_code, '.chr <- read.table("',
                              os.path.join(script_dir, "assemblies", args.assembly_code+"/"),
                              'bs.bed/chromosomes.txt")\n', '##%(chromosomes)s'])
    # R snippet adding the assembly to the assembly lookup table.
    assembly_var = "".join(["\n,", "'"+args.assembly_code+"'", " = ", args.assembly_code, ".chr[[1]]\n",
                            '##%(assembly_table)s'])
    # R snippet loading the BSgenome package for the new assembly.
    package_var = "".join(["\nelse if (assembly == ", "'", args.assembly_code, "') {\n",
                           "suppressPackageStartupMessages(require(", os.path.basename(folder_name), "))\n",
                           "genome.data <- ", args.species_name, "\n}\n", "##%(assembly_package)s"])
    annotation_dict = {
        "chromosomes": chromosome_var,
        "assembly_table": assembly_var,
        "assembly_package": package_var}
    # Reads the template script and puts the variable of the annotation dict in the script as a string.
    annotation = open(script_dir+"/RnBeads/R/assemblies.R").read() % annotation_dict
    # Overwrite assemblies.R in place with the extended source.
    new_annotation = open(script_dir+"/RnBeads/R/assemblies.R", "w")
    new_annotation.write(annotation)
    new_annotation.close()
def install_genome_file(args):
    """
    Build and install the freshly forged BSgenome (R library) for the given
    .fasta file.

    :return: [genome_folder, genome_package] -- the package folder name and
        the built tarball name, collected by the caller for /tmp cleanup.
    """
    tmp = args.temp_directory
    # Read the package name of the genome from the generated DESCRIPTION.
    with open(os.path.join(args.temp_directory, "description.DCF")) as description:
        first_line = description.readline()
    # BUG FIX: the original used first_line.strip("Package: \n"), which
    # strips *characters* (any of 'P','a','c','k','g','e',':',' '), not the
    # prefix, and could eat leading/trailing letters of the package name.
    genome_folder = first_line.replace("Package:", "", 1).strip()
    version_name = args.version + ".0"  # Syntax needs to be version + .0
    command = ''.join(["cd ", tmp, ";R CMD build ", genome_folder, "/"])
    log_message = "Building genome file"
    run_subprocess(command, log_message)  # Builds the genomic annotation file.
    genome_package = ''.join([genome_folder, "_", version_name, ".tar.gz"])
    # "c()" (an empty R vector) means: install into R's default library
    # path; otherwise install into -- and export -- the specific lib path.
    if args.lib_path == "c()":
        specific_path = ""
    else:
        specific_path = " -l " + args.lib_path + " ; R_LIBS=" + args.lib_path + "; export R_LIBS"
    command = "".join(["cd ", tmp, ";R CMD INSTALL ", genome_package, specific_path])
    log_message = "Installing genome file"
    run_subprocess(command, log_message)  # Install the built tarball.
    return [genome_folder, genome_package]
def forge_genome_file(description, args):
    """
    Creates the new package of the given fasta via the BSgenome.forge
    method in R.

    :param description: (closed) file object of the generated description.DCF
    :return: path of the generated forge.genome.R script (for cleanup).
    """
    script_dir = os.path.dirname(os.path.realpath(__file__))  # Folder destination of the current script.
    file_dict = {"DCF": description.name,
                 "tmp": args.temp_directory}
    # 'with' closes the handle (the original leaked the template handle).
    with open(os.path.join(script_dir, "templates/forge.genome.R")) as file_template:
        forge_template = file_template.read()
    adjusted_forge_template = forge_template % file_dict  # Puts the dict in the template string.
    script_path = os.path.join(args.temp_directory, "forge.genome.R")
    with open(script_path, "w") as forge_r_script:
        forge_r_script.write(adjusted_forge_template)
    # Turn the description file into a buildable package folder via R.
    command = "R < " + script_path + " --no-save"
    log_message = "Forging genome file"
    run_subprocess(command, log_message)  # Runs the filled-in template script.
    return script_path
def make_rnbeads_description(twobit_file, args):
    """
    Makes the description file for the RnBeads package.

    :param twobit_file: path of the generated .2bit genome file.
    :return: (closed) file object of the written description.DCF; callers
        use its ``.name`` attribute.
    """
    if args.scriptdir is None:
        script_dir = os.path.dirname(os.path.realpath(__file__))
        script_dir = script_dir.replace(' ', '\\ ')
    else:
        script_dir = args.scriptdir  # Folder destination of the current script.
    # Template parameters substituted into DESCRIPTION_TEMPLATE.DCF via '%'.
    file_dict = {
        "species": args.species_name,
        "genus": args.genus_name,
        "twobit_dir": args.temp_directory,
        "twobit_name": os.path.basename(twobit_file),
        "templ_dir": args.temp_directory,
        "version": args.version}
    with open(os.path.join(script_dir, "templates/DESCRIPTION_TEMPLATE.DCF")) as description_template:
        adjusted_template = description_template.read() % file_dict
    # Writes the adjusted template to a new file in the given temp folder.
    description = open(os.path.join(args.temp_directory, "description.DCF"), "w")
    description.write(adjusted_template)
    description.close()
    return description
def fasta_to_2bit(args):
    """
    Converts the given fasta to a .2bit file via the faToTwoBit executable.

    :param args: parsed argparse namespace (fasta, temp_directory, ...).
    :return: path of the written .2bit file inside the temp directory.
    """
    #TODO: build faToTwoBit from source on installation to make sure it runs.
    #Source for faToTwoBit for mac osx is here: http://hgdownload.cse.ucsc.edu/admin/exe/macOSX.x86_64/
    #TODO: or list dependency in help and check for executable upon running analysis.
    # The original read args.scriptdir, which argparse never defines (the
    # option is --script_dir); getattr also covers sub-commands without it.
    script_dir = getattr(args, "script_dir", None)
    if script_dir is None:
        script_dir = os.path.dirname(os.path.realpath(__file__))
    # Escape spaces for the shell command built below (both branches escaped
    # in the original of this function).
    script_dir = script_dir.replace(' ', '\\ ')
    # Note: the original message contained a stray double quote after the
    # species name ("""" instead of """); fixed here.
    sys.stdout.write("""Adding the genome of """+args.species_name+""" to the RnBeads package\n
    Starting with converting the .fasta to a .2bit file.\n""")
    fasta = args.fasta
    twobit_name = os.path.basename(os.path.splitext(fasta)[0])+'.2bit'
    twobit_file = os.path.join(args.temp_directory, twobit_name)
    # Writes the fasta to a twobit file in the tmp folder; deleted after the analysis.
    log_message = "Converts the given fasta to a .2bit file"
    print(platform.system())  # parenthesised: valid in both Python 2 and 3
    if platform.system() == 'Linux':
        command = " ".join([os.path.join(script_dir, "templates/faToTwoBit_linux"), fasta, twobit_file])
    else:
        command = " ".join([os.path.join(script_dir, "templates/faToTwoBit"), fasta, twobit_file])
    run_subprocess(command, log_message)
    return twobit_file
def parse_args():
    """
    Builds the command-line parser (three sub-commands) and parses sys.argv.

    :return: the parsed argparse namespace.
    """
    parser = argparse.ArgumentParser(description="""Add new genome to the RnBeads R module.
    Or, if already done, run only the analysis.""")
    subparsers = parser.add_subparsers(help='Choose your analysis type:')
    #TODO: add option to list already run analysis for existing genomes. Skip existing genomes.
    # If chosen: only the run_analysis function will be executed.
    analysis_only = subparsers.add_parser('analysis_only', help="""If assembly and genome already added:
    analysis only is possible.""")
    analysis_only.add_argument('-c', '--cores', help='Number of cores you want to use for the analysis', default="2")
    analysis_only.add_argument('-a', '--annotation', default=None, nargs="*",
                               help="""Annotation file(s) to be added for the annotation (optional).
                               Extra files can be added by separating them with a whitespace.
                               File syntax: e.g. chr1\nchr2\n.
                               The name of the annotation will be the same as the filename.""")
    analysis_only.add_argument('-tmp', '--temp_directory', help='temp directory', default="/tmp/")
    analysis_only.add_argument('-s', '--species_name', help='Name of the species belonging to the genome')
    analysis_only.add_argument('-g', '--genus_name', help='Name of the genus from where the organism stems from')
    analysis_only.add_argument('-v', '--version', help='Version of the genome to be forged', default="1")
    analysis_only.add_argument('-ac', '--assembly_code', help="""Assembly code used for your organism in the RnBeads
    package""")
    analysis_only.add_argument('-lp', '--lib_path', help='Library installation folder for R packages.', default="c()")
    analysis_only.add_argument('-o', '--output', help='Output file (needed for galaxy)', default=None)
    # --script_dir was only defined for add_and_analysis, so any other
    # sub-command crashed with AttributeError in functions reading
    # args.script_dir; define it (default None) on every subparser.
    analysis_only.add_argument('-sd', '--script_dir', help='directory of script', default=None)
    # If chosen all the main functions will be executed.
    add_and_analysis = subparsers.add_parser('add_and_analysis', help="""Add genome and assembly AND analyse it
    afterwards.""")
    add_and_analysis.add_argument('-c', '--cores', help='Number of cores you want to use for the analysis', default="2")
    add_and_analysis.add_argument('-f', '--fasta', help='Fasta input file of the new genome', default=None)
    add_and_analysis.add_argument('-b', '--bed', help='Bed input file to be analysed. (optional if already done)',
                                  default=None)
    add_and_analysis.add_argument('-a', '--annotation', default=None, nargs="*",
                                  help="""Annotation file(s) to be added for the annotation (optional).
                                  Extra files can be added by separating them with a whitespace.
                                  File syntax: e.g. chr1\nchr2\n.
                                  The name of the annotation will be the same as the filename.""")
    add_and_analysis.add_argument('-sf', '--sample_file', help='Sample file location.')
    add_and_analysis.add_argument('-tmp', '--temp_directory', help='temp directory', default="/tmp/")
    add_and_analysis.add_argument('-s', '--species_name', help='Name of the species belonging to the genome')
    add_and_analysis.add_argument('-g', '--genus_name', help='Name of the genus from where the organism stems from')
    add_and_analysis.add_argument('-v', '--version', help='Version of the genome to be forged', default="1")
    add_and_analysis.add_argument('-ac', '--assembly_code', help="""Assembly code used for your organism in the RnBeads
    package""")
    add_and_analysis.add_argument('-o', '--output', help='Output file (needed for galaxy)', default=None)
    add_and_analysis.add_argument('-lp', '--lib_path', help='Library installation folder for R packages.',
                                  default="c()")
    add_and_analysis.add_argument('-mr', '--minimal_reads', help='Number of minimal reads per sample on one CpG site',
                                  default=5)
    add_and_analysis.add_argument('-sd', '--script_dir', help='directory of script', default=None)
    # If chosen: every main function will be executed except for the run_analysis function.
    add_only = subparsers.add_parser('add_only', help="Only add the genome and assembly to the RnBeads package.")
    add_only.add_argument('-f', '--fasta', help='Fasta input file of the new genome', default=None)
    add_only.add_argument('-b', '--bed', help='Bed input file to be analysed. (optional if already done)', default=None)
    add_only.add_argument('-sf', '--sample_file', help='Sample file location.')
    add_only.add_argument('-tmp', '--temp_directory', help='temp directory', default="/tmp/")
    add_only.add_argument('-s', '--species_name', help='Name of the species belonging to the genome')
    add_only.add_argument('-g', '--genus_name', help='Name of the genus from where the organism stems from')
    add_only.add_argument('-v', '--version', help='Version of the genome to be forged', default="1")
    add_only.add_argument('-ac', '--assembly_code', help='Assembly code used for your organism in the RnBeads package')
    add_only.add_argument('-lp', '--lib_path', help='Library installation folder for R packages.', default="c()")
    add_only.add_argument('-mr', '--minimal_reads', help='Number of minimal reads per sample on one CpG site',
                          default=5)
    add_only.add_argument('-sd', '--script_dir', help='directory of script', default=None)
    # Parses the arguments to the args variable.
    args = parser.parse_args()
    return args
def clear_tmp(trash):
    """
    Removes every path collected in *trash*: directories recursively, files
    individually; paths that no longer exist are silently ignored.
    """
    for path in trash:
        if os.path.isdir(path):
            rmtree(path)
            continue
        try:
            os.remove(path)
        except OSError:
            # Already gone (or never created) - nothing to clean up.
            pass
# Calls the main function when the script is executed directly
# (has no effect when this module is imported).
if __name__ == '__main__':
    main()
# FIXME: use args.script_dir instead of args.scriptdir - argparse stores the
# --script_dir option under the attribute name script_dir.
#!/usr/bin/env python
__author__ = 'Bjorn Wouters'
__email__ = "bjorn-wouters@hotmail.com"
"""
Description: Automatic adding and/or analysis of a specific genome in the R RnBeads package.
Version: 1.0.0.
Known bugs:
- stdout fix for Galaxy (output is not shown in Galaxy until the run has ended).
Dependencies:
- RnBeads package (edited for this script).
- Template package.
- prepare_analysis script.
- Assembly template folder.
Work to be done:
- Adding of the assembly to the source package isn't stable (can't delete assemblies folder or you'll get an error).
- Adding of more options for the analysis instead of using the default.
- Automatic sample file creation.
"""
import subprocess
import sys
import os
import platform
from shutil import rmtree, move, copyfile
import tarfile
import argparse
import prepare_analysis
import check_file_formats
import time
def main():
    """
    Entry point: adds a genome/assembly to the RnBeads package and/or runs
    the RnBeads analysis, depending on the chosen sub-command.
    """
    tmp_files = list()
    # Parse all the arguments given by the argparse module.
    args = parse_args()
    try:
        # The "analysis_only" sub-command is the only one without a --fasta
        # option, so its absence from the namespace identifies that mode.
        if "fasta" not in args:
            # Run analysis with (optional) added annotation.
            analysis_script = run_analysis(args)
            tmp_files.append(analysis_script)
            sys.exit(0)
        # Checks the input files for the right syntax.
        check_file_formats.check_fasta(args.fasta)
        check_file_formats.check_bed(args.bed)
        check_file_formats.check_sample_file(args.sample_file)
        # Prepare analysis files.
        if args.bed:
            prepare_bed_analysis(args)
        # Converts the given .fasta file to a 2bit file in the temp directory.
        twobit_file = fasta_to_2bit(args)
        tmp_files.append(twobit_file)
        # Makes a DESCRIPTION file out of the species and genus information
        # given by the argparse arguments and writes it to the temp directory.
        description = make_rnbeads_description(twobit_file, args)
        tmp_files.append(description.name)
        # Remove all pre-existing BSgenome.* packages from the temp directory
        # before forging the new one.
        cmd = "rm -rf %s" % os.path.join(args.temp_directory, 'BSgenome*')
        run_subprocess(cmd, "removing all existing BSgenome packages")
        # Creates the new package of the given fasta via BSgenome.forge in R.
        forge_script = forge_genome_file(description, args)
        tmp_files.append(forge_script)
        # Installs the genome file into R on this system.
        genome_folder, genome_package = install_genome_file(args)
        tmp_files.extend([genome_folder, genome_package])
        # Append the RnBeads source code with the new assembly.
        append_source_code(args, genome_folder)
        # Append the assembly file of the RnBeads package.
        append_script = append_assembly(args)
        tmp_files.append(append_script)
        # Makes the DESCRIPTION file for the given assembly.
        make_assembly_description(args, genome_folder)
        # Reinstall RnBeads so the appended source code is used in the analysis.
        install_rnbeads(args)
        # Calculates all CpG sites per chromosome and adds them to the assembly.
        site_data, r_script, region_file = get_cpg_sites(args, genome_folder)
        tmp_files.extend([site_data, r_script, region_file])
        # Installs the given assembly with the annotated site data.
        install_assembly(args)
        # Only the "add_and_analysis" sub-command (of the remaining two)
        # defines --cores, so this runs the analysis for that mode only.
        if "cores" in args:
            analysis_script = run_analysis(args)
            tmp_files.append(analysis_script)
    finally:
        # Cleanup is currently disabled; re-enable the call below to clear
        # the files written to the temp directory.  ("pass" replaces the
        # original dummy assignment "a = 1".)
        # clear_tmp(tmp_files)
        pass
def run_subprocess(cmd, log_message):
    """
    Runs *cmd* through /bin/bash, echoing *log_message* and all output.

    :return: 0 on success.
    :raises Exception: when the command exits non-zero (the captured stderr
                       is included in the message).
    """
    sys.stdout.write("now starting:\t%s\n\n" % log_message)
    sys.stdout.write('running:\t%s\n\n' % cmd)
    # universal_newlines makes the pipes text mode and normalises \r line
    # endings, replacing the previous manual .replace('\r', '\n') calls.
    p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                         shell=True, executable='/bin/bash',
                         universal_newlines=True)
    # communicate() drains both pipes concurrently; the original
    # wait()-then-read() order could deadlock once a pipe buffer filled up.
    stdout, stderr = p.communicate()
    exit_code = p.returncode
    if stdout:
        sys.stdout.write('stdout:\n%s\n' % stdout)
    if stderr:
        sys.stdout.write('stderr:\n%s\n' % stderr)
    sys.stdout.write('finished:\t%s\n\n' % log_message)
    if exit_code:
        # The original built this Exception but *returned* it, so failures
        # were silently ignored; raise so a failed step aborts the pipeline.
        raise Exception("Call of %s failed with \n %s" % (cmd, stderr))
    else:
        return 0
def compress_folder(args, cur_time):
    """
    For Galaxy: compresses the finished analysis folder to tar.gz format and
    moves it over the Galaxy .dat output file (args.output).

    :param cur_time: timestamp string naming the analysis run folder.
    """
    # --script_dir is not defined on every sub-command's namespace; fall back
    # to the directory of this script when it is absent or not given.
    script_dir = getattr(args, "script_dir", None)
    if script_dir is None:
        script_dir = os.path.dirname(os.path.realpath(__file__))
        # Escape spaces so the path survives shell commands built elsewhere.
        script_dir = script_dir.replace(' ', '\\ ')
    analysis_folder = os.path.join(script_dir, "assemblies", args.assembly_code, cur_time)
    if not os.path.isdir(analysis_folder):
        return
    archive = os.path.join(args.temp_directory, cur_time + "_analysis.tar.gz")
    # The context manager guarantees the archive is closed even on error.
    with tarfile.open(archive, "w:gz") as tar_file:
        tar_file.add(analysis_folder, arcname=cur_time + "_analysis")
    move(archive, args.output)
def prepare_bed_analysis(args):
    """
    Prepares the analysis input from the master .bed file:
      - Makes a .bed file per sample with each EPP annotated.
      - Makes a chromosomes file with each valid chromosome.
      - Makes the analysis folder and copies the sample file into it.
    Samples with too few reads are filtered out of the copied sample file.
    """
    # --script_dir is not defined on every sub-command's namespace (add_only
    # lacks it in the original parser); getattr avoids the AttributeError.
    script_dir = getattr(args, "script_dir", None)
    if script_dir is None:
        script_dir = os.path.dirname(os.path.realpath(__file__))
        script_dir = script_dir.replace(' ', '\\ ')
    sys.stdout.write("Starting: Preparing analysis files.\n")
    output_dir = os.path.join(script_dir, "assemblies", args.assembly_code, "bs.bed/")
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)  # Create folder if it does not already exist.
    # Copies the given sample file to the analysis folder.
    copyfile(args.sample_file, os.path.join(script_dir, "assemblies", args.assembly_code, "sample.csv"))
    sample_file = open(os.path.join(script_dir, "assemblies", args.assembly_code, "sample.csv"))
    # First CSV column holds the sample name; [1:] skips the header row.
    given_samples = list(sample.split(",")[0] for sample in sample_file)[1:]
    sample_file.close()
    input_file, output_dict, samples = prepare_analysis.ParseFiles(args.bed, output_dir)  # Make all .bed files
    # Fill all .bed files with formatted info.
    invalid_samples = prepare_analysis.IgvToRnBeads(input_file, output_dict, samples, output_dir, given_samples,
                                                    args.minimal_reads)
    # If there are invalid samples (that have less than 5% of the reads of the
    # maximum sample's reads), rewrite the sample file without them.
    if invalid_samples:
        original_sample_file = open(sample_file.name)
        lines = original_sample_file.readlines()
        original_sample_file.close()
        new_sample_file = open(sample_file.name, "w")
        for line in lines:
            if line.split(",")[0] not in invalid_samples:
                new_sample_file.write(line)
        new_sample_file.close()
        sys.stdout.write("There are: " + str(len(invalid_samples)) + """ samples filtered out of the sample file because
        they have to few sites.\nSamples that are filtered out: """ + " and ".join(invalid_samples) + "\n")
    sys.stdout.write("Finished: Preparing analysis files.\n")
def run_analysis(args):
    """
    Runs the RnBeads analysis: fills in the run.analysis.R template, executes
    it in R, and (for Galaxy) compresses the output folder afterwards.

    :param args: parsed argparse namespace.
    :return: path of the generated R script (for later cleanup).
    """
    cur_time = time.strftime("%d_%m_%Y_%H:%M")
    # --script_dir is not defined on every sub-command's namespace; fall back
    # to the directory of this script when it is absent or not given.
    script_dir = getattr(args, "script_dir", None)
    if script_dir is None:
        script_dir = os.path.dirname(os.path.realpath(__file__))
        script_dir = script_dir.replace(' ', '\\ ')
    package_name = "".join(["BSgenome.", args.species_name, args.genus_name, ".NIOO.v", args.version])
    if args.annotation:  # Creates R readable code for the annotation file locations.
        annotation_files = list()
        annotation_names = list()
        #TODO: adding multiple annotation files in galaxy does not yet work.
        for annotation_file in args.annotation:
            annotation_files.append('"' + annotation_file + '"')
            annotation_names.append('"' + os.path.basename(os.path.splitext(annotation_file)[0]) + '"')
        annotation_files = ",".join(annotation_files)
        annotation_names = ",".join(annotation_names)
    else:
        annotation_files = "NULL"
        annotation_names = "NULL"
    # Parameters substituted into the R template.
    script_dict = {
        "annotation_files": annotation_files,
        "annotation_names": annotation_names,
        "assembly": args.assembly_code,
        "package": os.path.basename(package_name),
        "species": args.species_name,
        "lib_path": args.lib_path,
        "directory": os.path.join(script_dir, "assemblies", args.assembly_code + "/"),
        "cores": args.cores,
        "time": cur_time}
    # Fill the template; the with-block closes the handle the original leaked.
    with open(os.path.join(script_dir, "templates", "run.analysis.R")) as template_handle:
        template_script = template_handle.read() % script_dict
    # Write filled in analysis R script to the temp dir.
    r_script = open(os.path.join(args.temp_directory, "run.analysis.R"), "w")
    r_script.write(template_script)
    r_script.close()
    # Load the R script in R and run it without user input.
    command = "R < " + r_script.name + " --no-save"
    log_message = "Running RnBeads analysis, this could take a while."
    run_subprocess(command, log_message)  # Running the main analysis.
    if args.output:
        # Compresses the RnBeads output folder for the Galaxy .dat file.
        compress_folder(args, cur_time)
    return r_script.name
def install_assembly(args):
    """
    Installs the template assembly package (filled with the generated data and
    DESCRIPTION file) into the R library via "R CMD INSTALL".
    """
    # --script_dir is not defined on every sub-command's namespace; fall back
    # to the directory of this script when it is absent or not given.
    script_dir = getattr(args, "script_dir", None)
    if script_dir is None:
        script_dir = os.path.dirname(os.path.realpath(__file__))
    # Escape spaces for the shell command below (the original escaped both
    # branches in this function).
    script_dir = script_dir.replace(' ', '\\ ')
    assembly_package = script_dir + "/assembly"
    # "c()" (an empty R list) means: install into the default R library folder
    # (mostly /usr/local/bin/R); otherwise install into, and export, the
    # specific library path.
    if args.lib_path == "c()":
        specific_path = ""
    else:
        specific_path = " -l " + args.lib_path + " ; R_LIBS=" + args.lib_path + "; export R_LIBS"
    command = "R CMD INSTALL " + assembly_package + specific_path
    log_message = "Installing the new assembly package"
    run_subprocess(command, log_message)  # Installs the assembly with code:command
def get_cpg_sites(args, package_name):
    """
    Creates the sites and regions annotation output for the assembly.

    The sites .RData file contains the information of each CpG position and
    the regions .RData file contains the optional annotation data.  After the
    installation of the assembly, the files are deleted again.

    :param package_name: (path) name of the forged genome package.
    :return: [sites file, R script, regions file] for temp-folder cleanup.
    """
    # --script_dir is not defined on every sub-command's namespace; fall back
    # to the directory of this script when it is absent or not given.
    script_dir = getattr(args, "script_dir", None)
    if script_dir is None:
        script_dir = os.path.dirname(os.path.realpath(__file__))
        script_dir = script_dir.replace(' ', '\\ ')
    output_file_name = args.assembly_code + ".CpG.RData"
    output_dir = os.path.join(script_dir, 'assembly', 'data', output_file_name)
    # Make sure the data subfolder of the assembly package exists.
    if not os.path.exists(os.path.join(script_dir, 'assembly', 'data')):
        os.mkdir(os.path.join(script_dir, 'assembly', 'data'))
    region_output = os.path.join(script_dir.replace(' ', '\\ '), 'assembly', 'data',
                                 args.assembly_code + '.regions.RData')
    # Parameters substituted into the R template.
    script_dict = {
        "assembly": args.assembly_code,
        "package": os.path.basename(package_name),
        "species": args.species_name,
        "sites_output": output_dir.replace(' ', '\\ '),
        "regions_output": region_output,
        "lib_path": args.lib_path,
        "chromosomes": os.path.join(script_dir, "assemblies", args.assembly_code + "/bs.bed/chromosomes.txt")}
    # Fill the template; the with-block closes the handle explicitly.
    with open(os.path.join(script_dir, "templates/get.sites.R")) as template_handle:
        template_script = template_handle.read() % script_dict
    # Write the filled-in R script to the temp folder and execute it.
    r_script = open(os.path.join(args.temp_directory, "get.sites.R"), "w")
    r_script.write(template_script)
    r_script.close()
    command = "R < " + r_script.name + " --no-save"
    log_message = "Adding CpG sites to assembly annotation."
    run_subprocess(command, log_message)  # Runs the annotation script.
    return [output_dir, r_script.name, region_output]  # For the cleanup of the temp folder.
def make_assembly_description(args, package_name):
    """
    Makes the DESCRIPTION file for the new assembly: fills the pre-made
    template with the assembly code and package name, then moves the result
    into the assembly package folder.

    :param package_name: (path) name of the forged genome package.
    """
    # --script_dir is not defined on every sub-command's namespace; fall back
    # to the directory of this script when it is absent or not given.
    script_dir = getattr(args, "script_dir", None)
    if script_dir is None:
        script_dir = os.path.dirname(os.path.realpath(__file__))
        script_dir = script_dir.replace(' ', '\\ ')
    description_dict = {
        "assembly": args.assembly_code,
        "package": os.path.basename(package_name)}
    # Fill the template with the missing values.
    with open(os.path.join(script_dir, "templates/assembly_description.DCF")) as template_handle:
        template_description = template_handle.read() % description_dict
    # os.path.join (instead of string concatenation) keeps this correct even
    # when temp_directory lacks a trailing slash.
    new_description = open(os.path.join(args.temp_directory, "DESCRIPTION"), "w")
    new_description.write(template_description)
    new_description.close()
    # Puts the new description file in the assembly folder.
    move(new_description.name, os.path.join(script_dir, "assembly", "DESCRIPTION"))
def install_rnbeads(args):
    """
    Reinstalls the appended RnBeads package into the user's R library.
    """
    # --script_dir is not defined on every sub-command's namespace; fall back
    # to the directory of this script when it is absent or not given.
    script_dir = getattr(args, "script_dir", None)
    if script_dir is None:
        script_dir = os.path.dirname(os.path.realpath(__file__))
        script_dir = script_dir.replace(' ', '\\ ')
    rnbeads_package = os.path.join(script_dir, "RnBeads")
    # "c()" (an empty R list) means: install into the default R library folder
    # (mostly /usr/local/bin/R); otherwise install into, and export, the
    # specific library path.
    if args.lib_path == "c()":
        specific_path = ""
    else:
        specific_path = " -l " + args.lib_path + " ; R_LIBS=" + args.lib_path + "; export R_LIBS"
    command = "R CMD INSTALL " + rnbeads_package + specific_path
    log_message = "reinstalling the appended RnBeads package"
    run_subprocess(command, log_message)  # Runs the process.
def append_assembly(args):
    """
    Registers the new assembly in the RnBeads annotation data: fills in and
    runs the add.assembly.R template, then moves the resulting
    annotations.RData back into the RnBeads package sources.

    :return: path of the generated R script (for later cleanup).
    """
    # Folder of this script; templates and RnBeads sources live next to it.
    script_dir = os.path.dirname(os.path.realpath(__file__))
    # script_dir = script_dir.replace(' ','\ ')
    annotation_file = '"%s/RnBeads/data/annotations.RData"' % script_dir
    assembly_dict = {
        "file": annotation_file,
        "assembly": args.assembly_code,
        "tmp": args.temp_directory
    }
    # Fill the R template with the assembly information.
    with open(os.path.join(script_dir, "templates/add.assembly.R")) as template_handle:
        filled_script = template_handle.read() % assembly_dict
    # Write the filled-in script to a temporary .R file; it is deleted at the
    # end of the run.
    script_path = os.path.join(args.temp_directory, "add.assembly.R")
    with open(script_path, "w") as script_handle:
        script_handle.write(filled_script)
    # Executes the script, which appends the assembly data.
    run_subprocess("R < " + script_path + " --no-save",
                   "Adding assembly to RnBeads package.")
    move(os.path.join(args.temp_directory, "annotations.RData"),
         os.path.join(script_dir, "RnBeads", "data", "annotations.RData"))
    return script_path
def append_source_code(args, folder_name):
    """
    Appends the new genome to the RnBeads R sources (assemblies.R) so RnBeads
    can be run on the 'new' assembly, and installs its chromosome-sizes file.

    :param folder_name: (path) name of the forged genome package.
    """
    #TODO: investigate safer / better method to check for doubles / existing genome assembly names. Do not add
    #TODO: assemblies that already exist!
    chrom_sizes = prepare_analysis.chrom_sizes(args.fasta, args.temp_directory, args.assembly_code)
    # --script_dir is not defined on every sub-command's namespace; fall back
    # to the directory of this script when it is absent or not given.
    script_dir = getattr(args, "script_dir", None)
    if script_dir is None:
        script_dir = os.path.dirname(os.path.realpath(__file__))
        script_dir = script_dir.replace(' ', '\\ ')
    move(chrom_sizes.name, script_dir + "/RnBeads/inst/extdata/chromSizes/" + os.path.basename(chrom_sizes.name))
    # R snippets spliced into assemblies.R; the trailing ##%(...)s markers
    # re-create the template substitution points for later additions.
    chromosome_var = "".join(["\n", args.assembly_code, '.chr <- read.table("',
                              os.path.join(script_dir, "assemblies", args.assembly_code + "/"),
                              'bs.bed/chromosomes.txt")\n', '##%(chromosomes)s'])
    assembly_var = "".join(["\n,", "'" + args.assembly_code + "'", " = ", args.assembly_code, ".chr[[1]]\n",
                            '##%(assembly_table)s'])
    package_var = "".join(["\nelse if (assembly == ", "'", args.assembly_code, "') {\n",
                           "suppressPackageStartupMessages(require(", os.path.basename(folder_name), "))\n",
                           "genome.data <- ", args.species_name, "\n}\n", "##%(assembly_package)s"])
    annotation_dict = {
        "chromosomes": chromosome_var,
        "assembly_table": assembly_var,
        "assembly_package": package_var}
    # Substitute the snippets into the current sources (the with-block closes
    # the read handle the original leaked) and write the result back in place.
    with open(script_dir + "/RnBeads/R/assemblies.R") as source_handle:
        annotation = source_handle.read() % annotation_dict
    new_annotation = open(script_dir + "/RnBeads/R/assemblies.R", "w")
    new_annotation.write(annotation)
    new_annotation.close()
def install_genome_file(args):
    """
    Builds and installs the new BSgenome R library for the given .fasta file.

    :return: [genome_folder, genome_package] so the caller can clean them up.
    """
    tmp = args.temp_directory
    # Read the package name from the first line of the DESCRIPTION file,
    # which has the form "Package: <name>".
    with open(os.path.join(args.temp_directory, "description.DCF")) as description:
        first_line = description.readline()
    # The original used first_line.strip("Package: \n"), but str.strip strips
    # a *character set* and could also eat leading/trailing characters of the
    # package name itself; splitting on the first ':' is exact.
    genome_folder = first_line.split(":", 1)[-1].strip()
    version_name = args.version + ".0"  # R package version syntax: <version>.0
    command = ''.join(["cd ", tmp, ";R CMD build ", genome_folder, "/"])
    log_message = "Building genome file"
    run_subprocess(command, log_message)  # Builds the genomic annotation file.
    genome_package = ''.join([genome_folder, "_", version_name, ".tar.gz"])
    # "c()" (an empty R list) means: install into the default R library folder
    # (mostly /usr/local/bin/R); otherwise install into, and export, the
    # specific library path.
    if args.lib_path == "c()":
        specific_path = ""
    else:
        specific_path = " -l " + args.lib_path + " ; R_LIBS=" + args.lib_path + "; export R_LIBS"
    command = "".join(["cd ", tmp, ";R CMD INSTALL ", genome_package, specific_path])
    log_message = "Installing genome file"
    run_subprocess(command, log_message)  # After building the tar, the package can be installed.
    return [genome_folder, genome_package]
def forge_genome_file(description, args):
    """
    Creates the BSgenome package sources for the given fasta by filling in and
    running the forge.genome.R template (BSgenome.forge in R).

    :param description: file object of the written description.DCF.
    :return: path of the generated R script (for later cleanup).
    """
    script_dir = os.path.dirname(os.path.realpath(__file__))  # Folder of this script.
    #script_dir = script_dir.replace('Thomas ','Thomas\ ')
    substitutions = {"DCF": description.name, "tmp": args.temp_directory}
    # Fill the template string with the description and temp locations.
    with open(os.path.join(script_dir, "templates/forge.genome.R")) as template_handle:
        filled_template = template_handle.read() % substitutions
    # Write the filled-in template to an R script in the temp directory; it is
    # executed below and deleted at the end of the run.
    script_path = os.path.join(args.temp_directory, "forge.genome.R")
    with open(script_path, "w") as out_handle:
        out_handle.write(filled_template)
    # Running the script turns the description file into a folder that can be
    # built and installed via bash.
    run_subprocess("R < " + script_path + " --no-save", "Forging genome file")
    return script_path
def make_rnbeads_description(twobit_file, args):
    """
    Makes the DESCRIPTION file for the RnBeads genome package.

    :param twobit_file: path of the .2bit file written by fasta_to_2bit.
    :param args: parsed argparse namespace.
    :return: the (closed) file object of the written description.DCF; callers
             only use its .name attribute.
    """
    # --script_dir is not defined on every sub-command's namespace; getattr
    # avoids the AttributeError the original "args.script_dir == None" raised
    # for sub-commands without the option.
    script_dir = getattr(args, "script_dir", None)
    if script_dir is None:
        script_dir = os.path.dirname(os.path.realpath(__file__))
        # Escape spaces so paths survive the shell commands built elsewhere.
        script_dir = script_dir.replace(' ', '\\ ')
    file_dict = {
        "species": args.species_name,
        "genus": args.genus_name,
        "twobit_dir": args.temp_directory,
        "twobit_name": os.path.basename(twobit_file),
        "templ_dir": args.temp_directory,
        "version": args.version}
    with open(os.path.join(script_dir, "templates/DESCRIPTION_TEMPLATE.DCF")) as description_template:
        template_text = description_template.read()
    adjusted_template = template_text % file_dict  # Fill the template string.
    # Writes the adjusted template to a new file in the given temp folder.
    out_handle = open(os.path.join(args.temp_directory, "description.DCF"), "w")
    out_handle.write(adjusted_template)
    out_handle.close()
    return out_handle
def fasta_to_2bit(args):
    """
    Converts the given fasta to a .2bit file via the faToTwoBit executable.

    :param args: parsed argparse namespace (fasta, temp_directory, ...).
    :return: path of the written .2bit file inside the temp directory.
    """
    #TODO: build faToTwoBit from source on installation to make sure it runs.
    #Source for faToTwoBit for mac osx is here: http://hgdownload.cse.ucsc.edu/admin/exe/macOSX.x86_64/
    #TODO: or list dependency in help and check for executable upon running analysis.
    # --script_dir is not defined on every sub-command's namespace; getattr
    # avoids the AttributeError for sub-commands without the option.
    script_dir = getattr(args, "script_dir", None)
    if script_dir is None:
        script_dir = os.path.dirname(os.path.realpath(__file__))
    # Escape spaces for the shell command built below (the original escaped
    # both branches in this function).
    script_dir = script_dir.replace(' ', '\\ ')
    # Note: the original message contained a stray double quote after the
    # species name ("""" instead of """); fixed here.
    sys.stdout.write("""Adding the genome of """+args.species_name+""" to the RnBeads package\n
    Starting with converting the .fasta to a .2bit file.\n""")
    fasta = args.fasta
    twobit_name = os.path.basename(os.path.splitext(fasta)[0])+'.2bit'
    twobit_file = os.path.join(args.temp_directory, twobit_name)
    # Writes the fasta to a twobit file in the tmp folder; deleted after the analysis.
    log_message = "Converts the given fasta to a .2bit file"
    print(platform.system())  # parenthesised: valid in both Python 2 and 3
    if platform.system() == 'Linux':
        command = " ".join([os.path.join(script_dir, "templates/faToTwoBit_linux"), fasta, twobit_file])
    else:
        command = " ".join([os.path.join(script_dir, "templates/faToTwoBit"), fasta, twobit_file])
    run_subprocess(command, log_message)
    return twobit_file
def parse_args():
    """
    Builds the command-line parser (three sub-commands) and parses sys.argv.

    :return: the parsed argparse namespace.
    """
    parser = argparse.ArgumentParser(description="""Add new genome to the RnBeads R module.
    Or, if already done, run only the analysis.""")
    subparsers = parser.add_subparsers(help='Choose your analysis type:')
    #TODO: add option to list already run analysis for existing genomes. Skip existing genomes.
    # If chosen: only the run_analysis function will be executed.
    analysis_only = subparsers.add_parser('analysis_only', help="""If assembly and genome already added:
    analysis only is possible.""")
    analysis_only.add_argument('-c', '--cores', help='Number of cores you want to use for the analysis', default="2")
    analysis_only.add_argument('-a', '--annotation', default=None, nargs="*",
                               help="""Annotation file(s) to be added for the annotation (optional).
                               Extra files can be added by separating them with a whitespace.
                               File syntax: e.g. chr1\nchr2\n.
                               The name of the annotation will be the same as the filename.""")
    analysis_only.add_argument('-tmp', '--temp_directory', help='temp directory', default="/tmp/")
    analysis_only.add_argument('-s', '--species_name', help='Name of the species belonging to the genome')
    analysis_only.add_argument('-g', '--genus_name', help='Name of the genus from where the organism stems from')
    analysis_only.add_argument('-v', '--version', help='Version of the genome to be forged', default="1")
    analysis_only.add_argument('-ac', '--assembly_code', help="""Assembly code used for your organism in the RnBeads
    package""")
    analysis_only.add_argument('-lp', '--lib_path', help='Library installation folder for R packages.', default="c()")
    analysis_only.add_argument('-o', '--output', help='Output file (needed for galaxy)', default=None)
    # --script_dir was only defined for add_and_analysis, so the other
    # sub-commands crashed with AttributeError in every function reading
    # args.script_dir; define it (default None) on all three subparsers.
    analysis_only.add_argument('-sd', '--script_dir', help='directory of script', default=None)
    # If chosen all the main functions will be executed.
    add_and_analysis = subparsers.add_parser('add_and_analysis', help="""Add genome and assembly AND analyse it
    afterwards.""")
    add_and_analysis.add_argument('-c', '--cores', help='Number of cores you want to use for the analysis', default="2")
    add_and_analysis.add_argument('-f', '--fasta', help='Fasta input file of the new genome', default=None)
    add_and_analysis.add_argument('-b', '--bed', help='Bed input file to be analysed. (optional if already done)',
                                  default=None)
    add_and_analysis.add_argument('-a', '--annotation', default=None, nargs="*",
                                  help="""Annotation file(s) to be added for the annotation (optional).
                                  Extra files can be added by separating them with a whitespace.
                                  File syntax: e.g. chr1\nchr2\n.
                                  The name of the annotation will be the same as the filename.""")
    add_and_analysis.add_argument('-sf', '--sample_file', help='Sample file location.')
    add_and_analysis.add_argument('-tmp', '--temp_directory', help='temp directory', default="/tmp/")
    add_and_analysis.add_argument('-s', '--species_name', help='Name of the species belonging to the genome')
    add_and_analysis.add_argument('-g', '--genus_name', help='Name of the genus from where the organism stems from')
    add_and_analysis.add_argument('-v', '--version', help='Version of the genome to be forged', default="1")
    add_and_analysis.add_argument('-ac', '--assembly_code', help="""Assembly code used for your organism in the RnBeads
    package""")
    add_and_analysis.add_argument('-o', '--output', help='Output file (needed for galaxy)', default=None)
    add_and_analysis.add_argument('-lp', '--lib_path', help='Library installation folder for R packages.',
                                  default="c()")
    add_and_analysis.add_argument('-mr', '--minimal_reads', help='Number of minimal reads per sample on one CpG site',
                                  default=5)
    add_and_analysis.add_argument('-sd', '--script_dir', help='directory of script', default=None)
    # If chosen: every main function will be executed except for the run_analysis function.
    add_only = subparsers.add_parser('add_only', help="Only add the genome and assembly to the RnBeads package.")
    add_only.add_argument('-f', '--fasta', help='Fasta input file of the new genome', default=None)
    add_only.add_argument('-b', '--bed', help='Bed input file to be analysed. (optional if already done)', default=None)
    add_only.add_argument('-sf', '--sample_file', help='Sample file location.')
    add_only.add_argument('-tmp', '--temp_directory', help='temp directory', default="/tmp/")
    add_only.add_argument('-s', '--species_name', help='Name of the species belonging to the genome')
    add_only.add_argument('-g', '--genus_name', help='Name of the genus from where the organism stems from')
    add_only.add_argument('-v', '--version', help='Version of the genome to be forged', default="1")
    add_only.add_argument('-ac', '--assembly_code', help='Assembly code used for your organism in the RnBeads package')
    add_only.add_argument('-lp', '--lib_path', help='Library installation folder for R packages.', default="c()")
    add_only.add_argument('-mr', '--minimal_reads', help='Number of minimal reads per sample on one CpG site',
                          default=5)
    add_only.add_argument('-sd', '--script_dir', help='directory of script', default=None)
    # Parses the arguments to the args variable.
    args = parser.parse_args()
    return args
def clear_tmp(trash):
    """Best-effort removal of every file or directory listed in *trash*.

    :param trash: iterable of filesystem paths collected during the run;
        each entry is removed if it still exists.

    Previously only ``os.remove`` errors were swallowed while a failing
    ``rmtree`` aborted the whole cleanup; both are now treated the same so
    one bad entry cannot prevent removal of the remaining items.
    """
    for item in trash:
        try:
            if os.path.isdir(item):
                rmtree(item)
            else:
                os.remove(item)
        except OSError:
            # Cleanup is best effort: a missing or busy entry must not
            # abort removal of the remaining items.
            pass
# Script entry point: invoke main() only when this file is executed
# directly, not when it is imported as a module.
if __name__ == '__main__':
    main()
|
"""Definitions of tests for component metadata stored in the AWS S3 database."""
from behave import given, then, when
from src.attribute_checks import *
from src.s3interface import *
from src.utils import split_comma_separated_list
@then('I should find the correct component core data for package {package} version {version} '
      'from ecosystem {ecosystem}')
@then('I should find the correct component toplevel metadata for package {package} '
      'version {version} ecosystem {ecosystem} with latest version {version2}')
def check_component_core_data(context, package, version, ecosystem, version2=None):
    """Check the component core data read from the AWS S3 database.

    The original second step pattern reused the capture name {version} for
    both the analysed version and the latest version; one name cannot be
    bound to two different values, so the second capture is renamed to
    {version2}.  It defaults to None because the first step variant does
    not capture it.

    Expected format (with an example data):
    {
        "analyses": ["security_issues", "metadata", ...],
        "audit": null,
        "dependents_count": -1,
        "ecosystem": "pypi",
        "finished_at": "2017-10-06T13:41:43.450021",
        "id": 1,
        "latest_version": "0.2.4",
        "package": "clojure_py",
        "package_info": {"dependents_count": -1, "relative_usage": "not used"},
        "release": "pypi:clojure_py:0.2.4",
        "started_at": "2017-10-06T13:39:30.134801",
        "subtasks": null,
        "version": "0.2.4"
    }
    """
    data = context.s3_data
    # both timestamps must exist and be well-formed
    started_at = check_and_get_attribute(data, "started_at")
    check_timestamp(started_at)
    finished_at = check_and_get_attribute(data, "finished_at")
    check_timestamp(finished_at)
    actual_ecosystem = check_and_get_attribute(data, "ecosystem")
    assert ecosystem == actual_ecosystem, "Ecosystem {e1} differs from expected " \
        "ecosystem {e2}".format(e1=actual_ecosystem, e2=ecosystem)
    actual_package = check_and_get_attribute(data, "package")
    assert package == actual_package, "Package {p1} differs from expected " \
        "package {p2}".format(p1=actual_package, p2=package)
    actual_version = check_and_get_attribute(data, "version")
    assert version == actual_version, "Version {v1} differs from expected " \
        "version {v2}".format(v1=actual_version, v2=version)
    actual_release = check_and_get_attribute(data, "release")
    release = release_string(ecosystem, package, version)
    assert actual_release == release, "Release string {r1} differs from expected " \
        "value {r2}".format(r1=actual_release, r2=release)
    # the following attributes are expected to be present for all component
    # toplevel metadata
    attributes_to_check = ["id", "analyses", "audit", "dependents_count", "latest_version",
                           "package_info", "subtasks"]
    check_attributes_presence(data, attributes_to_check)
    # NOTE: 'analyses' subnode has to be checked in explicit test steps
def _node_items_to_check(context, items, node):
    """Split the expected item list and fetch the named node from S3 data."""
    wanted = split_comma_separated_list(items)
    assert wanted is not None
    found = check_and_get_attribute(context.s3_data, node)
    assert found is not None
    return found, wanted
@then('I should find the following items ({items}) in the {node} node')
def check_expected_items_in_node(context, items, node):
    """Verify that every expected item is present in the given node."""
    found, wanted = _node_items_to_check(context, items, node)
    check_attributes_presence(found, wanted)
@then('I should not find any items apart from ({items}) in the {node} node')
def check_unexpected_items_in_node(context, items, node):
    """Verify that the given node contains no items beyond the expected ones."""
    found, wanted = _node_items_to_check(context, items, node)
    offenders = [item for item in found if item not in wanted]
    if offenders:
        # report the first unexpected item, like the original linear scan did
        print(offenders[0])
        raise Exception("Unexpected item has been found: {item}".format(
            item=offenders[0]))
@then('I should find that the latest component version is {version}')
def check_component_latest_version(context, version):
    """Check the 'latest_version' attribute stored in component metadata."""
    actual = check_and_get_attribute(context.s3_data, "latest_version")
    assert version == actual, "Latest version should be set to {v1}, " \
        "but {v2} has been found instead".format(v1=version, v2=actual)
@then('I should find the correct dependency snapshot data for package {package} version {version} '
      'from ecosystem {ecosystem}')
def check_component_dependency_snapshot_data(context, package, version, ecosystem):
    """Check the dependency snapshot metadata for the given component."""
    document = context.s3_data
    # shared sanity checks for analysis documents stored in S3
    check_audit_metadata(document)
    check_release_attribute(document, ecosystem, package, version)
    check_schema_attribute(document, "dependency_snapshot", "1-0-0")
    check_status_attribute(document)
    check_summary_attribute(document)
@then('I should find {num:d} runtime details in dependency snapshot')
def check_runtime_dependency_count(context, num):
    """Check the number of runtime details for selected component."""
    details = check_and_get_attribute(context.s3_data, "details")
    runtime = check_and_get_attribute(details, "runtime")
    found = len(runtime)
    assert found == num, "Expected {n1} runtime details, but found {n2}".format(
        n1=num, n2=found)
@then('I should find {num:d} dependencies in dependency snapshot summary')
def check_runtime_dependency_count_in_summary(context, num):
    """Check the number of dependencies in dependency snapshot summary."""
    summary = check_and_get_attribute(context.s3_data, "summary")
    counts = check_and_get_attribute(summary, "dependency_counts")
    found = int(check_and_get_attribute(counts, "runtime"))
    assert found == num, "Expected {n1} runtime dependency counts, but found {n2}".format(
        n1=num, n2=found)
@then('I should find the correct digest data for package {package} version {version} '
      'from ecosystem {ecosystem}')
def check_component_digest_data(context, package, version, ecosystem):
    """Check the digest data for the given package, version, and ecosystem."""
    document = context.s3_data
    check_audit_metadata(document)
    check_release_attribute(document, ecosystem, package, version)
    check_schema_attribute(document, "digests", "1-0-0")
    check_status_attribute(document)
    check_summary_attribute(document)
    # the digest records themselves live under the 'details' attribute
    check_attribute_presence(document, "details")
@then('I should find digest metadata {selector} set to {expected_value}')
def check_component_digest_metadata_value(context, selector, expected_value):
    """Check if the digest metadata can be found for the component."""
    details = check_and_get_attribute(context.s3_data, "details")
    # short-circuits on the first matching detail record
    if not any(check_and_get_attribute(detail, selector) == expected_value
               for detail in details):
        raise Exception('Can not find the digest metadata {selector} set to {expected_value}'.format(
            selector=selector, expected_value=expected_value))
@then('I should find the correct keywords tagging data for package {package} version {version} '
      'from ecosystem {ecosystem}')
def check_component_keywords_tagging_data(context, package, version, ecosystem):
    """Check the keywords tagging metadata for given component."""
    document = context.s3_data
    check_audit_metadata(document)
    check_release_attribute(document, ecosystem, package, version)
    # no schema to check (yet?)
    # tracked here: https://github.com/openshiftio/openshift.io/issues/1074
    check_status_attribute(document)
    check_summary_attribute(document)
@then('I should find the correct metadata for package {package} version {version} '
      'from ecosystem {ecosystem}')
def check_component_metadata_data(context, package, version, ecosystem):
    """Check the basic component metadata in the AWS S3 database."""
    document = context.s3_data
    check_audit_metadata(document)
    check_release_attribute(document, ecosystem, package, version)
    check_schema_attribute(document, "metadata", "3-2-0")
    check_status_attribute(document)
    check_summary_attribute(document)
@then('I should find that author of this project is {author}')
def check_package_author(context, author):
    """Check the author recorded in the first details record."""
    detail = get_details_node(context)[0]
    actual_author = check_and_get_attribute(detail, "author")
    # prefix match: the stored author may include an e-mail address
    assert actual_author.startswith(author), \
        "Expected author {a1}, but {a2} has been found instead".format(
            a1=author, a2=actual_author)
@then('I should find that the project use {vcs} as a version control system')
def check_vsc(context, vcs):
    """Check the type of version control system for the component."""
    repository = check_and_get_attribute(get_details_node(context)[0], "code_repository")
    actual_vcs = check_and_get_attribute(repository, "type")
    assert actual_vcs == vcs.lower(), "Expected {v1} version control system type, " \
        "but {v2} has been found instead".format(v1=vcs, v2=actual_vcs)
@then('I should find that the repository can be found at {url}')
def check_repository_url(context, url):
    """Check the repository URL (if set) for the component."""
    repository = check_and_get_attribute(get_details_node(context)[0], "code_repository")
    actual_url = check_and_get_attribute(repository, "url")
    assert actual_url == url, "Repository URL should be set to {u1}, " \
        "but {u2} has been found instead".format(u1=url, u2=actual_url)
@then('I should find that the project homepage can be found at {url}')
def check_project_homepage(context, url):
    """Check the project homepage (if exist) for the component."""
    homepage = check_and_get_attribute(get_details_node(context)[0], "homepage")
    assert homepage == url, "Homepage URL should be set to {u1}, " \
        "but {u2} has been found instead".format(u1=url, u2=homepage)
@then('I should find that the package description is {description}')
def check_project_description(context, description):
    """Check the package description existence and content."""
    found = check_and_get_attribute(get_details_node(context)[0], "description")
    assert found == description, "Description is set to {d1}, " \
        "but {d2} is expected".format(d1=found, d2=description)
@then('I should find that the package name is {name} and version is {version}')
def check_package_name_and_version(context, name, version):
    """Check the package name and version in the first details record."""
    detail = get_details_node(context)[0]
    actual_name = check_and_get_attribute(detail, "name")
    actual_version = check_and_get_attribute(detail, "version")
    assert name == actual_name, "Name '{n1}' is different from " \
        "expected name '{n2}'".format(n1=actual_name, n2=name)
    assert version == actual_version, "Version {v1} is different from expected " \
        "version {v2}".format(v1=actual_version, v2=version)
@then('I should find the correct security issues data for package {package} version {version} '
      'from ecosystem {ecosystem}')
def check_component_security_issues_data(context, package, version, ecosystem):
    """Check the security issues metadata for the given component."""
    data = context.s3_data
    check_audit_metadata(data)
    check_release_attribute(data, ecosystem, package, version)
    check_schema_attribute(data, "security_issues", "3-0-1")
    check_status_attribute(data)
    check_summary_attribute(data)
    details = check_and_get_attribute(data, "details")
    # issue records are reported as a list; isinstance (rather than an exact
    # type comparison) also accepts list subclasses
    assert isinstance(details, list), "Expected a list of details"
@then('I should find the correct source licenses data for package {package} version {version} '
      'from ecosystem {ecosystem}')
def check_component_source_licenses_data(context, package, version, ecosystem):
    """Check that the component has assigned correct source licenses metadata."""
    document = context.s3_data
    check_audit_metadata(document)
    check_release_attribute(document, ecosystem, package, version)
    check_schema_attribute(document, "source_licenses", "3-0-0")
    check_status_attribute(document)
    check_summary_attribute(document)
@then('I should find that the package uses {license} license')
def check_package_license(context, license):
    """Check that the package has assigned given license."""
    # NOTE(review): unlike the other detail checks this reads the whole
    # details node, not its first element — confirm the 'licenses' attribute
    # really lives at this level.
    licenses = check_and_get_attribute(get_details_node(context), "licenses")
    assert license in licenses, "Can not find license {lic}".format(lic=license)
@when('I read component toplevel metadata for the package {package} version {version} in ecosystem '
      '{ecosystem} from the AWS S3 database bucket {bucket}')
def read_core_data_from_bucket(context, package, version, ecosystem, bucket):
    """Read the component toplevel metadata and remember it in the context."""
    key = S3Interface.component_key(ecosystem, package, version)
    payload = context.s3interface.read_object(bucket, key)
    assert payload is not None
    context.s3_data = payload
More precise specification of the component data test step.
"""Definitions of tests for component metadata stored in the AWS S3 database."""
from behave import given, then, when
from src.attribute_checks import *
from src.s3interface import *
from src.utils import split_comma_separated_list
@then('I should find the correct component core data for package {package} version {version} '
      'from ecosystem {ecosystem}')
@then('I should find the correct component toplevel metadata for package {package:S} '
      'version {version:S} ecosystem {ecosystem:S} with latest version {version2:S}')
def check_component_core_data(context, package, version, ecosystem, version2=None):
    """Check the component core data read from the AWS S3 database.

    The first step variant does not capture {version2}; without a default
    value behave would call this function with a missing argument and raise
    TypeError, so version2 defaults to None.

    Expected format (with an example data):
    {
        "analyses": ["security_issues", "metadata", ...],
        "audit": null,
        "dependents_count": -1,
        "ecosystem": "pypi",
        "finished_at": "2017-10-06T13:41:43.450021",
        "id": 1,
        "latest_version": "0.2.4",
        "package": "clojure_py",
        "package_info": {"dependents_count": -1, "relative_usage": "not used"},
        "release": "pypi:clojure_py:0.2.4",
        "started_at": "2017-10-06T13:39:30.134801",
        "subtasks": null,
        "version": "0.2.4"
    }
    """
    data = context.s3_data
    # both timestamps must exist and be well-formed
    started_at = check_and_get_attribute(data, "started_at")
    check_timestamp(started_at)
    finished_at = check_and_get_attribute(data, "finished_at")
    check_timestamp(finished_at)
    actual_ecosystem = check_and_get_attribute(data, "ecosystem")
    assert ecosystem == actual_ecosystem, "Ecosystem {e1} differs from expected " \
        "ecosystem {e2}".format(e1=actual_ecosystem, e2=ecosystem)
    actual_package = check_and_get_attribute(data, "package")
    assert package == actual_package, "Package {p1} differs from expected " \
        "package {p2}".format(p1=actual_package, p2=package)
    actual_version = check_and_get_attribute(data, "version")
    assert version == actual_version, "Version {v1} differs from expected " \
        "version {v2}".format(v1=actual_version, v2=version)
    actual_release = check_and_get_attribute(data, "release")
    release = release_string(ecosystem, package, version)
    assert actual_release == release, "Release string {r1} differs from expected " \
        "value {r2}".format(r1=actual_release, r2=release)
    # the following attributes are expected to be present for all component
    # toplevel metadata
    attributes_to_check = ["id", "analyses", "audit", "dependents_count", "latest_version",
                           "package_info", "subtasks"]
    check_attributes_presence(data, attributes_to_check)
    # NOTE: 'analyses' subnode has to be checked in explicit test steps
def _node_items_to_check(context, items, node):
    """Split the expected item list and fetch the named node from S3 data."""
    wanted = split_comma_separated_list(items)
    assert wanted is not None
    found = check_and_get_attribute(context.s3_data, node)
    assert found is not None
    return found, wanted
@then('I should find the following items ({items}) in the {node} node')
def check_expected_items_in_node(context, items, node):
    """Verify that every expected item is present in the given node."""
    found, wanted = _node_items_to_check(context, items, node)
    check_attributes_presence(found, wanted)
@then('I should not find any items apart from ({items}) in the {node} node')
def check_unexpected_items_in_node(context, items, node):
    """Verify that the given node contains no items beyond the expected ones."""
    found, wanted = _node_items_to_check(context, items, node)
    offenders = [item for item in found if item not in wanted]
    if offenders:
        # report the first unexpected item, like the original linear scan did
        print(offenders[0])
        raise Exception("Unexpected item has been found: {item}".format(
            item=offenders[0]))
@then('I should find that the latest component version is {version}')
def check_component_latest_version(context, version):
    """Check the 'latest_version' attribute stored in component metadata."""
    actual = check_and_get_attribute(context.s3_data, "latest_version")
    assert version == actual, "Latest version should be set to {v1}, " \
        "but {v2} has been found instead".format(v1=version, v2=actual)
@then('I should find the correct dependency snapshot data for package {package} version {version} '
      'from ecosystem {ecosystem}')
def check_component_dependency_snapshot_data(context, package, version, ecosystem):
    """Check the dependency snapshot metadata for the given component."""
    document = context.s3_data
    # shared sanity checks for analysis documents stored in S3
    check_audit_metadata(document)
    check_release_attribute(document, ecosystem, package, version)
    check_schema_attribute(document, "dependency_snapshot", "1-0-0")
    check_status_attribute(document)
    check_summary_attribute(document)
@then('I should find {num:d} runtime details in dependency snapshot')
def check_runtime_dependency_count(context, num):
    """Check the number of runtime details for selected component."""
    details = check_and_get_attribute(context.s3_data, "details")
    runtime = check_and_get_attribute(details, "runtime")
    found = len(runtime)
    assert found == num, "Expected {n1} runtime details, but found {n2}".format(
        n1=num, n2=found)
@then('I should find {num:d} dependencies in dependency snapshot summary')
def check_runtime_dependency_count_in_summary(context, num):
    """Check the number of dependencies in dependency snapshot summary."""
    summary = check_and_get_attribute(context.s3_data, "summary")
    counts = check_and_get_attribute(summary, "dependency_counts")
    found = int(check_and_get_attribute(counts, "runtime"))
    assert found == num, "Expected {n1} runtime dependency counts, but found {n2}".format(
        n1=num, n2=found)
@then('I should find the correct digest data for package {package} version {version} '
      'from ecosystem {ecosystem}')
def check_component_digest_data(context, package, version, ecosystem):
    """Check the digest data for the given package, version, and ecosystem."""
    document = context.s3_data
    check_audit_metadata(document)
    check_release_attribute(document, ecosystem, package, version)
    check_schema_attribute(document, "digests", "1-0-0")
    check_status_attribute(document)
    check_summary_attribute(document)
    # the digest records themselves live under the 'details' attribute
    check_attribute_presence(document, "details")
@then('I should find digest metadata {selector} set to {expected_value}')
def check_component_digest_metadata_value(context, selector, expected_value):
    """Check if the digest metadata can be found for the component."""
    details = check_and_get_attribute(context.s3_data, "details")
    # short-circuits on the first matching detail record
    if not any(check_and_get_attribute(detail, selector) == expected_value
               for detail in details):
        raise Exception('Can not find the digest metadata {selector} set to {expected_value}'.format(
            selector=selector, expected_value=expected_value))
@then('I should find the correct keywords tagging data for package {package} version {version} '
      'from ecosystem {ecosystem}')
def check_component_keywords_tagging_data(context, package, version, ecosystem):
    """Check the keywords tagging metadata for given component."""
    document = context.s3_data
    check_audit_metadata(document)
    check_release_attribute(document, ecosystem, package, version)
    # no schema to check (yet?)
    # tracked here: https://github.com/openshiftio/openshift.io/issues/1074
    check_status_attribute(document)
    check_summary_attribute(document)
@then('I should find the correct metadata for package {package} version {version} '
      'from ecosystem {ecosystem}')
def check_component_metadata_data(context, package, version, ecosystem):
    """Check the basic component metadata in the AWS S3 database."""
    document = context.s3_data
    check_audit_metadata(document)
    check_release_attribute(document, ecosystem, package, version)
    check_schema_attribute(document, "metadata", "3-2-0")
    check_status_attribute(document)
    check_summary_attribute(document)
@then('I should find that author of this project is {author}')
def check_package_author(context, author):
    """Check the author recorded in the first details record."""
    detail = get_details_node(context)[0]
    actual_author = check_and_get_attribute(detail, "author")
    # prefix match: the stored author may include an e-mail address
    assert actual_author.startswith(author), \
        "Expected author {a1}, but {a2} has been found instead".format(
            a1=author, a2=actual_author)
@then('I should find that the project use {vcs} as a version control system')
def check_vsc(context, vcs):
    """Check the type of version control system for the component."""
    repository = check_and_get_attribute(get_details_node(context)[0], "code_repository")
    actual_vcs = check_and_get_attribute(repository, "type")
    assert actual_vcs == vcs.lower(), "Expected {v1} version control system type, " \
        "but {v2} has been found instead".format(v1=vcs, v2=actual_vcs)
@then('I should find that the repository can be found at {url}')
def check_repository_url(context, url):
    """Check the repository URL (if set) for the component."""
    repository = check_and_get_attribute(get_details_node(context)[0], "code_repository")
    actual_url = check_and_get_attribute(repository, "url")
    assert actual_url == url, "Repository URL should be set to {u1}, " \
        "but {u2} has been found instead".format(u1=url, u2=actual_url)
@then('I should find that the project homepage can be found at {url}')
def check_project_homepage(context, url):
    """Check the project homepage (if exist) for the component."""
    homepage = check_and_get_attribute(get_details_node(context)[0], "homepage")
    assert homepage == url, "Homepage URL should be set to {u1}, " \
        "but {u2} has been found instead".format(u1=url, u2=homepage)
@then('I should find that the package description is {description}')
def check_project_description(context, description):
    """Check the package description existence and content."""
    found = check_and_get_attribute(get_details_node(context)[0], "description")
    assert found == description, "Description is set to {d1}, " \
        "but {d2} is expected".format(d1=found, d2=description)
@then('I should find that the package name is {name} and version is {version}')
def check_package_name_and_version(context, name, version):
    """Check the package name and version in the first details record."""
    detail = get_details_node(context)[0]
    actual_name = check_and_get_attribute(detail, "name")
    actual_version = check_and_get_attribute(detail, "version")
    assert name == actual_name, "Name '{n1}' is different from " \
        "expected name '{n2}'".format(n1=actual_name, n2=name)
    assert version == actual_version, "Version {v1} is different from expected " \
        "version {v2}".format(v1=actual_version, v2=version)
@then('I should find the correct security issues data for package {package} version {version} '
      'from ecosystem {ecosystem}')
def check_component_security_issues_data(context, package, version, ecosystem):
    """Check the security issues metadata for the given component."""
    data = context.s3_data
    check_audit_metadata(data)
    check_release_attribute(data, ecosystem, package, version)
    check_schema_attribute(data, "security_issues", "3-0-1")
    check_status_attribute(data)
    check_summary_attribute(data)
    details = check_and_get_attribute(data, "details")
    # issue records are reported as a list; isinstance (rather than an exact
    # type comparison) also accepts list subclasses
    assert isinstance(details, list), "Expected a list of details"
@then('I should find the correct source licenses data for package {package} version {version} '
      'from ecosystem {ecosystem}')
def check_component_source_licenses_data(context, package, version, ecosystem):
    """Check that the component has assigned correct source licenses metadata."""
    document = context.s3_data
    check_audit_metadata(document)
    check_release_attribute(document, ecosystem, package, version)
    check_schema_attribute(document, "source_licenses", "3-0-0")
    check_status_attribute(document)
    check_summary_attribute(document)
@then('I should find that the package uses {license} license')
def check_package_license(context, license):
    """Check that the package has assigned given license."""
    # NOTE(review): unlike the other detail checks this reads the whole
    # details node, not its first element — confirm the 'licenses' attribute
    # really lives at this level.
    licenses = check_and_get_attribute(get_details_node(context), "licenses")
    assert license in licenses, "Can not find license {lic}".format(lic=license)
@when('I read component toplevel metadata for the package {package} version {version} in ecosystem '
      '{ecosystem} from the AWS S3 database bucket {bucket}')
def read_core_data_from_bucket(context, package, version, ecosystem, bucket):
    """Read the component toplevel metadata and remember it in the context."""
    key = S3Interface.component_key(ecosystem, package, version)
    payload = context.s3interface.read_object(bucket, key)
    assert payload is not None
    context.s3_data = payload
|
"""
Config file for survey creation
"""
class config:
    """Static configuration consumed by the survey-creation scripts."""
    # Existing survey header entries whose text is replaced in the export.
    # To modify, just add the keys of the dictionary
    header_to_modify = [{'class': 'S', 'name': 'sid', 'text': '421498'},
                        {'class': 'S', 'name': 'admin_email', 'text': 'olivier.philippe@soton.ac.uk'},
                        {'class': 'S', 'name': 'allowprev', 'text': 'Y'}]
    # Add header and description. Tuple of a dictionary + the position where it is supposed
    # to be inserted
    header_to_add = [({'class': 'S', 'name': 'additional_languages', 'text': 'de-informal'}, 12)]
    # Same as header_to_modify
    settings_to_modify = []
    settings_to_add = []
    # NOTE(review): a single language code as a plain string — confirm that
    # consumers accept a scalar here rather than a list of codes.
    languages_to_add = 'de-informal'
    # Index positions start at 0.
    # The survey title is added to the global description at index 0; one
    # entry per language code.
    # NOTE(review): 'scientic' looks like a typo for 'scientific' — verify
    # before the text is referenced elsewhere (e.g. by translations).
    survey_title = {'en': 'Study about people writing scientic software',
                    'de-informal': ''}
    # Per-section name/description, keyed by section index then language.
    sections_txt = {0: {'en': {'name': 'Questions about you', 'text': ''},
                        'de-informal': {'name': 'Fragen zu Deiner Person', 'text': ''}},
                    1: {'en': {'name': 'Your current employment', 'text': ''},
                        'de-informal': {'name': 'Fragen zu Deiner jetzigen Anstellung', 'text': ''}},
                    2: {'en': {'name': 'Your employment history', 'text': ''},
                        'de-informal': {'name': 'Fragen zu Deinen bisherigen Anstellung(en)', 'text': ''}},
                    3: {'en': {'name': 'Your working practices', 'text': ''},
                        'de-informal': {'name': 'Fragen zu Deinen Arbeitsgewohnheiten', 'text': ''}},
                    4: {'en': {'name': 'Your perception of your current position', 'text': ''},
                        'de-informal': {'name': 'Deine Wahrnehmung Deiner aktuellen Position', 'text': ''}},
                    5: {'en': {'name': 'Demographic questions', 'text': ''},
                        'de-informal': {'name': 'Demographische Fragen', 'text': ''}},
                    6: {'en': {'name': 'Final questions about you', 'text': ''},
                        'de-informal': {'name': 'Die letzen Fragen über Dich', 'text': ''}}}
    # Disclaimer text shown next to questions whose answers stay private.
    private_data = {'en': 'IMPORTANT: This information will not be made publicly available',
                    'de-informal': 'IMPORTANT: This information will not be made publicly available [DE-INFORMAL]'}
Add French translation to the headers.
"""
Config file for survey creation
"""
class config:
    """Static configuration consumed by the survey-creation scripts."""
    # Existing survey header entries whose text is replaced in the export.
    # To modify, just add the keys of the dictionary
    header_to_modify = [{'class': 'S', 'name': 'sid', 'text': '421498'},
                        {'class': 'S', 'name': 'admin_email', 'text': 'olivier.philippe@soton.ac.uk'},
                        {'class': 'S', 'name': 'allowprev', 'text': 'Y'}]
    # Add header and description. Tuple of a dictionary + the position where it is supposed
    # to be inserted
    # NOTE(review): 'additional_languages' here still lists only 'de-informal'
    # although 'fr' was added below — confirm whether it needs updating too.
    header_to_add = [({'class': 'S', 'name': 'additional_languages', 'text': 'de-informal'}, 12)]
    # Same as header_to_modify
    settings_to_modify = []
    settings_to_add = []
    # Language codes (besides 'en') the survey is translated into.
    languages_to_add = ['de-informal', 'fr']
    # Index positions start at 0.
    # The survey title is added to the global description at index 0; one
    # entry per language code.
    # NOTE(review): 'scientic' looks like a typo for 'scientific' — verify
    # before the text is referenced elsewhere (e.g. by translations).
    survey_title = {'en': 'Study about people writing scientic software',
                    'de-informal': '',
                    'fr': 'Etude sur les personnes écrivant des programmes informatiques scientifiques'}
    # Per-section name/description, keyed by section index then language.
    sections_txt = {0: {'en': {'name': 'Questions about you', 'text': ''},
                        'de-informal': {'name': 'Fragen zu Deiner Person', 'text': ''},
                        'fr': {'name': 'Questions à propos de vous', 'text': ''}},
                    1: {'en': {'name': 'Your current employment', 'text': ''},
                        'de-informal': {'name': 'Fragen zu Deiner jetzigen Anstellung', 'text': ''},
                        'fr': {'name': 'Votre position actuelle', 'text': ''}},
                    2: {'en': {'name': 'Your employment history', 'text': ''},
                        'de-informal': {'name': 'Fragen zu Deinen bisherigen Anstellung(en)', 'text': ''},
                        'fr': {'name': 'Votre passé professionel', 'text': ''}},
                    3: {'en': {'name': 'Your working practices', 'text': ''},
                        'de-informal': {'name': 'Fragen zu Deinen Arbeitsgewohnheiten', 'text': ''},
                        'fr': {'name': 'Vos pratiques profesionnelles', 'text': ''}},
                    4: {'en': {'name': 'Your perception of your current position', 'text': ''},
                        'de-informal': {'name': 'Deine Wahrnehmung Deiner aktuellen Position', 'text': ''},
                        'fr': {'name': 'Votre perception sur votre position actuelle', 'text': ''}},
                    5: {'en': {'name': 'Demographic questions', 'text': ''},
                        'de-informal': {'name': 'Demographische Fragen', 'text': ''},
                        'fr': {'name': 'Questions démographiques', 'text': ''}},
                    6: {'en': {'name': 'Final questions about you', 'text': ''},
                        'de-informal': {'name': 'Die letzen Fragen über Dich', 'text': ''},
                        'fr': {'name': 'Dernières questions a propos de vous', 'text': ''}}}
    # Disclaimer text shown next to questions whose answers stay private.
    private_data = {'en': 'IMPORTANT: This information will not be made publicly available',
                    'de-informal': 'IMPORTANT: This information will not be made publicly available [DE-INFORMAL]',
                    'fr': 'IMPORTANT: Cette information ne sera pas rendue publique [FR]'}
|
# Copyright (c) 2017 The Regents of the University of Michigan
# All rights reserved.
# This software is licensed under the BSD 3-Clause License.
import os
import errno
import logging
import shutil
import uuid
from ..common import six
from ..core.json import json
from ..core.attr_dict import SyncedAttrDict
from .hashing import calc_id
from .utility import _mkdir_p
from .errors import DestinationExistsError
logger = logging.getLogger(__name__)
class Job(object):
    """The job instance is a handle to the data of a unique statepoint.

    Application developers should usually not need to directly
    instantiate this class, but use :meth:`~.project.Project.open_job`
    instead."""
    FN_MANIFEST = 'signac_statepoint.json'
    """The job's manifest filename.

    The job manifest is a human-readable dump of the job's
    statepoint, stored in each workspace directory.
    """
    FN_DOCUMENT = 'signac_job_document.json'
    "The job's document filename."
    def __init__(self, project, statepoint):
        """Create a job handle for *statepoint* within *project*.

        :param project: the owning project; used to resolve the workspace root.
        :param statepoint: mapping with the parameters that identify this job.
        """
        self._project = project
        self._sp = None
        # Round-trip through JSON: deep-copies the mapping and normalizes it
        # to JSON-representable types, so the id calculation is stable.
        self._statepoint = json.loads(json.dumps(statepoint))
        self._id = calc_id(self.statepoint())
        # workspace directory is derived from the id, hence unique per statepoint
        self._wd = os.path.join(project.workspace(), self._id)
        self._fn_doc = os.path.join(self._wd, self.FN_DOCUMENT)
        self._document = None
        self._cwd = list()
    def get_id(self):
        """The unique identifier for the job's statepoint.

        :return: The job id.
        :rtype: str"""
        return self._id

    def __hash__(self):
        # Hash on the workspace path, which encodes the job id and is
        # therefore unique per statepoint within a project.
        return hash(self._wd)

    def __str__(self):
        "Returns the job's id."
        return str(self.get_id())
def __repr__(self):
return "{}(project={}, statepoint={})".format(
self.__class__.__module__ + '.' + self.__class__.__name__,
repr(self._project), self._statepoint)
def __eq__(self, other):
return hash(self) == hash(other)
    def workspace(self):
        """Each job is associated with a unique workspace directory.

        :return: The path to the job's workspace directory.
        :rtype: str"""
        return self._wd

    @property
    def ws(self):
        "Shorthand alias for :meth:`workspace`."
        return self.workspace()
def reset_statepoint(self, new_statepoint):
"""Reset the state point of this job.
.. danger::
Use this function with caution! Resetting a job's state point,
may sometimes be necessary, but can possibly lead to incoherent
data spaces.
:param new_statepoint: The job's new state point.
:type new_statepoint: mapping
:raises DestinationExistsError:
If a job associated with the new state point is already initialized.
:raises OSError:
If the move failed due to an unknown system related error.
"""
dst = self._project.open_job(new_statepoint)
if dst == self:
return
fn_manifest = os.path.join(self.workspace(), self.FN_MANIFEST)
fn_manifest_backup = fn_manifest + '~'
try:
os.rename(fn_manifest, fn_manifest_backup)
try:
os.rename(self.workspace(), dst.workspace())
except OSError as error:
os.rename(fn_manifest_backup, fn_manifest) # rollback
if error.errno == errno.ENOTEMPTY:
raise DestinationExistsError(dst)
else:
raise
else:
dst.init()
except OSError as error:
if error.errno == errno.ENOENT:
pass # job is not initialized
else:
raise
logger.info("Moved '{}' -> '{}'.".format(self, dst))
dst._statepoint = self._statepoint
self.__dict__.update(dst.__dict__)
def _reset_sp(self, new_sp=None):
if new_sp is None:
new_sp = self.statepoint()
self.reset_statepoint(new_sp)
def update_statepoint(self, update, overwrite=False):
"""Update the statepoint of this job.
.. warning::
While appending to a job's state point is generally safe,
modifying existing parameters may lead to data
inconsistency. Use the overwrite argument with caution!
:param update: A mapping used for the statepoint update.
:type update: mapping
:param overwrite:
Set to true, to ignore whether this update overwrites parameters,
which are currently part of the job's state point. Use with caution!
:raises KeyError:
If the update contains keys, which are already part of the job's
state point and overwrite is False.
:raises DestinationExistsError:
If a job associated with the new state point is already initialized.
:raises OSError:
If the move failed due to an unknown system related error.
"""
statepoint = self.statepoint()
if not overwrite:
for key, value in update.items():
if statepoint.get(key, value) != value:
raise KeyError(key)
statepoint.update(update)
self.reset_statepoint(statepoint)
@property
def statepoint(self):
"Access the job's state point as attribute dictionary."
if self._sp is None:
self._sp = SyncedAttrDict(self._statepoint, load=None, save=self._reset_sp)
return self._sp
@statepoint.setter
def statepoint(self, new_sp):
self._reset_sp(new_sp)
@property
def sp(self):
return self.statepoint
@sp.setter
def sp(self, new_sp):
self.statepoint = new_sp
def _read_document(self):
try:
with open(self._fn_doc, 'rb') as file:
return json.loads(file.read().decode())
except FileNotFoundError as e:
return dict()
def _reset_document(self, new_doc=None):
if new_doc is None:
new_doc = self.document()
dirname, filename = os.path.split(self._fn_doc)
fn_tmp = os.path.join(dirname, '._{uid}_{fn}'.format(
uid=uuid.uuid4(), fn=filename))
with open(fn_tmp, 'wb') as tmpfile:
tmpfile.write(json.dumps(new_doc).encode())
if six.PY2:
os.rename(fn_tmp, self._fn_doc)
else:
os.replace(fn_tmp, self._fn_doc)
@property
def document(self):
"""The document associated with this job.
:return: The job document handle.
:rtype: :class:`~.JSonDict`"""
if self._document is None:
self._create_directory()
self._document = SyncedAttrDict(
self._read_document(), load=self._read_document, save=self._reset_document)
return self._document
@document.setter
def document(self, new_doc):
self._reset_document(new_doc)
@property
def doc(self):
return self.document
@doc.setter
def doc(self, new_doc):
self.document = new_doc
def _create_directory(self, overwrite=False):
"Create the workspace directory and write the manifest file."
fn_manifest = os.path.join(self.workspace(), self.FN_MANIFEST)
# Create the workspace directory if it did not exist yet.
_mkdir_p(self.workspace())
try:
# Ensure to create the binary to write before file creation
blob = json.dumps(self.statepoint(), indent=2)
try:
# Open the file for writing only if it does not exist yet.
if six.PY2:
# Adapted from: http://stackoverflow.com/questions/10978869/
if overwrite:
flags = os.O_CREAT | os.O_WRONLY
else:
flags = os.O_CREAT | os.O_WRONLY | os.O_EXCL
try:
fd = os.open(fn_manifest, flags)
except OSError as error:
if error.errno != errno.EEXIST:
raise
else:
with os.fdopen(fd, 'w') as file:
file.write(blob)
else:
with open(fn_manifest, 'w' if overwrite else 'x') as file:
file.write(blob)
except IOError as error:
if not error.errno == errno.EEXIST:
raise
except Exception as error:
# Attempt to delete the file on error, to prevent corruption.
try:
os.remove(fn_manifest)
except Exception: # ignore all errors here
pass
raise error
else:
self._check_manifest()
def _check_manifest(self):
"Check whether the manifest file, if it exists, is correct."
fn_manifest = os.path.join(self.workspace(), self.FN_MANIFEST)
try:
try:
with open(fn_manifest) as file:
assert calc_id(json.loads(file.read())) == self._id
except IOError as error:
if not error.errno == errno.ENOENT:
raise error
except Exception as error:
msg = "Manifest file of job '{}' is corrupted: {}."
raise RuntimeError(msg.format(self, error))
def init(self):
"""Initialize the job's workspace directory.
This function will do nothing if the directory and
the job manifest already exist."""
self._create_directory()
def remove(self):
"""Remove the job's workspace including the job document.
This function will do nothing if the workspace directory
does not exist."""
try:
shutil.rmtree(self.workspace())
except OSError as error:
if error.errno != errno.ENOENT:
raise
else:
if self._document is not None:
try:
self._document.clear()
except IOError as error:
if not error.errno == errno.ENOENT:
raise error
self._document = None
def move(self, project):
"""Move this job to project.
This function will attempt to move this instance of job from
its original project to a different project.
:param project: The project to move this job to.
:type project: :py:class:`~.project.Project`
:raises DestinationExistsError: If the job is already initialized in project.
"""
dst = project.open_job(self.statepoint())
_mkdir_p(project.workspace())
try:
os.rename(self.workspace(), dst.workspace())
except OSError:
raise DestinationExistsError(dst)
self.__dict__.update(dst.__dict__)
def fn(self, filename):
"""Prepend a filename with the job's workspace directory path.
:param filename: The filename of the file.
:type filename: str
:return: The full workspace path of the file."""
return os.path.join(self.workspace(), filename)
def isfile(self, filename):
"""Return True if file exists in the job's workspace.
:param filename: The filename of the file.
:type filename: str
:return: True if file with filename exists in workspace.
:rtype: bool"""
return os.path.isfile(self.fn(filename))
def open(self):
"""Enter the job's workspace directory.
You can use the :class:`~.Job` class as context manager:
.. code-block:: python
with project.open_job(my_statepoint) as job:
# manipulate your job data
Opening the context will switch into the job's workspace,
leaving it will switch back to the previous working directory.
"""
self._cwd.append(os.getcwd())
self._create_directory()
logger.info("Enter workspace '{}'.".format(self.workspace()))
os.chdir(self.workspace())
def close(self):
"Close the job and switch to the previous working directory."
try:
os.chdir(self._cwd.pop())
logger.info("Leave workspace.")
except IndexError:
pass
def __enter__(self):
self.open()
return self
def __exit__(self, err_type, err_value, tb):
self.close()
return False
Fix a Python 2.7 compatibility issue in the job module: catch IOError and check errno instead of the py3-only FileNotFoundError.
# Copyright (c) 2017 The Regents of the University of Michigan
# All rights reserved.
# This software is licensed under the BSD 3-Clause License.
import os
import errno
import logging
import shutil
import uuid
from ..common import six
from ..core.json import json
from ..core.attr_dict import SyncedAttrDict
from .hashing import calc_id
from .utility import _mkdir_p
from .errors import DestinationExistsError
logger = logging.getLogger(__name__)
class Job(object):
    """The job instance is a handle to the data of a unique statepoint.
    Application developers should usually not need to directly
    instantiate this class, but use :meth:`~.project.Project.open_job`
    instead."""
    FN_MANIFEST = 'signac_statepoint.json'
    """The job's manifest filename.
    The job manifest, this means a human-readable dump of the job's\
    statepoint is stored in each workspace directory.
    """
    FN_DOCUMENT = 'signac_job_document.json'
    "The job's document filename."
    def __init__(self, project, statepoint):
        self._project = project
        self._sp = None
        # Round-trip through JSON to normalize the statepoint into plain,
        # JSON-serializable primitives.
        self._statepoint = json.loads(json.dumps(statepoint))
        self._id = calc_id(self.statepoint())
        self._wd = os.path.join(project.workspace(), self._id)
        self._fn_doc = os.path.join(self._wd, self.FN_DOCUMENT)
        self._document = None
        # Stack of previous working directories, maintained by open()/close().
        self._cwd = list()
    def get_id(self):
        """The unique identifier for the job's statepoint.
        :return: The job id.
        :rtype: str"""
        return self._id
    def __hash__(self):
        # Jobs hash by workspace path, so two handles for the same
        # workspace compare equal (see __eq__).
        return hash(self._wd)
    def __str__(self):
        "Returns the job's id."
        return str(self.get_id())
    def __repr__(self):
        return "{}(project={}, statepoint={})".format(
            self.__class__.__module__ + '.' + self.__class__.__name__,
            repr(self._project), self._statepoint)
    def __eq__(self, other):
        return hash(self) == hash(other)
    def workspace(self):
        """Each job is associated with a unique workspace directory.
        :return: The path to the job's workspace directory.
        :rtype: str"""
        return self._wd
    @property
    def ws(self):
        "The job's workspace directory."
        return self.workspace()
    def reset_statepoint(self, new_statepoint):
        """Reset the state point of this job.
        .. danger::
            Use this function with caution! Resetting a job's state point,
            may sometimes be necessary, but can possibly lead to incoherent
            data spaces.
        :param new_statepoint: The job's new state point.
        :type new_statepoint: mapping
        :raises DestinationExistsError:
            If a job associated with the new state point is already initialized.
        :raises OSError:
            If the move failed due to an unknown system related error.
        """
        dst = self._project.open_job(new_statepoint)
        if dst == self:
            return
        fn_manifest = os.path.join(self.workspace(), self.FN_MANIFEST)
        fn_manifest_backup = fn_manifest + '~'
        try:
            # Move the manifest aside first so a failed workspace move can
            # be rolled back without losing the statepoint on disk.
            os.rename(fn_manifest, fn_manifest_backup)
            try:
                os.rename(self.workspace(), dst.workspace())
            except OSError as error:
                os.rename(fn_manifest_backup, fn_manifest)  # rollback
                if error.errno == errno.ENOTEMPTY:
                    raise DestinationExistsError(dst)
                else:
                    raise
            else:
                dst.init()
        except OSError as error:
            if error.errno == errno.ENOENT:
                pass  # job is not initialized
            else:
                raise
        logger.info("Moved '{}' -> '{}'.".format(self, dst))
        dst._statepoint = self._statepoint
        # Turn this instance into a handle for the destination job.
        self.__dict__.update(dst.__dict__)
    def _reset_sp(self, new_sp=None):
        # Callback used by the SyncedAttrDict statepoint to persist changes.
        if new_sp is None:
            new_sp = self.statepoint()
        self.reset_statepoint(new_sp)
    def update_statepoint(self, update, overwrite=False):
        """Update the statepoint of this job.
        .. warning::
            While appending to a job's state point is generally safe,
            modifying existing parameters may lead to data
            inconsistency. Use the overwrite argument with caution!
        :param update: A mapping used for the statepoint update.
        :type update: mapping
        :param overwrite:
            Set to true, to ignore whether this update overwrites parameters,
            which are currently part of the job's state point. Use with caution!
        :raises KeyError:
            If the update contains keys, which are already part of the job's
            state point and overwrite is False.
        :raises DestinationExistsError:
            If a job associated with the new state point is already initialized.
        :raises OSError:
            If the move failed due to an unknown system related error.
        """
        statepoint = self.statepoint()
        if not overwrite:
            # Reject updates that would silently change existing values.
            for key, value in update.items():
                if statepoint.get(key, value) != value:
                    raise KeyError(key)
        statepoint.update(update)
        self.reset_statepoint(statepoint)
    @property
    def statepoint(self):
        "Access the job's state point as attribute dictionary."
        if self._sp is None:
            self._sp = SyncedAttrDict(self._statepoint, load=None, save=self._reset_sp)
        return self._sp
    @statepoint.setter
    def statepoint(self, new_sp):
        self._reset_sp(new_sp)
    @property
    def sp(self):
        # Short-hand alias for the statepoint property.
        return self.statepoint
    @sp.setter
    def sp(self, new_sp):
        self.statepoint = new_sp
    def _read_document(self):
        # Read the job document from disk; a missing file means an empty
        # document. IOError + errno is used (not FileNotFoundError) for
        # Python 2.7 compatibility.
        try:
            with open(self._fn_doc, 'rb') as file:
                return json.loads(file.read().decode())
        except IOError as error:
            if error.errno != errno.ENOENT:
                raise
        return dict()
    def _reset_document(self, new_doc=None):
        # Write the document atomically: dump to a unique temporary file in
        # the same directory, then replace the target in one step.
        if new_doc is None:
            new_doc = self.document()
        dirname, filename = os.path.split(self._fn_doc)
        fn_tmp = os.path.join(dirname, '._{uid}_{fn}'.format(
            uid=uuid.uuid4(), fn=filename))
        with open(fn_tmp, 'wb') as tmpfile:
            tmpfile.write(json.dumps(new_doc).encode())
        if six.PY2:
            # os.replace() is py3-only; os.rename() overwrites on POSIX.
            os.rename(fn_tmp, self._fn_doc)
        else:
            os.replace(fn_tmp, self._fn_doc)
    @property
    def document(self):
        """The document associated with this job.
        :return: The job document handle.
        :rtype: :class:`~.JSonDict`"""
        if self._document is None:
            self._create_directory()
            self._document = SyncedAttrDict(
                self._read_document(), load=self._read_document, save=self._reset_document)
        return self._document
    @document.setter
    def document(self, new_doc):
        self._reset_document(new_doc)
    @property
    def doc(self):
        # Short-hand alias for the document property.
        return self.document
    @doc.setter
    def doc(self, new_doc):
        self.document = new_doc
    def _create_directory(self, overwrite=False):
        "Create the workspace directory and write the manifest file."
        fn_manifest = os.path.join(self.workspace(), self.FN_MANIFEST)
        # Create the workspace directory if it did not exist yet.
        _mkdir_p(self.workspace())
        try:
            # Ensure to create the binary to write before file creation
            blob = json.dumps(self.statepoint(), indent=2)
            try:
                # Open the file for writing only if it does not exist yet.
                if six.PY2:
                    # Python 2's open() has no exclusive-create 'x' mode;
                    # emulate it with os.O_EXCL.
                    # Adapted from: http://stackoverflow.com/questions/10978869/
                    if overwrite:
                        flags = os.O_CREAT | os.O_WRONLY
                    else:
                        flags = os.O_CREAT | os.O_WRONLY | os.O_EXCL
                    try:
                        fd = os.open(fn_manifest, flags)
                    except OSError as error:
                        if error.errno != errno.EEXIST:
                            raise
                    else:
                        with os.fdopen(fd, 'w') as file:
                            file.write(blob)
                else:
                    with open(fn_manifest, 'w' if overwrite else 'x') as file:
                        file.write(blob)
            except IOError as error:
                # An already existing manifest is fine; it is validated below.
                if not error.errno == errno.EEXIST:
                    raise
        except Exception as error:
            # Attempt to delete the file on error, to prevent corruption.
            try:
                os.remove(fn_manifest)
            except Exception:  # ignore all errors here
                pass
            raise error
        else:
            self._check_manifest()
    def _check_manifest(self):
        "Check whether the manifest file, if it exists, is correct."
        fn_manifest = os.path.join(self.workspace(), self.FN_MANIFEST)
        try:
            try:
                with open(fn_manifest) as file:
                    assert calc_id(json.loads(file.read())) == self._id
            except IOError as error:
                # A missing manifest is acceptable here.
                if error.errno != errno.ENOENT:
                    raise error
        except Exception as error:
            msg = "Manifest file of job '{}' is corrupted: {}."
            raise RuntimeError(msg.format(self, error))
    def init(self):
        """Initialize the job's workspace directory.
        This function will do nothing if the directory and
        the job manifest already exist."""
        self._create_directory()
    def remove(self):
        """Remove the job's workspace including the job document.
        This function will do nothing if the workspace directory
        does not exist."""
        try:
            shutil.rmtree(self.workspace())
        except OSError as error:
            if error.errno != errno.ENOENT:
                raise
        else:
            if self._document is not None:
                # Clear the in-memory document so stale data is not written
                # back after the workspace was removed.
                try:
                    self._document.clear()
                except IOError as error:
                    if not error.errno == errno.ENOENT:
                        raise error
                self._document = None
    def move(self, project):
        """Move this job to project.
        This function will attempt to move this instance of job from
        its original project to a different project.
        :param project: The project to move this job to.
        :type project: :py:class:`~.project.Project`
        :raises DestinationExistsError: If the job is already initialized in project.
        """
        dst = project.open_job(self.statepoint())
        _mkdir_p(project.workspace())
        try:
            os.rename(self.workspace(), dst.workspace())
        except OSError:
            raise DestinationExistsError(dst)
        # Turn this instance into a handle for the destination job.
        self.__dict__.update(dst.__dict__)
    def fn(self, filename):
        """Prepend a filename with the job's workspace directory path.
        :param filename: The filename of the file.
        :type filename: str
        :return: The full workspace path of the file."""
        return os.path.join(self.workspace(), filename)
    def isfile(self, filename):
        """Return True if file exists in the job's workspace.
        :param filename: The filename of the file.
        :type filename: str
        :return: True if file with filename exists in workspace.
        :rtype: bool"""
        return os.path.isfile(self.fn(filename))
    def open(self):
        """Enter the job's workspace directory.
        You can use the :class:`~.Job` class as context manager:
        .. code-block:: python
            with project.open_job(my_statepoint) as job:
                # manipulate your job data
        Opening the context will switch into the job's workspace,
        leaving it will switch back to the previous working directory.
        """
        self._cwd.append(os.getcwd())
        self._create_directory()
        logger.info("Enter workspace '{}'.".format(self.workspace()))
        os.chdir(self.workspace())
    def close(self):
        "Close the job and switch to the previous working directory."
        try:
            os.chdir(self._cwd.pop())
            logger.info("Leave workspace.")
        except IndexError:
            # close() without a matching open() is a no-op.
            pass
    def __enter__(self):
        self.open()
        return self
    def __exit__(self, err_type, err_value, tb):
        self.close()
        return False
|
# coding=utf-8
import hashlib
import hmac
import requests
import time
from operator import itemgetter
from .helpers import date_to_milliseconds, interval_to_milliseconds
from .exceptions import BinanceAPIException, BinanceRequestException, BinanceWithdrawException
class Client(object):
API_URL = 'https://api.binance.{}/api'
WITHDRAW_API_URL = 'https://api.binance.{}/wapi'
MARGIN_API_URL = 'https://api.binance.{}/sapi'
WEBSITE_URL = 'https://www.binance.{}'
FUTURES_URL = 'https://fapi.binance.{}/fapi'
FUTURES_DATA_URL = 'https://fapi.binance.{}/futures/data'
FUTURES_COIN_URL = "https://dapi.binance.{}/dapi"
FUTURES_COIN_DATA_URL = "https://dapi.binance.{}/futures/data"
PUBLIC_API_VERSION = 'v1'
PRIVATE_API_VERSION = 'v3'
WITHDRAW_API_VERSION = 'v3'
MARGIN_API_VERSION = 'v1'
FUTURES_API_VERSION = 'v1'
FUTURES_API_VERSION2 = "v2"
SYMBOL_TYPE_SPOT = 'SPOT'
ORDER_STATUS_NEW = 'NEW'
ORDER_STATUS_PARTIALLY_FILLED = 'PARTIALLY_FILLED'
ORDER_STATUS_FILLED = 'FILLED'
ORDER_STATUS_CANCELED = 'CANCELED'
ORDER_STATUS_PENDING_CANCEL = 'PENDING_CANCEL'
ORDER_STATUS_REJECTED = 'REJECTED'
ORDER_STATUS_EXPIRED = 'EXPIRED'
KLINE_INTERVAL_1MINUTE = '1m'
KLINE_INTERVAL_3MINUTE = '3m'
KLINE_INTERVAL_5MINUTE = '5m'
KLINE_INTERVAL_15MINUTE = '15m'
KLINE_INTERVAL_30MINUTE = '30m'
KLINE_INTERVAL_1HOUR = '1h'
KLINE_INTERVAL_2HOUR = '2h'
KLINE_INTERVAL_4HOUR = '4h'
KLINE_INTERVAL_6HOUR = '6h'
KLINE_INTERVAL_8HOUR = '8h'
KLINE_INTERVAL_12HOUR = '12h'
KLINE_INTERVAL_1DAY = '1d'
KLINE_INTERVAL_3DAY = '3d'
KLINE_INTERVAL_1WEEK = '1w'
KLINE_INTERVAL_1MONTH = '1M'
SIDE_BUY = 'BUY'
SIDE_SELL = 'SELL'
ORDER_TYPE_LIMIT = 'LIMIT'
ORDER_TYPE_MARKET = 'MARKET'
ORDER_TYPE_STOP_LOSS = 'STOP_LOSS'
ORDER_TYPE_STOP_LOSS_LIMIT = 'STOP_LOSS_LIMIT'
ORDER_TYPE_TAKE_PROFIT = 'TAKE_PROFIT'
ORDER_TYPE_TAKE_PROFIT_LIMIT = 'TAKE_PROFIT_LIMIT'
ORDER_TYPE_LIMIT_MAKER = 'LIMIT_MAKER'
TIME_IN_FORCE_GTC = 'GTC' # Good till cancelled
TIME_IN_FORCE_IOC = 'IOC' # Immediate or cancel
TIME_IN_FORCE_FOK = 'FOK' # Fill or kill
ORDER_RESP_TYPE_ACK = 'ACK'
ORDER_RESP_TYPE_RESULT = 'RESULT'
ORDER_RESP_TYPE_FULL = 'FULL'
# For accessing the data returned by Client.aggregate_trades().
AGG_ID = 'a'
AGG_PRICE = 'p'
AGG_QUANTITY = 'q'
AGG_FIRST_TRADE_ID = 'f'
AGG_LAST_TRADE_ID = 'l'
AGG_TIME = 'T'
AGG_BUYER_MAKES = 'm'
AGG_BEST_MATCH = 'M'
# new asset transfer api enum
SPOT_TO_FIAT = "MAIN_C2C"
SPOT_TO_USDT_FUTURE = "MAIN_UMFUTURE"
SPOT_TO_COIN_FUTURE = "MAIN_CMFUTURE"
SPOT_TO_MARGIN_CROSS = "MAIN_MARGIN"
SPOT_TO_MINING = "MAIN_MINING"
FIAT_TO_SPOT = "C2C_MAIN"
FIAT_TO_USDT_FUTURE = "C2C_UMFUTURE"
FIAT_TO_MINING = "C2C_MINING"
USDT_FUTURE_TO_SPOT = "UMFUTURE_MAIN"
USDT_FUTURE_TO_FIAT = "UMFUTURE_C2C"
USDT_FUTURE_TO_MARGIN_CROSS = "UMFUTURE_MARGIN"
COIN_FUTURE_TO_SPOT = "CMFUTURE_MAIN"
MARGIN_CROSS_TO_SPOT = "MARGIN_MAIN"
MARGIN_CROSS_TO_USDT_FUTURE = "MARGIN_UMFUTURE"
MINING_TO_SPOT = "MINING_MAIN"
MINING_TO_USDT_FUTURE = "MINING_UMFUTURE"
MINING_TO_FIAT = "MINING_C2C"
def __init__(self, api_key=None, api_secret=None, requests_params=None, tld='com'):
"""Binance API Client constructor
:param api_key: Api Key
:type api_key: str.
:param api_secret: Api Secret
:type api_secret: str.
:param requests_params: optional - Dictionary of requests params to use for all calls
:type requests_params: dict.
"""
self.API_URL = self.API_URL.format(tld)
self.WITHDRAW_API_URL = self.WITHDRAW_API_URL.format(tld)
self.MARGIN_API_URL = self.MARGIN_API_URL.format(tld)
self.WEBSITE_URL = self.WEBSITE_URL.format(tld)
self.FUTURES_URL = self.FUTURES_URL.format(tld)
self.FUTURES_DATA_URL = self.FUTURES_DATA_URL.format(tld)
self.FUTURES_COIN_URL = self.FUTURES_COIN_URL.format(tld)
self.FUTURES_COIN_DATA_URL = self.FUTURES_COIN_DATA_URL.format(tld)
self.API_KEY = api_key
self.API_SECRET = api_secret
self.session = self._init_session()
self._requests_params = requests_params
self.response = None
self.timestamp_offset = 0
# init DNS and SSL cert
self.ping()
# calculate timestamp offset between local and binance server
res = self.get_server_time()
self.timestamp_offset = res['serverTime'] - int(time.time() * 1000)
def _init_session(self):
session = requests.session()
session.headers.update({'Accept': 'application/json',
'User-Agent': 'binance/python',
'X-MBX-APIKEY': self.API_KEY})
return session
def _create_api_uri(self, path, signed=True, version=PUBLIC_API_VERSION):
v = self.PRIVATE_API_VERSION if signed else version
return self.API_URL + '/' + v + '/' + path
def _create_withdraw_api_uri(self, path):
return self.WITHDRAW_API_URL + '/' + self.WITHDRAW_API_VERSION + '/' + path
def _create_margin_api_uri(self, path):
return self.MARGIN_API_URL + '/' + self.MARGIN_API_VERSION + '/' + path
def _create_website_uri(self, path):
return self.WEBSITE_URL + '/' + path
def _create_futures_api_uri(self, path):
return self.FUTURES_URL + '/' + self.FUTURES_API_VERSION + '/' + path
def _create_futures_data_api_uri(self, path):
return self.FUTURES_DATA_URL + '/' + path
def _create_futures_coin_api_url(self, path, version=1):
options = {1: self.FUTURES_API_VERSION, 2: self.FUTURES_API_VERSION2}
return self.FUTURES_COIN_URL + "/" + options[version] + "/" + path
def _create_futures_coin_data_api_url(self, path, version=1):
return self.FUTURES_COIN_DATA_URL + "/" + path
def _generate_signature(self, data):
ordered_data = self._order_params(data)
query_string = '&'.join(["{}={}".format(d[0], d[1]) for d in ordered_data])
m = hmac.new(self.API_SECRET.encode('utf-8'), query_string.encode('utf-8'), hashlib.sha256)
return m.hexdigest()
def _order_params(self, data):
"""Convert params to list with signature as last element
:param data:
:return:
"""
has_signature = False
params = []
for key, value in data.items():
if key == 'signature':
has_signature = True
else:
params.append((key, value))
# sort parameters by key
params.sort(key=itemgetter(0))
if has_signature:
params.append(('signature', data['signature']))
return params
def _request(self, method, uri, signed, force_params=False, **kwargs):
# set default requests timeout
kwargs['timeout'] = 10
# add our global requests params
if self._requests_params:
kwargs.update(self._requests_params)
data = kwargs.get('data', None)
if data and isinstance(data, dict):
kwargs['data'] = data
# find any requests params passed and apply them
if 'requests_params' in kwargs['data']:
# merge requests params into kwargs
kwargs.update(kwargs['data']['requests_params'])
del(kwargs['data']['requests_params'])
if signed:
# generate signature
kwargs['data']['timestamp'] = int(time.time() * 1000 + self.timestamp_offset)
kwargs['data']['signature'] = self._generate_signature(kwargs['data'])
# sort get and post params to match signature order
if data:
# sort post params
kwargs['data'] = self._order_params(kwargs['data'])
# Remove any arguments with values of None.
null_args = [i for i, (key, value) in enumerate(kwargs['data']) if value is None]
for i in reversed(null_args):
del kwargs['data'][i]
# if get request assign data array to params value for requests lib
if data and (method == 'get' or force_params):
kwargs['params'] = '&'.join('%s=%s' % (data[0], data[1]) for data in kwargs['data'])
del(kwargs['data'])
self.response = getattr(self.session, method)(uri, **kwargs)
return self._handle_response()
def _request_api(self, method, path, signed=False, version=PUBLIC_API_VERSION, **kwargs):
uri = self._create_api_uri(path, signed, version)
return self._request(method, uri, signed, **kwargs)
def _request_withdraw_api(self, method, path, signed=False, **kwargs):
uri = self._create_withdraw_api_uri(path)
return self._request(method, uri, signed, True, **kwargs)
def _request_margin_api(self, method, path, signed=False, **kwargs):
uri = self._create_margin_api_uri(path)
return self._request(method, uri, signed, **kwargs)
def _request_website(self, method, path, signed=False, **kwargs):
uri = self._create_website_uri(path)
return self._request(method, uri, signed, **kwargs)
def _request_futures_api(self, method, path, signed=False, **kwargs):
uri = self._create_futures_api_uri(path)
return self._request(method, uri, signed, True, **kwargs)
def _request_futures_data_api(self, method, path, signed=False, **kwargs):
uri = self._create_futures_data_api_uri(path)
return self._request(method, uri, signed, True, **kwargs)
def _request_futures_coin_api(self, method, path, signed=False, version=1, **kwargs):
uri = self._create_futures_coin_api_url(path, version=version)
return self._request(method, uri, signed, True, **kwargs)
def _request_futures_coin_data_api(self, method, path, signed=False, version=1, **kwargs):
uri = self._create_futures_coin_data_api_url(path, version=version)
return self._request(method, uri, signed, True, **kwargs)
def _handle_response(self):
"""Internal helper for handling API responses from the Binance server.
Raises the appropriate exceptions when necessary; otherwise, returns the
response.
"""
if not (200 <= self.response.status_code < 300):
raise BinanceAPIException(self.response)
try:
return self.response.json()
except ValueError:
raise BinanceRequestException('Invalid Response: %s' % self.response.text)
def _get(self, path, signed=False, version=PUBLIC_API_VERSION, **kwargs):
return self._request_api('get', path, signed, version, **kwargs)
def _post(self, path, signed=False, version=PUBLIC_API_VERSION, **kwargs):
return self._request_api('post', path, signed, version, **kwargs)
def _put(self, path, signed=False, version=PUBLIC_API_VERSION, **kwargs):
return self._request_api('put', path, signed, version, **kwargs)
def _delete(self, path, signed=False, version=PUBLIC_API_VERSION, **kwargs):
return self._request_api('delete', path, signed, version, **kwargs)
# Exchange Endpoints
def get_products(self):
"""Return list of products currently listed on Binance
Use get_exchange_info() call instead
:returns: list - List of product dictionaries
:raises: BinanceRequestException, BinanceAPIException
"""
products = self._request_website('get', 'exchange-api/v1/public/asset-service/product/get-products')
return products
def get_exchange_info(self):
"""Return rate limits and list of symbols
:returns: list - List of product dictionaries
.. code-block:: python
{
"timezone": "UTC",
"serverTime": 1508631584636,
"rateLimits": [
{
"rateLimitType": "REQUESTS",
"interval": "MINUTE",
"limit": 1200
},
{
"rateLimitType": "ORDERS",
"interval": "SECOND",
"limit": 10
},
{
"rateLimitType": "ORDERS",
"interval": "DAY",
"limit": 100000
}
],
"exchangeFilters": [],
"symbols": [
{
"symbol": "ETHBTC",
"status": "TRADING",
"baseAsset": "ETH",
"baseAssetPrecision": 8,
"quoteAsset": "BTC",
"quotePrecision": 8,
"orderTypes": ["LIMIT", "MARKET"],
"icebergAllowed": false,
"filters": [
{
"filterType": "PRICE_FILTER",
"minPrice": "0.00000100",
"maxPrice": "100000.00000000",
"tickSize": "0.00000100"
}, {
"filterType": "LOT_SIZE",
"minQty": "0.00100000",
"maxQty": "100000.00000000",
"stepSize": "0.00100000"
}, {
"filterType": "MIN_NOTIONAL",
"minNotional": "0.00100000"
}
]
}
]
}
:raises: BinanceRequestException, BinanceAPIException
"""
return self._get('exchangeInfo', version=self.PRIVATE_API_VERSION)
def get_symbol_info(self, symbol):
"""Return information about a symbol
:param symbol: required e.g BNBBTC
:type symbol: str
:returns: Dict if found, None if not
.. code-block:: python
{
"symbol": "ETHBTC",
"status": "TRADING",
"baseAsset": "ETH",
"baseAssetPrecision": 8,
"quoteAsset": "BTC",
"quotePrecision": 8,
"orderTypes": ["LIMIT", "MARKET"],
"icebergAllowed": false,
"filters": [
{
"filterType": "PRICE_FILTER",
"minPrice": "0.00000100",
"maxPrice": "100000.00000000",
"tickSize": "0.00000100"
}, {
"filterType": "LOT_SIZE",
"minQty": "0.00100000",
"maxQty": "100000.00000000",
"stepSize": "0.00100000"
}, {
"filterType": "MIN_NOTIONAL",
"minNotional": "0.00100000"
}
]
}
:raises: BinanceRequestException, BinanceAPIException
"""
res = self._get('exchangeInfo', version=self.PRIVATE_API_VERSION)
for item in res['symbols']:
if item['symbol'] == symbol.upper():
return item
return None
# General Endpoints
def ping(self):
"""Test connectivity to the Rest API.
https://github.com/binance/binance-spot-api-docs/blob/master/rest-api.md#test-connectivity
:returns: Empty array
.. code-block:: python
{}
:raises: BinanceRequestException, BinanceAPIException
"""
return self._get('ping', version=self.PRIVATE_API_VERSION)
def get_server_time(self):
"""Test connectivity to the Rest API and get the current server time.
https://github.com/binance/binance-spot-api-docs/blob/master/rest-api.md#check-server-time
:returns: Current server time
.. code-block:: python
{
"serverTime": 1499827319559
}
:raises: BinanceRequestException, BinanceAPIException
"""
return self._get('time', version=self.PRIVATE_API_VERSION)
# Market Data Endpoints
def get_all_tickers(self):
"""Latest price for all symbols.
https://www.binance.com/restapipub.html#symbols-price-ticker
:returns: List of market tickers
.. code-block:: python
[
{
"symbol": "LTCBTC",
"price": "4.00000200"
},
{
"symbol": "ETHBTC",
"price": "0.07946600"
}
]
:raises: BinanceRequestException, BinanceAPIException
"""
return self._get('ticker/price', version=self.PRIVATE_API_VERSION)
def get_orderbook_tickers(self):
"""Best price/qty on the order book for all symbols.
https://www.binance.com/restapipub.html#symbols-order-book-ticker
:returns: List of order book market entries
.. code-block:: python
[
{
"symbol": "LTCBTC",
"bidPrice": "4.00000000",
"bidQty": "431.00000000",
"askPrice": "4.00000200",
"askQty": "9.00000000"
},
{
"symbol": "ETHBTC",
"bidPrice": "0.07946700",
"bidQty": "9.00000000",
"askPrice": "100000.00000000",
"askQty": "1000.00000000"
}
]
:raises: BinanceRequestException, BinanceAPIException
"""
return self._get('ticker/bookTicker', version=self.PRIVATE_API_VERSION)
def get_order_book(self, **params):
"""Get the Order Book for the market
https://github.com/binance/binance-spot-api-docs/blob/master/rest-api.md#order-book
:param symbol: required
:type symbol: str
:param limit: Default 100; max 1000
:type limit: int
:returns: API response
.. code-block:: python
{
"lastUpdateId": 1027024,
"bids": [
[
"4.00000000", # PRICE
"431.00000000", # QTY
[] # Can be ignored
]
],
"asks": [
[
"4.00000200",
"12.00000000",
[]
]
]
}
:raises: BinanceRequestException, BinanceAPIException
"""
return self._get('depth', data=params, version=self.PRIVATE_API_VERSION)
def get_recent_trades(self, **params):
"""Get recent trades (up to last 500).
https://github.com/binance/binance-spot-api-docs/blob/master/rest-api.md#recent-trades-list
:param symbol: required
:type symbol: str
:param limit: Default 500; max 500.
:type limit: int
:returns: API response
.. code-block:: python
[
{
"id": 28457,
"price": "4.00000100",
"qty": "12.00000000",
"time": 1499865549590,
"isBuyerMaker": true,
"isBestMatch": true
}
]
:raises: BinanceRequestException, BinanceAPIException
"""
return self._get('trades', data=params)
def get_historical_trades(self, **params):
    """Fetch older (historical) trades for a symbol.

    https://github.com/binance/binance-spot-api-docs/blob/master/rest-api.md#recent-trades-list

    :param symbol: required
    :type symbol: str
    :param limit: Default 500; max 500.
    :type limit: int
    :param fromId: TradeId to fetch from. Default gets most recent trades.
    :type fromId: str

    :returns: API response

    .. code-block:: python

        [
            {
                "id": 28457,
                "price": "4.00000100",
                "qty": "12.00000000",
                "time": 1499865549590,
                "isBuyerMaker": true,
                "isBestMatch": true
            }
        ]

    :raises: BinanceRequestException, BinanceAPIException
    """
    endpoint = 'historicalTrades'
    return self._get(endpoint, data=params, version=self.PRIVATE_API_VERSION)
def get_aggregate_trades(self, **params):
    """Fetch compressed, aggregate trades.

    Trades that fill at the same time, from the same order, with the same
    price are reported with the quantity aggregated into a single entry.

    https://github.com/binance/binance-spot-api-docs/blob/master/rest-api.md#compressedaggregate-trades-list

    :param symbol: required
    :type symbol: str
    :param fromId: ID to get aggregate trades from INCLUSIVE.
    :type fromId: str
    :param startTime: Timestamp in ms to get aggregate trades from INCLUSIVE.
    :type startTime: int
    :param endTime: Timestamp in ms to get aggregate trades until INCLUSIVE.
    :type endTime: int
    :param limit: Default 500; max 500.
    :type limit: int

    :returns: API response

    .. code-block:: python

        [
            {
                "a": 26129,         # Aggregate tradeId
                "p": "0.01633102",  # Price
                "q": "4.70443515",  # Quantity
                "f": 27781,         # First tradeId
                "l": 27781,         # Last tradeId
                "T": 1498793709153, # Timestamp
                "m": true,          # Was the buyer the maker?
                "M": true           # Was the trade the best price match?
            }
        ]

    :raises: BinanceRequestException, BinanceAPIException
    """
    endpoint = 'aggTrades'
    return self._get(endpoint, data=params, version=self.PRIVATE_API_VERSION)
def aggregate_trade_iter(self, symbol, start_str=None, last_id=None):
    """Iterate over aggregate trade data from (start_str or last_id) to
    the end of the trade history available so far.

    If start_str is specified, iteration starts with the first trade at or
    after that time — meant for initialising a local cache of trade data.
    If last_id is specified, iteration starts with the trade *after* it —
    meant for updating a pre-existing local trade cache.

    Only one of start_str or last_id may be given, not both. Not guaranteed
    to work correctly if more than one of these iterators runs concurrently
    (you will likely hit the API rate limit).

    See dateparser docs for valid start string formats
    http://dateparser.readthedocs.io/en/latest/
    If using offset strings for dates add "UTC" to the date string,
    e.g. "now UTC", "11 hours ago UTC".

    :param symbol: Symbol string e.g. ETHBTC
    :type symbol: str
    :param start_str: Start date string in UTC format or timestamp in
        milliseconds. The iterator will return the first trade occurring
        later than this time.
    :type start_str: str|int
    :param last_id: aggregate trade ID of the last known aggregate trade.
        Not a regular trade ID. See
        https://github.com/binance/binance-spot-api-docs/blob/master/rest-api.md#compressedaggregate-trades-list
    :type last_id: int

    :returns: an iterator of JSON objects, one per trade. The format of
        each object is identical to Client.aggregate_trades().
    """
    # The two start-point modes are mutually exclusive.
    if start_str is not None and last_id is not None:
        raise ValueError(
            'start_time and last_id may not be simultaneously specified.')

    # If there's no last_id, fetch an initial batch to establish one.
    if last_id is None:
        # Without a last_id we need the very first trade; unlike the main
        # loop below, this first batch is yielded in full (no trade is
        # skipped), since there is no already-seen trade to drop.
        if start_str is None:
            # fromId=0 returns the earliest aggregate trades on record.
            trades = self.get_aggregate_trades(symbol=symbol, fromId=0)
        else:
            # The API requires startTime/endTime windows of at most one
            # hour, and the result set must contain at least one trade.
            if type(start_str) == int:
                start_ts = start_str
            else:
                # Parse a human-readable date string into epoch millis.
                start_ts = date_to_milliseconds(start_str)
            # If the resulting set is empty (i.e. no trades in that
            # interval), slide the one-hour window forward until at least
            # one trade is found or the present moment is reached.
            while True:
                end_ts = start_ts + (60 * 60 * 1000)  # one hour in ms
                trades = self.get_aggregate_trades(
                    symbol=symbol,
                    startTime=start_ts,
                    endTime=end_ts)
                if len(trades) > 0:
                    break
                # Reached the present with no trades found: nothing to
                # iterate, so the generator ends here.
                if end_ts > int(time.time() * 1000):
                    return
                start_ts = end_ts
        for t in trades:
            yield t
        # Remember the last trade's aggregate ID so the main loop can
        # continue from the trade after it.
        last_id = trades[-1][self.AGG_ID]

    while True:
        # No explicit delay between queries is needed: requests use
        # blocking IO, and Binance throttles responses server-side, which
        # effectively paces this loop for a single-threaded caller.
        trades = self.get_aggregate_trades(symbol=symbol, fromId=last_id)
        # fromId=n returns a set starting with id n, which was already
        # yielded on the previous iteration — drop the first item.
        trades = trades[1:]
        if len(trades) == 0:
            # Caught up with the live trade stream; stop iterating.
            return
        for t in trades:
            yield t
        last_id = trades[-1][self.AGG_ID]
def get_klines(self, **params):
    """Fetch kline/candlestick bars for a symbol.

    Klines are uniquely identified by their open time.

    https://github.com/binance/binance-spot-api-docs/blob/master/rest-api.md#klinecandlestick-data

    :param symbol: required
    :type symbol: str
    :param interval: -
    :type interval: str
    :param limit: - Default 500; max 500.
    :type limit: int
    :param startTime:
    :type startTime: int
    :param endTime:
    :type endTime: int

    :returns: API response

    .. code-block:: python

        [
            [
                1499040000000,      # Open time
                "0.01634790",       # Open
                "0.80000000",       # High
                "0.01575800",       # Low
                "0.01577100",       # Close
                "148976.11427815",  # Volume
                1499644799999,      # Close time
                "2434.19055334",    # Quote asset volume
                308,                # Number of trades
                "1756.87402397",    # Taker buy base asset volume
                "28.46694368",      # Taker buy quote asset volume
                "17928899.62484339" # Can be ignored
            ]
        ]

    :raises: BinanceRequestException, BinanceAPIException
    """
    endpoint = 'klines'
    return self._get(endpoint, data=params, version=self.PRIVATE_API_VERSION)
def _klines(self, spot=True, **params):
    """Dispatch a kline request to the spot or futures endpoint.

    Selects get_klines (spot) or futures_klines (futures) and forwards
    all remaining keyword arguments unchanged.

    :param spot: Spot klines functions, otherwise futures
    :type spot: bool

    :return: klines, see get_klines
    """
    fetch = self.get_klines if spot else self.futures_klines
    return fetch(**params)
def _get_earliest_valid_timestamp(self, symbol, interval, spot):
    """Return the earliest valid kline open timestamp from Binance.

    Requests a single candle starting at epoch 0; the server responds with
    the first candle it actually has, whose open time is the earliest
    timestamp for which historical data exists.

    :param symbol: Name of symbol pair e.g BNBBTC
    :type symbol: str
    :param interval: Binance Kline interval
    :type interval: str
    :param spot: Spot endpoint, otherwise futures
    :type spot: bool

    :return: first valid timestamp (milliseconds)
    """
    first_kline = self._klines(
        spot=spot,
        symbol=symbol,
        interval=interval,
        limit=1,
        startTime=0,
        endTime=None,
    )
    # Element [0][0] is the open time of the earliest available candle.
    return first_kline[0][0]
def get_historical_klines(self, symbol, interval, start_str, end_str=None,
                          limit=500):
    """Get Historical Klines from Binance (spot endpoint).

    See dateparser docs for valid start and end string formats
    http://dateparser.readthedocs.io/en/latest/
    If using offset strings for dates add "UTC" to the date string,
    e.g. "now UTC", "11 hours ago UTC".

    :param symbol: Name of symbol pair e.g BNBBTC
    :type symbol: str
    :param interval: Binance Kline interval
    :type interval: str
    :param start_str: Start date string in UTC format or timestamp in milliseconds
    :type start_str: str|int
    :param end_str: optional - end date string in UTC format or timestamp in
        milliseconds (default will fetch everything up to now)
    :type end_str: str|int
    :param limit: Default 500; max 1000.
    :type limit: int

    :return: list of OHLCV values
    """
    # BUGFIX: previously this hard-coded end_str=None and limit=500,
    # silently ignoring the caller's arguments. Forward them instead.
    return self._historical_klines(
        symbol, interval, start_str, end_str=end_str, limit=limit, spot=True)
def _historical_klines(self, symbol, interval, start_str, end_str=None,
                       limit=500, spot=True):
    """Get Historical Klines from Binance (spot or futures).

    See dateparser docs for valid start and end string formats
    http://dateparser.readthedocs.io/en/latest/
    If using offset strings for dates add "UTC" to the date string,
    e.g. "now UTC", "11 hours ago UTC".

    :param symbol: Name of symbol pair e.g BNBBTC
    :type symbol: str
    :param interval: Binance Kline interval
    :type interval: str
    :param start_str: Start date string in UTC format or timestamp in milliseconds
    :type start_str: str|int
    :param end_str: optional - end date string in UTC format or timestamp in
        milliseconds (default will fetch everything up to now)
    :type end_str: str|int
    :param limit: Default 500; max 1000.
    :type limit: int
    :param spot: Historical klines from spot endpoint, otherwise futures
    :type spot: bool

    :return: list of OHLCV values
    """
    # Accumulates candles across all paged requests.
    output_data = []

    # Convert the interval to its length in milliseconds so the next
    # request window can be advanced past the last candle received.
    timeframe = interval_to_milliseconds(interval)

    # Accept either an epoch-milliseconds int or a date string.
    if type(start_str) == int:
        start_ts = start_str
    else:
        start_ts = date_to_milliseconds(start_str)

    # Clamp the start to the first timestamp Binance actually has data for.
    first_valid_ts = self._get_earliest_valid_timestamp(symbol, interval, spot)
    start_ts = max(start_ts, first_valid_ts)

    # If an end time was passed, convert it the same way; None means "now".
    end_ts = None
    if end_str:
        if type(end_str) == int:
            end_ts = end_str
        else:
            end_ts = date_to_milliseconds(end_str)

    idx = 0
    while True:
        # Fetch klines from start_ts up to `limit` entries, or end_ts if set.
        temp_data = self._klines(
            spot=spot,
            symbol=symbol,
            interval=interval,
            limit=limit,
            startTime=start_ts,
            endTime=end_ts
        )

        # Handle the case where exactly the limit amount of data was
        # returned on the previous loop and nothing remains.
        if not len(temp_data):
            break

        output_data += temp_data

        # Continue from the open time of the last candle received.
        start_ts = temp_data[-1][0]
        idx += 1

        # Fewer than `limit` candles means we've reached the end.
        if len(temp_data) < limit:
            break

        # Step past the last candle so it isn't fetched twice.
        start_ts += timeframe

        # Sleep after every 3rd call to be kind to the API.
        if idx % 3 == 0:
            time.sleep(1)

    return output_data
def get_historical_klines_generator(self, symbol, interval, start_str, end_str=None):
    """Get a Historical Klines generator from Binance (spot endpoint).

    See dateparser docs for valid start and end string formats
    http://dateparser.readthedocs.io/en/latest/
    If using offset strings for dates add "UTC" to the date string,
    e.g. "now UTC", "11 hours ago UTC".

    :param symbol: Name of symbol pair e.g BNBBTC
    :type symbol: str
    :param interval: Binance Kline interval
    :type interval: str
    :param start_str: Start date string in UTC format or timestamp in milliseconds
    :type start_str: str|int
    :param end_str: optional - end date string in UTC format or timestamp in
        milliseconds (default will fetch everything up to now)
    :type end_str: str|int

    :return: generator of OHLCV values
    """
    # Delegate to the shared implementation, pinned to the spot endpoint.
    return self._historical_klines_generator(
        symbol, interval, start_str, end_str=end_str, spot=True)
def _historical_klines_generator(self, symbol, interval, start_str, end_str=None, spot=True):
    """Get Historical Klines from Binance as a generator (spot or futures).

    See dateparser docs for valid start and end string formats
    http://dateparser.readthedocs.io/en/latest/
    If using offset strings for dates add "UTC" to the date string,
    e.g. "now UTC", "11 hours ago UTC".

    :param symbol: Name of symbol pair e.g BNBBTC
    :type symbol: str
    :param interval: Binance Kline interval
    :type interval: str
    :param start_str: Start date string in UTC format or timestamp in milliseconds
    :type start_str: str|int
    :param end_str: optional - end date string in UTC format or timestamp in
        milliseconds (default will fetch everything up to now)
    :type end_str: str|int
    :param spot: Historical klines generator from spot endpoint, otherwise futures
    :type spot: bool

    :return: generator of OHLCV values
    """
    # Maximum candles per request.
    limit = 500

    # Interval length in milliseconds, used to advance the request window.
    timeframe = interval_to_milliseconds(interval)

    # Accept either an epoch-milliseconds int or a date string.
    if type(start_str) == int:
        start_ts = start_str
    else:
        start_ts = date_to_milliseconds(start_str)

    # Clamp the start to the first timestamp Binance actually has data for.
    first_valid_ts = self._get_earliest_valid_timestamp(symbol, interval, spot)
    start_ts = max(start_ts, first_valid_ts)

    # If an end time was passed, convert it the same way; None means "now".
    end_ts = None
    if end_str:
        if type(end_str) == int:
            end_ts = end_str
        else:
            end_ts = date_to_milliseconds(end_str)

    idx = 0
    while True:
        # BUGFIX: previously this called self.get_klines(spot=spot, ...),
        # which leaked a bogus `spot` key into the request parameters and
        # never used the futures endpoint. Dispatch through _klines instead.
        output_data = self._klines(
            spot=spot,
            symbol=symbol,
            interval=interval,
            limit=limit,
            startTime=start_ts,
            endTime=end_ts
        )

        # Handle the case where exactly the limit amount of data was
        # returned on the previous loop and nothing remains.
        if not len(output_data):
            break

        for o in output_data:
            yield o

        # Continue from the open time of the last candle received.
        start_ts = output_data[-1][0]
        idx += 1

        # Fewer than `limit` candles means we've reached the end.
        if len(output_data) < limit:
            break

        # Step past the last candle so it isn't yielded twice.
        start_ts += timeframe

        # Sleep after every 3rd call to be kind to the API.
        if idx % 3 == 0:
            time.sleep(1)
def get_avg_price(self, **params):
    """Fetch the current average price for a symbol.

    https://github.com/binance/binance-spot-api-docs/blob/master/rest-api.md#current-average-price

    :param symbol:
    :type symbol: str

    :returns: API response

    .. code-block:: python

        {
            "mins": 5,
            "price": "9.35751834"
        }
    """
    endpoint = 'avgPrice'
    return self._get(endpoint, data=params, version=self.PRIVATE_API_VERSION)
def get_ticker(self, **params):
    """Fetch 24 hour price change statistics.

    Returns a single dict when a symbol is given, otherwise a list with an
    entry for every symbol.

    https://github.com/binance/binance-spot-api-docs/blob/master/rest-api.md#24hr-ticker-price-change-statistics

    :param symbol:
    :type symbol: str

    :returns: API response

    .. code-block:: python

        {
            "priceChange": "-94.99999800",
            "priceChangePercent": "-95.960",
            "weightedAvgPrice": "0.29628482",
            "prevClosePrice": "0.10002000",
            "lastPrice": "4.00000200",
            "bidPrice": "4.00000000",
            "askPrice": "4.00000200",
            "openPrice": "99.00000000",
            "highPrice": "100.00000000",
            "lowPrice": "0.10000000",
            "volume": "8913.30000000",
            "openTime": 1499783499040,
            "closeTime": 1499869899040,
            "firstId": 28385,   # First tradeId
            "lastId": 28460,    # Last tradeId
            "count": 76         # Trade count
        }

    OR a list of such dicts (one per symbol) when no symbol is passed.

    :raises: BinanceRequestException, BinanceAPIException
    """
    endpoint = 'ticker/24hr'
    return self._get(endpoint, data=params, version=self.PRIVATE_API_VERSION)
def get_symbol_ticker(self, **params):
    """Fetch the latest price for a symbol or for all symbols.

    Returns a single dict when a symbol is given, otherwise a list with an
    entry for every symbol.

    https://github.com/binance/binance-spot-api-docs/blob/master/rest-api.md#24hr-ticker-price-change-statistics

    :param symbol:
    :type symbol: str

    :returns: API response

    .. code-block:: python

        {
            "symbol": "LTCBTC",
            "price": "4.00000200"
        }

    OR a list of such dicts (one per symbol) when no symbol is passed.

    :raises: BinanceRequestException, BinanceAPIException
    """
    endpoint = 'ticker/price'
    return self._get(endpoint, data=params, version=self.PRIVATE_API_VERSION)
def get_orderbook_ticker(self, **params):
    """Fetch the best order-book price/qty for a symbol or for all symbols.

    Returns a single dict when a symbol is given, otherwise a list with an
    entry for every symbol.

    https://github.com/binance/binance-spot-api-docs/blob/master/rest-api.md#symbol-order-book-ticker

    :param symbol:
    :type symbol: str

    :returns: API response

    .. code-block:: python

        {
            "symbol": "LTCBTC",
            "bidPrice": "4.00000000",
            "bidQty": "431.00000000",
            "askPrice": "4.00000200",
            "askQty": "9.00000000"
        }

    OR a list of such dicts (one per symbol) when no symbol is passed.

    :raises: BinanceRequestException, BinanceAPIException
    """
    endpoint = 'ticker/bookTicker'
    return self._get(endpoint, data=params, version=self.PRIVATE_API_VERSION)
# Account Endpoints
def create_order(self, **params):
    """Send in a new order.

    Any order with an icebergQty MUST have timeInForce set to GTC.

    https://github.com/binance/binance-spot-api-docs/blob/master/rest-api.md#new-order--trade

    :param symbol: required
    :type symbol: str
    :param side: required
    :type side: str
    :param type: required
    :type type: str
    :param timeInForce: required if limit order
    :type timeInForce: str
    :param quantity: required
    :type quantity: decimal
    :param quoteOrderQty: amount the user wants to spend (when buying) or
        receive (when selling) of the quote asset, applicable to MARKET orders
    :type quoteOrderQty: decimal
    :param price: required
    :type price: str
    :param newClientOrderId: A unique id for the order. Automatically generated if not sent.
    :type newClientOrderId: str
    :param icebergQty: Used with LIMIT, STOP_LOSS_LIMIT, and TAKE_PROFIT_LIMIT
        to create an iceberg order.
    :type icebergQty: decimal
    :param newOrderRespType: Set the response JSON. ACK, RESULT, or FULL; default: RESULT.
    :type newOrderRespType: str
    :param recvWindow: the number of milliseconds the request is valid for
    :type recvWindow: int

    :returns: API response. The shape depends on newOrderRespType:

    Response ACK:

    .. code-block:: python

        {
            "symbol":"LTCBTC",
            "orderId": 1,
            "clientOrderId": "myOrder1" # Will be newClientOrderId
            "transactTime": 1499827319559
        }

    Response RESULT additionally includes price, origQty, executedQty,
    status, timeInForce, type, and side.

    Response FULL additionally includes a "fills" list, e.g.:

    .. code-block:: python

        "fills": [
            {
                "price": "4000.00000000",
                "qty": "1.00000000",
                "commission": "4.00000000",
                "commissionAsset": "USDT"
            }
        ]

    :raises: BinanceRequestException, BinanceAPIException, BinanceOrderException, BinanceOrderMinAmountException, BinanceOrderMinPriceException, BinanceOrderMinTotalException, BinanceOrderUnknownSymbolException, BinanceOrderInactiveSymbolException
    """
    # Signed POST (second argument True) — order placement requires auth.
    return self._post('order', True, data=params)
def order_limit(self, timeInForce=TIME_IN_FORCE_GTC, **params):
    """Send in a new limit order.

    Any order with an icebergQty MUST have timeInForce set to GTC.

    :param symbol: required
    :type symbol: str
    :param side: required
    :type side: str
    :param quantity: required
    :type quantity: decimal
    :param price: required
    :type price: str
    :param timeInForce: default Good till cancelled
    :type timeInForce: str
    :param newClientOrderId: A unique id for the order. Automatically generated if not sent.
    :type newClientOrderId: str
    :param icebergQty: Used with LIMIT, STOP_LOSS_LIMIT, and TAKE_PROFIT_LIMIT
        to create an iceberg order.
    :type icebergQty: decimal
    :param newOrderRespType: Set the response JSON. ACK, RESULT, or FULL; default: RESULT.
    :type newOrderRespType: str
    :param recvWindow: the number of milliseconds the request is valid for
    :type recvWindow: int

    :returns: API response

    See order endpoint for full response options

    :raises: BinanceRequestException, BinanceAPIException, BinanceOrderException, BinanceOrderMinAmountException, BinanceOrderMinPriceException, BinanceOrderMinTotalException, BinanceOrderUnknownSymbolException, BinanceOrderInactiveSymbolException
    """
    # Force the order type to LIMIT and attach the time-in-force policy.
    params['type'] = self.ORDER_TYPE_LIMIT
    params['timeInForce'] = timeInForce
    return self.create_order(**params)
def order_limit_buy(self, timeInForce=TIME_IN_FORCE_GTC, **params):
    """Send in a new limit buy order.

    Any order with an icebergQty MUST have timeInForce set to GTC.

    :param symbol: required
    :type symbol: str
    :param quantity: required
    :type quantity: decimal
    :param price: required
    :type price: str
    :param timeInForce: default Good till cancelled
    :type timeInForce: str
    :param newClientOrderId: A unique id for the order. Automatically generated if not sent.
    :type newClientOrderId: str
    :param stopPrice: Used with stop orders
    :type stopPrice: decimal
    :param icebergQty: Used with iceberg orders
    :type icebergQty: decimal
    :param newOrderRespType: Set the response JSON. ACK, RESULT, or FULL; default: RESULT.
    :type newOrderRespType: str
    :param recvWindow: the number of milliseconds the request is valid for
    :type recvWindow: int

    :returns: API response

    See order endpoint for full response options

    :raises: BinanceRequestException, BinanceAPIException, BinanceOrderException, BinanceOrderMinAmountException, BinanceOrderMinPriceException, BinanceOrderMinTotalException, BinanceOrderUnknownSymbolException, BinanceOrderInactiveSymbolException
    """
    # Fix the side to BUY, then delegate to the generic limit-order helper.
    params['side'] = self.SIDE_BUY
    return self.order_limit(timeInForce=timeInForce, **params)
def order_limit_sell(self, timeInForce=TIME_IN_FORCE_GTC, **params):
    """Send in a new limit sell order.

    :param symbol: required
    :type symbol: str
    :param quantity: required
    :type quantity: decimal
    :param price: required
    :type price: str
    :param timeInForce: default Good till cancelled
    :type timeInForce: str
    :param newClientOrderId: A unique id for the order. Automatically generated if not sent.
    :type newClientOrderId: str
    :param stopPrice: Used with stop orders
    :type stopPrice: decimal
    :param icebergQty: Used with iceberg orders
    :type icebergQty: decimal
    :param newOrderRespType: Set the response JSON. ACK, RESULT, or FULL; default: RESULT.
    :type newOrderRespType: str
    :param recvWindow: the number of milliseconds the request is valid for
    :type recvWindow: int

    :returns: API response

    See order endpoint for full response options

    :raises: BinanceRequestException, BinanceAPIException, BinanceOrderException, BinanceOrderMinAmountException, BinanceOrderMinPriceException, BinanceOrderMinTotalException, BinanceOrderUnknownSymbolException, BinanceOrderInactiveSymbolException
    """
    # Fix the side to SELL, then delegate to the generic limit-order helper.
    params['side'] = self.SIDE_SELL
    return self.order_limit(timeInForce=timeInForce, **params)
def order_market(self, **params):
    """Send in a new market order.

    :param symbol: required
    :type symbol: str
    :param side: required
    :type side: str
    :param quantity: required
    :type quantity: decimal
    :param quoteOrderQty: amount the user wants to spend (when buying) or
        receive (when selling) of the quote asset
    :type quoteOrderQty: decimal
    :param newClientOrderId: A unique id for the order. Automatically generated if not sent.
    :type newClientOrderId: str
    :param newOrderRespType: Set the response JSON. ACK, RESULT, or FULL; default: RESULT.
    :type newOrderRespType: str
    :param recvWindow: the number of milliseconds the request is valid for
    :type recvWindow: int

    :returns: API response

    See order endpoint for full response options

    :raises: BinanceRequestException, BinanceAPIException, BinanceOrderException, BinanceOrderMinAmountException, BinanceOrderMinPriceException, BinanceOrderMinTotalException, BinanceOrderUnknownSymbolException, BinanceOrderInactiveSymbolException
    """
    # Force the order type to MARKET before delegating.
    params['type'] = self.ORDER_TYPE_MARKET
    return self.create_order(**params)
def order_market_buy(self, **params):
    """Send in a new market buy order.

    :param symbol: required
    :type symbol: str
    :param quantity: required
    :type quantity: decimal
    :param quoteOrderQty: the amount the user wants to spend of the quote asset
    :type quoteOrderQty: decimal
    :param newClientOrderId: A unique id for the order. Automatically generated if not sent.
    :type newClientOrderId: str
    :param newOrderRespType: Set the response JSON. ACK, RESULT, or FULL; default: RESULT.
    :type newOrderRespType: str
    :param recvWindow: the number of milliseconds the request is valid for
    :type recvWindow: int

    :returns: API response

    See order endpoint for full response options

    :raises: BinanceRequestException, BinanceAPIException, BinanceOrderException, BinanceOrderMinAmountException, BinanceOrderMinPriceException, BinanceOrderMinTotalException, BinanceOrderUnknownSymbolException, BinanceOrderInactiveSymbolException
    """
    # Fix the side to BUY, then delegate to the generic market-order helper.
    params['side'] = self.SIDE_BUY
    return self.order_market(**params)
def order_market_sell(self, **params):
    """Send in a new market sell order.

    :param symbol: required
    :type symbol: str
    :param quantity: required
    :type quantity: decimal
    :param quoteOrderQty: the amount the user wants to receive of the quote asset
    :type quoteOrderQty: decimal
    :param newClientOrderId: A unique id for the order. Automatically generated if not sent.
    :type newClientOrderId: str
    :param newOrderRespType: Set the response JSON. ACK, RESULT, or FULL; default: RESULT.
    :type newOrderRespType: str
    :param recvWindow: the number of milliseconds the request is valid for
    :type recvWindow: int

    :returns: API response

    See order endpoint for full response options

    :raises: BinanceRequestException, BinanceAPIException, BinanceOrderException, BinanceOrderMinAmountException, BinanceOrderMinPriceException, BinanceOrderMinTotalException, BinanceOrderUnknownSymbolException, BinanceOrderInactiveSymbolException
    """
    # Fix the side to SELL, then delegate to the generic market-order helper.
    params['side'] = self.SIDE_SELL
    return self.order_market(**params)
def create_oco_order(self, **params):
    """Send in a new OCO (one-cancels-the-other) order.

    https://github.com/binance/binance-spot-api-docs/blob/master/rest-api.md#new-oco-trade

    :param symbol: required
    :type symbol: str
    :param listClientOrderId: A unique id for the list order. Automatically generated if not sent.
    :type listClientOrderId: str
    :param side: required
    :type side: str
    :param quantity: required
    :type quantity: decimal
    :param limitClientOrderId: A unique id for the limit order. Automatically generated if not sent.
    :type limitClientOrderId: str
    :param price: required
    :type price: str
    :param limitIcebergQty: Used to make the LIMIT_MAKER leg an iceberg order.
    :type limitIcebergQty: decimal
    :param stopClientOrderId: A unique id for the stop order. Automatically generated if not sent.
    :type stopClientOrderId: str
    :param stopPrice: required
    :type stopPrice: str
    :param stopLimitPrice: If provided, stopLimitTimeInForce is required.
    :type stopLimitPrice: str
    :param stopIcebergQty: Used with STOP_LOSS_LIMIT leg to make an iceberg order.
    :type stopIcebergQty: decimal
    :param stopLimitTimeInForce: Valid values are GTC/FOK/IOC.
    :type stopLimitTimeInForce: str
    :param newOrderRespType: Set the response JSON. ACK, RESULT, or FULL; default: RESULT.
    :type newOrderRespType: str
    :param recvWindow: the number of milliseconds the request is valid for
    :type recvWindow: int

    :returns: API response (shape depends on newOrderRespType: ACK, RESULT, or FULL)

    :raises: BinanceRequestException, BinanceAPIException, BinanceOrderException, BinanceOrderMinAmountException, BinanceOrderMinPriceException, BinanceOrderMinTotalException, BinanceOrderUnknownSymbolException, BinanceOrderInactiveSymbolException
    """
    # Signed POST — OCO placement requires auth.
    return self._post('order/oco', True, data=params)
def order_oco_buy(self, **params):
    """Send in a new OCO buy order.

    :param symbol: required
    :type symbol: str
    :param listClientOrderId: A unique id for the list order. Automatically generated if not sent.
    :type listClientOrderId: str
    :param quantity: required
    :type quantity: decimal
    :param limitClientOrderId: A unique id for the limit order. Automatically generated if not sent.
    :type limitClientOrderId: str
    :param price: required
    :type price: str
    :param limitIcebergQty: Used to make the LIMIT_MAKER leg an iceberg order.
    :type limitIcebergQty: decimal
    :param stopClientOrderId: A unique id for the stop order. Automatically generated if not sent.
    :type stopClientOrderId: str
    :param stopPrice: required
    :type stopPrice: str
    :param stopLimitPrice: If provided, stopLimitTimeInForce is required.
    :type stopLimitPrice: str
    :param stopIcebergQty: Used with STOP_LOSS_LIMIT leg to make an iceberg order.
    :type stopIcebergQty: decimal
    :param stopLimitTimeInForce: Valid values are GTC/FOK/IOC.
    :type stopLimitTimeInForce: str
    :param newOrderRespType: Set the response JSON. ACK, RESULT, or FULL; default: RESULT.
    :type newOrderRespType: str
    :param recvWindow: the number of milliseconds the request is valid for
    :type recvWindow: int

    :returns: API response

    See OCO order endpoint for full response options

    :raises: BinanceRequestException, BinanceAPIException, BinanceOrderException, BinanceOrderMinAmountException, BinanceOrderMinPriceException, BinanceOrderMinTotalException, BinanceOrderUnknownSymbolException, BinanceOrderInactiveSymbolException
    """
    # Fix the side to BUY, then delegate to the generic OCO helper.
    params['side'] = self.SIDE_BUY
    return self.create_oco_order(**params)
def order_oco_sell(self, **params):
    """Send in a new OCO sell order.

    :param symbol: required
    :type symbol: str
    :param listClientOrderId: A unique id for the list order. Automatically generated if not sent.
    :type listClientOrderId: str
    :param quantity: required
    :type quantity: decimal
    :param limitClientOrderId: A unique id for the limit order. Automatically generated if not sent.
    :type limitClientOrderId: str
    :param price: required
    :type price: str
    :param limitIcebergQty: Used to make the LIMIT_MAKER leg an iceberg order.
    :type limitIcebergQty: decimal
    :param stopClientOrderId: A unique id for the stop order. Automatically generated if not sent.
    :type stopClientOrderId: str
    :param stopPrice: required
    :type stopPrice: str
    :param stopLimitPrice: If provided, stopLimitTimeInForce is required.
    :type stopLimitPrice: str
    :param stopIcebergQty: Used with STOP_LOSS_LIMIT leg to make an iceberg order.
    :type stopIcebergQty: decimal
    :param stopLimitTimeInForce: Valid values are GTC/FOK/IOC.
    :type stopLimitTimeInForce: str
    :param newOrderRespType: Set the response JSON. ACK, RESULT, or FULL; default: RESULT.
    :type newOrderRespType: str
    :param recvWindow: the number of milliseconds the request is valid for
    :type recvWindow: int

    :returns: API response

    See OCO order endpoint for full response options

    :raises: BinanceRequestException, BinanceAPIException, BinanceOrderException, BinanceOrderMinAmountException, BinanceOrderMinPriceException, BinanceOrderMinTotalException, BinanceOrderUnknownSymbolException, BinanceOrderInactiveSymbolException
    """
    # Fix the side to SELL, then delegate to the generic OCO helper.
    params['side'] = self.SIDE_SELL
    return self.create_oco_order(**params)
def create_test_order(self, **params):
    """Test new order creation and signature/recvWindow handling.

    Creates and validates a new order but does NOT send it into the
    matching engine.

    https://github.com/binance/binance-spot-api-docs/blob/master/rest-api.md#test-new-order-trade

    :param symbol: required
    :type symbol: str
    :param side: required
    :type side: str
    :param type: required
    :type type: str
    :param timeInForce: required if limit order
    :type timeInForce: str
    :param quantity: required
    :type quantity: decimal
    :param price: required
    :type price: str
    :param newClientOrderId: A unique id for the order. Automatically generated if not sent.
    :type newClientOrderId: str
    :param icebergQty: Used with iceberg orders
    :type icebergQty: decimal
    :param newOrderRespType: Set the response JSON. ACK, RESULT, or FULL; default: RESULT.
    :type newOrderRespType: str
    :param recvWindow: The number of milliseconds the request is valid for
    :type recvWindow: int

    :returns: API response (empty dict on success)

    .. code-block:: python

        {}

    :raises: BinanceRequestException, BinanceAPIException, BinanceOrderException, BinanceOrderMinAmountException, BinanceOrderMinPriceException, BinanceOrderMinTotalException, BinanceOrderUnknownSymbolException, BinanceOrderInactiveSymbolException
    """
    # Signed POST against the validation-only endpoint.
    return self._post('order/test', True, data=params)
def get_order(self, **params):
    """Check an order's status. Either orderId or origClientOrderId must be sent.

    https://github.com/binance/binance-spot-api-docs/blob/master/rest-api.md#query-order-user_data

    :param symbol: required
    :param orderId: optional - the unique order id
    :param origClientOrderId: optional - client-assigned order id
    :param recvWindow: optional - number of milliseconds the request is valid for

    :returns: API response dict describing the order (symbol, orderId,
        clientOrderId, price, origQty, executedQty, status, timeInForce,
        type, side, stopPrice, icebergQty, time)

    :raises: BinanceRequestException, BinanceAPIException
    """
    response = self._get('order', True, data=params)
    return response
def get_all_orders(self, **params):
    """Get all account orders for a symbol: active, canceled, or filled.

    https://github.com/binance/binance-spot-api-docs/blob/master/rest-api.md#all-orders-user_data

    :param symbol: required
    :param orderId: optional - the unique order id to start from
    :param limit: optional - default 500; max 500
    :param recvWindow: optional - number of milliseconds the request is valid for

    :returns: API response - a list of order dicts

    :raises: BinanceRequestException, BinanceAPIException
    """
    response = self._get('allOrders', True, data=params)
    return response
def cancel_order(self, **params):
    """Cancel an active order. Either orderId or origClientOrderId must be sent.

    https://github.com/binance/binance-spot-api-docs/blob/master/rest-api.md#cancel-order-trade

    :param symbol: required
    :param orderId: optional - the unique order id
    :param origClientOrderId: optional - client-assigned order id
    :param newClientOrderId: optional - unique id for this cancel, auto-generated by default
    :param recvWindow: optional - number of milliseconds the request is valid for

    :returns: API response dict (symbol, origClientOrderId, orderId, clientOrderId)

    :raises: BinanceRequestException, BinanceAPIException
    """
    response = self._delete('order', True, data=params)
    return response
def get_open_orders(self, **params):
    """Get all currently open orders, optionally filtered by symbol.

    https://github.com/binance/binance-spot-api-docs/blob/master/rest-api.md#current-open-orders-user_data

    :param symbol: optional
    :param recvWindow: optional - number of milliseconds the request is valid for

    :returns: API response - a list of open order dicts

    :raises: BinanceRequestException, BinanceAPIException
    """
    response = self._get('openOrders', True, data=params)
    return response
# Account Endpoints
def get_account(self, **params):
    """Get current account information (commissions, permissions, balances).

    https://github.com/binance/binance-spot-api-docs/blob/master/rest-api.md#account-information-user_data

    :param recvWindow: optional - number of milliseconds the request is valid for

    :returns: API response dict including a "balances" list of
        {"asset", "free", "locked"} entries

    :raises: BinanceRequestException, BinanceAPIException
    """
    response = self._get('account', True, data=params)
    return response
def get_asset_balance(self, asset, **params):
    """Get the current balance entry for a single asset.

    Fetches the full account and scans its balance list; the asset name
    comparison is case-insensitive.

    https://github.com/binance/binance-spot-api-docs/blob/master/rest-api.md#account-information-user_data

    :param asset: required - asset name, e.g. 'BTC'
    :type asset: str
    :param recvWindow: optional - number of milliseconds the request is valid for

    :returns: balance dict {"asset", "free", "locked"} or None if not found

    :raises: BinanceRequestException, BinanceAPIException
    """
    account = self.get_account(**params)
    wanted = asset.lower()
    if "balances" in account:
        match = next(
            (entry for entry in account['balances'] if entry['asset'].lower() == wanted),
            None,
        )
        if match is not None:
            return match
    return None
def get_my_trades(self, **params):
    """Get trades for a specific symbol on the current account.

    https://github.com/binance/binance-spot-api-docs/blob/master/rest-api.md#account-trade-list-user_data

    :param symbol: required
    :param limit: optional - default 500; max 500
    :param fromId: optional - trade id to fetch from; defaults to most recent trades
    :param recvWindow: optional - number of milliseconds the request is valid for

    :returns: API response - a list of trade dicts

    :raises: BinanceRequestException, BinanceAPIException
    """
    response = self._get('myTrades', True, data=params)
    return response
def get_system_status(self):
    """Get system status detail (0: normal, 1: system maintenance).

    https://binance-docs.github.io/apidocs/spot/en/#system-status-system

    :returns: API response dict {"status", "msg"}

    :raises: BinanceAPIException
    """
    status = self._request_withdraw_api('get', 'systemStatus.html')
    return status
def get_account_status(self, **params):
    """Get account status detail.

    https://binance-docs.github.io/apidocs/spot/en/#account-status-user_data

    :param recvWindow: optional - number of milliseconds the request is valid for

    :returns: API response dict with "msg", "success" and "objs" fields

    :raises: BinanceWithdrawException
    """
    response = self._request_withdraw_api('get', 'accountStatus.html', True, data=params)
    if response.get('success'):
        return response
    raise BinanceWithdrawException(response['msg'])
def get_account_api_trading_status(self, **params):
    """Fetch account API trading status detail.

    Reports whether API trading is locked, the planned recovery time, the
    trigger conditions (GCR/IFER/UFR counts) and per-symbol indicator values
    that are updated every 30 seconds.

    https://binance-docs.github.io/apidocs/spot/en/#account-api-trading-status-user_data

    :param recvWindow: optional - number of milliseconds the request is valid for

    :returns: API response dict with "success" and a "status" payload

    :raises: BinanceWithdrawException
    """
    response = self._request_withdraw_api('get', 'apiTradingStatus.html', True, data=params)
    if response.get('success'):
        return response
    raise BinanceWithdrawException(response['msg'])
def get_dust_log(self, **params):
    """Get the log of small amounts ("dust") exchanged for BNB.

    https://binance-docs.github.io/apidocs/spot/en/#dustlog-user_data

    :param recvWindow: optional - number of milliseconds the request is valid for

    :returns: API response dict with "success" and a "results" payload
        containing exchange rows and their per-asset transfer logs

    :raises: BinanceWithdrawException
    """
    response = self._request_withdraw_api('get', 'userAssetDribbletLog.html', True, data=params)
    if response.get('success'):
        return response
    raise BinanceWithdrawException(response['msg'])
def transfer_dust(self, **params):
    """Convert dust assets to BNB.

    https://binance-docs.github.io/apidocs/spot/en/#dust-transfer-user_data

    .. code:: python

        result = client.transfer_dust(asset='ONE')

    :param asset: required - the asset being converted, e.g. 'ONE'
    :type asset: str
    :param recvWindow: optional - number of milliseconds the request is valid for

    :returns: API response dict with totalServiceCharge, totalTransfered
        and a transferResult list

    :raises: BinanceRequestException, BinanceAPIException
    """
    response = self._request_margin_api('post', 'asset/dust', True, data=params)
    return response
def get_asset_dividend_history(self, **params):
    """Query the asset dividend record.

    https://binance-docs.github.io/apidocs/spot/en/#asset-dividend-record-user_data

    .. code:: python

        result = client.get_asset_dividend_history()

    :param asset: optional
    :param startTime: optional
    :param endTime: optional
    :param recvWindow: optional - number of milliseconds the request is valid for

    :returns: API response dict with "rows" (dividend entries) and "total"

    :raises: BinanceRequestException, BinanceAPIException
    """
    response = self._request_margin_api('get', 'asset/assetDividend', True, data=params)
    return response
def make_universal_transfer(self, **params):
    """Submit a user universal transfer between wallet types.

    https://binance-docs.github.io/apidocs/spot/en/#user-universal-transfer

    .. code:: python

        transfer_status = client.make_universal_transfer(params)

    :param type: required - transfer type enum, e.g. 'MAIN_UMFUTURE'
    :param asset: required
    :param amount: required
    :param recvWindow: optional - number of milliseconds the request is valid for

    :returns: API response dict {"tranId": ...}

    :raises: BinanceRequestException, BinanceAPIException
    """
    response = self._request_margin_api('post', 'asset/transfer', signed=True, data=params)
    return response
def query_universal_transfer_history(self, **params):
    """Query user universal transfer history.

    https://binance-docs.github.io/apidocs/spot/en/#query-user-universal-transfer-history

    .. code:: python

        transfer_status = client.query_universal_transfer_history(params)

    :param type: required - transfer type enum, e.g. 'MAIN_UMFUTURE'
    :param startTime: optional
    :param endTime: optional
    :param current: optional - default 1
    :param size: optional - default 10, max 100
    :param recvWindow: optional - number of milliseconds the request is valid for

    :returns: API response dict with "total" and a "rows" list of transfers

    :raises: BinanceRequestException, BinanceAPIException
    """
    response = self._request_margin_api('get', 'asset/transfer', signed=True, data=params)
    return response
def get_trade_fee(self, **params):
    """Get maker/taker trade fees, optionally for a single symbol.

    https://binance-docs.github.io/apidocs/spot/en/#trade-fee-user_data

    :param symbol: optional
    :param recvWindow: optional - number of milliseconds the request is valid for

    :returns: API response dict with a "tradeFee" list and "success" flag

    :raises: BinanceWithdrawException
    """
    response = self._request_withdraw_api('get', 'tradeFee.html', True, data=params)
    if response.get('success'):
        return response
    raise BinanceWithdrawException(response['msg'])
def get_asset_details(self, **params):
    """Fetch per-asset details (withdraw fees, deposit/withdraw status, minimums).

    https://binance-docs.github.io/apidocs/spot/en/#asset-detail-user_data

    :param recvWindow: optional - number of milliseconds the request is valid for

    :returns: API response dict with "success" and an "assetDetail" mapping
        keyed by asset name

    :raises: BinanceWithdrawException
    """
    response = self._request_withdraw_api('get', 'assetDetail.html', True, data=params)
    if response.get('success'):
        return response
    raise BinanceWithdrawException(response['msg'])
# Withdraw Endpoints
def withdraw(self, **params):
    """Submit a withdraw request.

    https://www.binance.com/restapipub.html

    Assumptions:

    - You must have Withdraw permissions enabled on your API key
    - You must have withdrawn to the address specified through the website
      and approved the transaction via email

    :param asset: required
    :type asset: str
    :param address: required
    :type address: str
    :param addressTag: optional - secondary address identifier for coins like XRP, XMR etc.
    :param amount: required
    :type amount: decimal
    :param name: optional - description of the address; defaults to the asset value
    :type name: str
    :param recvWindow: optional - number of milliseconds the request is valid for

    :returns: API response dict {"msg", "success", "id"}

    :raises: BinanceRequestException, BinanceAPIException, BinanceWithdrawException
    """
    # The endpoint requires a name; default it to the asset when not supplied.
    if 'asset' in params and 'name' not in params:
        params['name'] = params['asset']
    response = self._request_withdraw_api('post', 'withdraw.html', True, data=params)
    if response.get('success'):
        return response
    raise BinanceWithdrawException(response['msg'])
def get_deposit_history(self, **params):
    """Fetch deposit history.

    https://www.binance.com/restapipub.html

    :param asset: optional
    :param status: optional - 0: pending, 1: success
    :param startTime: optional
    :param endTime: optional
    :param recvWindow: optional - number of milliseconds the request is valid for

    :returns: API response dict with a "depositList" and "success" flag

    :raises: BinanceRequestException, BinanceAPIException
    """
    response = self._request_withdraw_api('get', 'depositHistory.html', True, data=params)
    return response
def get_withdraw_history(self, **params):
    """Fetch withdraw history.

    https://www.binance.com/restapipub.html

    :param asset: optional
    :param status: optional - 0: Email Sent, 1: Cancelled, 2: Awaiting Approval,
        3: Rejected, 4: Processing, 5: Failure, 6: Completed
    :param startTime: optional
    :param endTime: optional
    :param recvWindow: optional - number of milliseconds the request is valid for

    :returns: API response dict with a "withdrawList" and "success" flag

    :raises: BinanceRequestException, BinanceAPIException
    """
    response = self._request_withdraw_api('get', 'withdrawHistory.html', True, data=params)
    return response
def get_withdraw_history_id(self, withdraw_id, **params):
    """Fetch a single withdraw entry by its withdraw id.

    Retrieves the withdraw history (optionally narrowed by the same filters
    as :meth:`get_withdraw_history`) and returns the entry whose ``id``
    matches ``withdraw_id``.

    https://www.binance.com/restapipub.html

    :param withdraw_id: required - id of the withdrawal to look up
    :type withdraw_id: str
    :param asset: optional
    :param status: optional - 0: Email Sent, 1: Cancelled, 2: Awaiting Approval,
        3: Rejected, 4: Processing, 5: Failure, 6: Completed
    :param startTime: optional
    :param endTime: optional
    :param recvWindow: optional - number of milliseconds the request is valid for

    :returns: the matching withdraw entry dict

    :raises: BinanceRequestException, BinanceAPIException,
        ValueError if no entry with the given id exists
    """
    result = self._request_withdraw_api('get', 'withdrawHistory.html', True, data=params)
    # 'withdrawList' can be absent from the response; treat that as an empty
    # history instead of letting a KeyError escape.
    for entry in result.get('withdrawList', []):
        # entry.get avoids a KeyError on rows that carry no 'id' field.
        if entry.get('id') == withdraw_id:
            return entry
    # ValueError is a subclass of Exception, so existing broad handlers keep
    # working while callers gain a narrower type to catch.
    raise ValueError(
        "There is no entry with withdraw id {!r}; response was: {}".format(withdraw_id, result)
    )
def get_deposit_address(self, **params):
    """Fetch a deposit address for an asset.

    https://www.binance.com/restapipub.html

    :param asset: required
    :param recvWindow: optional - number of milliseconds the request is valid for

    :returns: API response dict {"address", "success", "addressTag", "asset"}

    :raises: BinanceRequestException, BinanceAPIException
    """
    response = self._request_withdraw_api('get', 'depositAddress.html', True, data=params)
    return response
# User Stream Endpoints
def stream_get_listen_key(self):
    """Start a new user data stream and return the listen key.

    If a stream already exists the same key is returned; if the stream has
    become invalid a new key is returned. Can be used to keep the user
    stream alive.

    https://github.com/binance/binance-spot-api-docs/blob/master/rest-api.md#start-user-data-stream-user_stream

    :returns: the listen key string

    :raises: BinanceRequestException, BinanceAPIException
    """
    response = self._post('userDataStream', False, data={}, version=self.PRIVATE_API_VERSION)
    return response['listenKey']
def stream_keepalive(self, listenKey):
    """PING a user data stream to prevent it from timing out.

    https://github.com/binance/binance-spot-api-docs/blob/master/rest-api.md#keepalive-user-data-stream-user_stream

    :param listenKey: required
    :type listenKey: str

    :returns: API response (an empty dict on success)

    :raises: BinanceRequestException, BinanceAPIException
    """
    payload = {'listenKey': listenKey}
    return self._put('userDataStream', False, data=payload, version=self.PRIVATE_API_VERSION)
def stream_close(self, listenKey):
    """Close out a user data stream.

    https://github.com/binance/binance-spot-api-docs/blob/master/rest-api.md#close-user-data-stream-user_stream

    :param listenKey: required
    :type listenKey: str

    :returns: API response (an empty dict on success)

    :raises: BinanceRequestException, BinanceAPIException
    """
    payload = {'listenKey': listenKey}
    return self._delete('userDataStream', False, data=payload, version=self.PRIVATE_API_VERSION)
# Margin Trading Endpoints
def get_margin_account(self, **params):
    """Query cross-margin account details.

    https://binance-docs.github.io/apidocs/spot/en/#query-cross-margin-account-details-user_data

    :returns: API response dict with margin level, BTC-denominated totals
        and a "userAssets" list of per-asset balances

    :raises: BinanceRequestException, BinanceAPIException
    """
    response = self._request_margin_api('get', 'margin/account', True, data=params)
    return response
def get_isolated_margin_account(self, **params):
    """Query isolated margin account details.

    https://binance-docs.github.io/apidocs/spot/en/#query-isolated-margin-symbol-user_data

    .. code:: python

        account_info = client.get_isolated_margin_account()
        account_info = client.get_isolated_margin_account(symbols="BTCUSDT,ETHUSDT")

    :param symbols: optional - up to 5 margin pairs as a comma-separated string
    :type symbols: str

    :returns: API response dict with an "assets" list (base/quote asset
        details, margin level and status per pair); when "symbols" is not
        sent the response also includes account-wide BTC totals

    :raises: BinanceRequestException, BinanceAPIException
    """
    response = self._request_margin_api('get', 'margin/isolated/account', True, data=params)
    return response
def get_margin_asset(self, **params):
    """Query cross-margin asset details.

    https://binance-docs.github.io/apidocs/spot/en/#query-margin-asset-market_data

    .. code:: python

        asset_details = client.get_margin_asset(asset='BNB')

    :param asset: required - name of the asset
    :type asset: str

    :returns: API response dict (assetFullName, assetName, isBorrowable,
        isMortgageable, userMinBorrow, userMinRepay)

    :raises: BinanceRequestException, BinanceAPIException
    """
    response = self._request_margin_api('get', 'margin/asset', data=params)
    return response
def get_margin_symbol(self, **params):
    """Query cross-margin pair info for a symbol.

    https://binance-docs.github.io/apidocs/spot/en/#query-cross-margin-pair-market_data

    .. code:: python

        pair_details = client.get_margin_symbol(symbol='BTCUSDT')

    :param symbol: required - name of the symbol pair
    :type symbol: str

    :returns: API response dict (id, symbol, base, quote, isMarginTrade,
        isBuyAllowed, isSellAllowed)

    :raises: BinanceRequestException, BinanceAPIException
    """
    response = self._request_margin_api('get', 'margin/pair', data=params)
    return response
def create_isolated_margin_account(self, **params):
    """Create an isolated margin account for a symbol.

    https://binance-docs.github.io/apidocs/spot/en/#create-isolated-margin-account-margin

    .. code:: python

        pair_details = client.create_isolated_margin_account(base='USDT', quote='BTC')

    :param base: required - base asset of the symbol
    :type base: str
    :param quote: required - quote asset of the symbol
    :type quote: str

    :returns: API response dict {"success", "symbol"}

    :raises: BinanceRequestException, BinanceAPIException
    """
    response = self._request_margin_api('post', 'margin/isolated/create', signed=True, data=params)
    return response
def get_isolated_margin_symbol(self, **params):
    """Query isolated margin pair info for a symbol.

    https://binance-docs.github.io/apidocs/spot/en/#query-isolated-margin-symbol-user_data

    .. code:: python

        pair_details = client.get_isolated_margin_symbol(symbol='BTCUSDT')

    :param symbol: required - name of the symbol pair
    :type symbol: str

    :returns: API response dict (symbol, base, quote, isMarginTrade,
        isBuyAllowed, isSellAllowed)

    :raises: BinanceRequestException, BinanceAPIException
    """
    response = self._request_margin_api('get', 'margin/isolated/pair', signed=True, data=params)
    return response
def get_all_isolated_margin_symbols(self, **params):
    """Query isolated margin pair info for all pairs.

    https://binance-docs.github.io/apidocs/spot/en/#get-all-isolated-margin-symbol-user_data

    .. code:: python

        pair_details = client.get_all_isolated_margin_symbols()

    :returns: API response - a list of pair dicts (base, quote, symbol,
        isMarginTrade, isBuyAllowed, isSellAllowed)

    :raises: BinanceRequestException, BinanceAPIException
    """
    response = self._request_margin_api('get', 'margin/isolated/allPairs', signed=True, data=params)
    return response
def toggle_bnb_burn_spot_margin(self, **params):
    """Toggle BNB burn on spot trade fees and margin interest.

    https://binance-docs.github.io/apidocs/spot/en/#toggle-bnb-burn-on-spot-trade-and-margin-interest-user_data

    .. code:: python

        response = client.toggle_bnb_burn_spot_margin()

    :param spotBNBBurn: whether to use BNB to pay for trading fees on SPOT
    :type spotBNBBurn: bool
    :param interestBNBBurn: whether to use BNB to pay for margin loan interest
    :type interestBNBBurn: bool

    :returns: API response dict {"spotBNBBurn", "interestBNBBurn"}

    :raises: BinanceRequestException, BinanceAPIException
    """
    response = self._request_margin_api('post', 'bnbBurn', signed=True, data=params)
    return response
def get_bnb_burn_spot_margin(self, **params):
    """Get the current BNB burn status.

    https://binance-docs.github.io/apidocs/spot/en/#get-bnb-burn-status-user_data

    .. code:: python

        status = client.get_bnb_burn_spot_margin()

    :returns: API response dict {"spotBNBBurn", "interestBNBBurn"}

    :raises: BinanceRequestException, BinanceAPIException
    """
    response = self._request_margin_api('get', 'bnbBurn', signed=True, data=params)
    return response
def get_margin_price_index(self, **params):
    """Query the margin price index for a symbol.

    https://binance-docs.github.io/apidocs/spot/en/#query-margin-priceindex-market_data

    .. code:: python

        price_index_details = client.get_margin_price_index(symbol='BTCUSDT')

    :param symbol: required - name of the symbol pair
    :type symbol: str

    :returns: API response dict {"calcTime", "price", "symbol"}

    :raises: BinanceRequestException, BinanceAPIException
    """
    response = self._request_margin_api('get', 'margin/priceIndex', data=params)
    return response
def transfer_margin_to_spot(self, **params):
    """Transfer an asset from the cross-margin account to the spot account.

    https://binance-docs.github.io/apidocs/spot/en/#cross-margin-account-transfer-margin

    .. code:: python

        transfer = client.transfer_margin_to_spot(asset='BTC', amount='1.1')

    :param asset: required - name of the asset
    :type asset: str
    :param amount: required - amount to transfer
    :type amount: str
    :param recvWindow: optional - number of milliseconds the request is valid for

    :returns: API response dict {"tranId": ...}

    :raises: BinanceRequestException, BinanceAPIException
    """
    # type 2 selects margin -> spot on the shared transfer endpoint
    payload = dict(params, type=2)
    return self._request_margin_api('post', 'margin/transfer', signed=True, data=payload)
def transfer_spot_to_margin(self, **params):
    """Transfer an asset from the spot account to the cross-margin account.

    https://binance-docs.github.io/apidocs/spot/en/#cross-margin-account-transfer-margin

    .. code:: python

        transfer = client.transfer_spot_to_margin(asset='BTC', amount='1.1')

    :param asset: required - name of the asset
    :type asset: str
    :param amount: required - amount to transfer
    :type amount: str
    :param recvWindow: optional - number of milliseconds the request is valid for

    :returns: API response dict {"tranId": ...}

    :raises: BinanceRequestException, BinanceAPIException
    """
    # type 1 selects spot -> margin on the shared transfer endpoint
    payload = dict(params, type=1)
    return self._request_margin_api('post', 'margin/transfer', signed=True, data=payload)
def transfer_isolated_margin_to_spot(self, **params):
    """Transfer an asset from an isolated margin account to the spot account.

    https://binance-docs.github.io/apidocs/spot/en/#isolated-margin-account-transfer-margin

    .. code:: python

        transfer = client.transfer_isolated_margin_to_spot(asset='BTC',
                                                           symbol='ETHBTC', amount='1.1')

    :param asset: required - name of the asset
    :type asset: str
    :param symbol: required - pair symbol
    :type symbol: str
    :param amount: required - amount to transfer
    :type amount: str
    :param recvWindow: optional - number of milliseconds the request is valid for

    :returns: API response dict {"tranId": ...}

    :raises: BinanceRequestException, BinanceAPIException
    """
    payload = dict(params, transFrom="ISOLATED_MARGIN", transTo="SPOT")
    return self._request_margin_api('post', 'margin/isolated/transfer', signed=True, data=payload)
def transfer_spot_to_isolated_margin(self, **params):
    """Move funds from the spot wallet into an isolated-margin account.

    https://binance-docs.github.io/apidocs/spot/en/#isolated-margin-account-transfer-margin

    :param asset: asset symbol, e.g. 'BTC'
    :type asset: str
    :param symbol: isolated-margin pair symbol, e.g. 'ETHBTC'
    :type symbol: str
    :param amount: quantity to move, as a string
    :type amount: str
    :param recvWindow: request validity window in milliseconds
    :type recvWindow: int

    :returns: API response, e.g. ``{"tranId": 100000001}``
    :raises: BinanceRequestException, BinanceAPIException
    """
    # Fixed transfer direction: spot wallet -> isolated-margin account.
    params['transFrom'] = "SPOT"
    params['transTo'] = "ISOLATED_MARGIN"
    return self._request_margin_api('post', 'margin/isolated/transfer', signed=True, data=params)
def create_margin_loan(self, **params):
    """Apply for a loan in a cross-margin or isolated-margin account.

    https://binance-docs.github.io/apidocs/spot/en/#margin-account-borrow-margin

    :param asset: name of the asset
    :type asset: str
    :param amount: amount to borrow
    :type amount: str
    :param isIsolated: set to 'TRUE' for isolated margin (default 'FALSE')
    :type isIsolated: str
    :param symbol: isolated-margin symbol (omit for cross-margin)
    :type symbol: str
    :param recvWindow: the number of milliseconds the request is valid for
    :type recvWindow: int

    .. code:: python

        transaction = client.create_margin_loan(asset='BTC', amount='1.1')

        transaction = client.create_margin_loan(asset='BTC', amount='1.1',
                                                isIsolated='TRUE', symbol='ETHBTC')

    :returns: API response

    .. code-block:: python

        {
            "tranId": 100000001
        }

    :raises: BinanceRequestException, BinanceAPIException
    """
    return self._request_margin_api('post', 'margin/loan', signed=True, data=params)
def repay_margin_loan(self, **params):
    """Repay a loan in a cross-margin or isolated-margin account.

    If amount is more than the amount borrowed, the full loan will be repaid.

    https://binance-docs.github.io/apidocs/spot/en/#margin-account-repay-margin

    :param asset: name of the asset
    :type asset: str
    :param amount: amount to repay
    :type amount: str
    :param isIsolated: set to 'TRUE' for isolated margin (default 'FALSE')
    :type isIsolated: str
    :param symbol: isolated-margin symbol (omit for cross-margin)
    :type symbol: str
    :param recvWindow: the number of milliseconds the request is valid for
    :type recvWindow: int

    .. code:: python

        transaction = client.repay_margin_loan(asset='BTC', amount='1.1')

        transaction = client.repay_margin_loan(asset='BTC', amount='1.1',
                                               isIsolated='TRUE', symbol='ETHBTC')

    :returns: API response

    .. code-block:: python

        {
            "tranId": 100000001
        }

    :raises: BinanceRequestException, BinanceAPIException
    """
    return self._request_margin_api('post', 'margin/repay', signed=True, data=params)
def create_margin_order(self, **params):
    """Place a new order on a margin account (cross or isolated).

    https://binance-docs.github.io/apidocs/spot/en/#margin-account-new-order-trade

    :param symbol: required
    :type symbol: str
    :param isIsolated: set to 'TRUE' for isolated margin (default 'FALSE')
    :type isIsolated: str
    :param side: required, BUY or SELL
    :type side: str
    :param type: required, order type (e.g. LIMIT, MARKET)
    :type type: str
    :param quantity: required
    :type quantity: decimal
    :param price: required for limit orders
    :type price: str
    :param stopPrice: used with STOP_LOSS, STOP_LOSS_LIMIT, TAKE_PROFIT and
        TAKE_PROFIT_LIMIT orders
    :type stopPrice: str
    :param timeInForce: required for limit orders: GTC, IOC or FOK
    :type timeInForce: str
    :param newClientOrderId: unique client-side order id; generated by the
        exchange when omitted
    :type newClientOrderId: str
    :param icebergQty: used with LIMIT, STOP_LOSS_LIMIT and TAKE_PROFIT_LIMIT
        to create an iceberg order
    :type icebergQty: str
    :param newOrderRespType: response detail level: ACK, RESULT or FULL.
        MARKET and LIMIT orders default to FULL, all others to ACK.
    :type newOrderRespType: str
    :param recvWindow: the number of milliseconds the request is valid for
    :type recvWindow: int

    :returns: API response; shape depends on ``newOrderRespType``.
        ACK returns the order ids only, RESULT adds the execution summary
        (price, origQty, executedQty, cummulativeQuoteQty, status,
        timeInForce, type, side), and FULL additionally includes a
        ``fills`` list with per-fill price, qty, commission and
        commissionAsset. For example (ACK):

    .. code-block:: python

        {
            "symbol": "BTCUSDT",
            "orderId": 28,
            "clientOrderId": "6gCrw2kRUAF9CvJDGP16IP",
            "transactTime": 1507725176595
        }

    :raises: BinanceRequestException, BinanceAPIException, BinanceOrderException, BinanceOrderMinAmountException,
        BinanceOrderMinPriceException, BinanceOrderMinTotalException, BinanceOrderUnknownSymbolException,
        BinanceOrderInactiveSymbolException
    """
    return self._request_margin_api('post', 'margin/order', signed=True, data=params)
def cancel_margin_order(self, **params):
    """Cancel an active margin-account order.

    Either ``orderId`` or ``origClientOrderId`` must be sent.

    https://binance-docs.github.io/apidocs/spot/en/#margin-account-cancel-order-trade

    :param symbol: required
    :type symbol: str
    :param isIsolated: set to 'TRUE' for isolated margin (default 'FALSE')
    :type isIsolated: str
    :param orderId: id of the order to cancel
    :type orderId: str
    :param origClientOrderId: client order id of the order to cancel
    :type origClientOrderId: str
    :param newClientOrderId: unique id for this cancel; generated by the
        exchange when omitted
    :type newClientOrderId: str
    :param recvWindow: the number of milliseconds the request is valid for
    :type recvWindow: int

    :returns: API response, e.g.

    .. code-block:: python

        {
            "symbol": "LTCBTC",
            "orderId": 28,
            "origClientOrderId": "myOrder1",
            "clientOrderId": "cancelMyOrder1",
            "transactTime": 1507725176595,
            "price": "1.00000000",
            "origQty": "10.00000000",
            "executedQty": "8.00000000",
            "cummulativeQuoteQty": "8.00000000",
            "status": "CANCELED",
            "timeInForce": "GTC",
            "type": "LIMIT",
            "side": "SELL"
        }

    :raises: BinanceRequestException, BinanceAPIException
    """
    return self._request_margin_api('delete', 'margin/order', signed=True, data=params)
def get_margin_loan_details(self, **params):
    """Query margin loan records.

    Either ``txId`` or ``startTime`` must be sent; ``txId`` takes precedence.

    https://binance-docs.github.io/apidocs/spot/en/#query-loan-record-user_data

    :param asset: required
    :type asset: str
    :param isolatedSymbol: isolated symbol (if querying isolated margin)
    :type isolatedSymbol: str
    :param txId: the tranId of the created loan
    :type txId: str
    :param startTime: earliest timestamp to filter transactions
    :type startTime: str
    :param endTime: latest timestamp to filter transactions
    :type endTime: str
    :param current: page to query, starting from 1 (default 1)
    :type current: str
    :param size: page size; default 10, max 100
    :type size: int
    :param recvWindow: the number of milliseconds the request is valid for
    :type recvWindow: int

    :returns: API response

    .. code-block:: python

        {
            "rows": [
                {
                    "asset": "BNB",
                    "principal": "0.84624403",
                    "timestamp": 1555056425000,
                    # one of PENDING (pending execution), CONFIRMED (successfully
                    # loaned), FAILED (execution failed, nothing happened)
                    "status": "CONFIRMED"
                }
            ],
            "total": 1
        }

    :raises: BinanceRequestException, BinanceAPIException
    """
    return self._request_margin_api('get', 'margin/loan', signed=True, data=params)
def get_margin_repay_details(self, **params):
    """Query margin repayment records.

    Either ``txId`` or ``startTime`` must be sent; ``txId`` takes precedence.

    https://binance-docs.github.io/apidocs/spot/en/#query-repay-record-user_data

    :param asset: required
    :type asset: str
    :param isolatedSymbol: isolated symbol (if querying isolated margin)
    :type isolatedSymbol: str
    :param txId: the tranId of the repayment
    :type txId: str
    :param startTime: earliest timestamp to filter transactions
    :type startTime: str
    :param endTime: latest timestamp to filter transactions
    :type endTime: str
    :param current: page to query, starting from 1 (default 1)
    :type current: str
    :param size: page size; default 10, max 100
    :type size: int
    :param recvWindow: the number of milliseconds the request is valid for
    :type recvWindow: int

    :returns: API response

    .. code-block:: python

        {
            "rows": [
                {
                    # Total amount repaid
                    "amount": "14.00000000",
                    "asset": "BNB",
                    # Interest repaid
                    "interest": "0.01866667",
                    # Principal repaid
                    "principal": "13.98133333",
                    # one of PENDING, CONFIRMED, FAILED
                    "status": "CONFIRMED",
                    "timestamp": 1563438204000,
                    "txId": 2970933056
                }
            ],
            "total": 1
        }

    :raises: BinanceRequestException, BinanceAPIException
    """
    return self._request_margin_api('get', 'margin/repay', signed=True, data=params)
def get_margin_order(self, **params):
    """Fetch a single margin-account order.

    Either ``orderId`` or ``origClientOrderId`` must be sent. For some
    historical orders ``cummulativeQuoteQty`` will be < 0, meaning the data
    is not available at this time.

    https://binance-docs.github.io/apidocs/spot/en/#query-margin-account-39-s-order-user_data

    :param symbol: required
    :type symbol: str
    :param isIsolated: set to 'TRUE' for isolated margin (default 'FALSE')
    :type isIsolated: str
    :param orderId: id of the order to fetch
    :type orderId: str
    :param origClientOrderId: client order id of the order to fetch
    :type origClientOrderId: str
    :param recvWindow: the number of milliseconds the request is valid for
    :type recvWindow: int

    :returns: API response, e.g.

    .. code-block:: python

        {
            "clientOrderId": "ZwfQzuDIGpceVhKW5DvCmO",
            "cummulativeQuoteQty": "0.00000000",
            "executedQty": "0.00000000",
            "icebergQty": "0.00000000",
            "isWorking": true,
            "orderId": 213205622,
            "origQty": "0.30000000",
            "price": "0.00493630",
            "side": "SELL",
            "status": "NEW",
            "stopPrice": "0.00000000",
            "symbol": "BNBBTC",
            "time": 1562133008725,
            "timeInForce": "GTC",
            "type": "LIMIT",
            "updateTime": 1562133008725
        }

    :raises: BinanceRequestException, BinanceAPIException
    """
    return self._request_margin_api('get', 'margin/order', signed=True, data=params)
def get_open_margin_orders(self, **params):
    """List open margin-account orders.

    With no ``symbol``, open orders for all symbols are returned (cross-margin
    only). For isolated margin both ``isIsolated='TRUE'`` and ``symbol`` must
    be set. When all symbols are returned, the request is weighted by the
    number of symbols currently trading on the exchange.

    https://binance-docs.github.io/apidocs/spot/en/#query-margin-account-39-s-open-order-user_data

    :param symbol: optional
    :type symbol: str
    :param isIsolated: set to 'TRUE' for isolated margin (default 'FALSE')
    :type isIsolated: str
    :param recvWindow: the number of milliseconds the request is valid for
    :type recvWindow: int

    :returns: API response, e.g.

    .. code-block:: python

        [
            {
                "clientOrderId": "qhcZw71gAkCCTv0t0k8LUK",
                "cummulativeQuoteQty": "0.00000000",
                "executedQty": "0.00000000",
                "icebergQty": "0.00000000",
                "isWorking": true,
                "orderId": 211842552,
                "origQty": "0.30000000",
                "price": "0.00475010",
                "side": "SELL",
                "status": "NEW",
                "stopPrice": "0.00000000",
                "symbol": "BNBBTC",
                "time": 1562040170089,
                "timeInForce": "GTC",
                "type": "LIMIT",
                "updateTime": 1562040170089
            }
        ]

    :raises: BinanceRequestException, BinanceAPIException
    """
    return self._request_margin_api('get', 'margin/openOrders', signed=True, data=params)
def get_all_margin_orders(self, **params):
    """Query all margin-account orders for a symbol.

    If ``orderId`` is set, orders >= that orderId are returned; otherwise the
    most recent orders are returned. For some historical orders
    ``cummulativeQuoteQty`` will be < 0, meaning the data is not available at
    this time.

    https://binance-docs.github.io/apidocs/spot/en/#query-margin-account-39-s-all-order-user_data

    :param symbol: required
    :type symbol: str
    :param isIsolated: set to 'TRUE' for isolated margin (default 'FALSE')
    :type isIsolated: str
    :param orderId: optional
    :type orderId: str
    :param startTime: optional
    :type startTime: str
    :param endTime: optional
    :type endTime: str
    :param limit: Default 500; max 1000
    :type limit: int
    :param recvWindow: the number of milliseconds the request is valid for
    :type recvWindow: int

    :returns: API response — a list of order objects with the same fields as
        returned by :meth:`get_margin_order` (clientOrderId,
        cummulativeQuoteQty, executedQty, icebergQty, isWorking, orderId,
        origQty, price, side, status, stopPrice, symbol, time, timeInForce,
        type, updateTime), e.g.

    .. code-block:: python

        [
            {
                "clientOrderId": "D2KDy4DIeS56PvkM13f8cP",
                "cummulativeQuoteQty": "0.00000000",
                "executedQty": "0.00000000",
                "icebergQty": "0.00000000",
                "isWorking": false,
                "orderId": 41295,
                "origQty": "5.31000000",
                "price": "0.22500000",
                "side": "SELL",
                "status": "CANCELED",
                "stopPrice": "0.18000000",
                "symbol": "BNBBTC",
                "time": 1565769338806,
                "timeInForce": "GTC",
                "type": "TAKE_PROFIT_LIMIT",
                "updateTime": 1565769342148
            }
        ]

    :raises: BinanceRequestException, BinanceAPIException
    """
    return self._request_margin_api('get', 'margin/allOrders', signed=True, data=params)
def get_margin_trades(self, **params):
    """List trades executed on a margin account.

    If ``fromId`` is set, trades >= that id are returned; otherwise the most
    recent trades are returned.

    https://binance-docs.github.io/apidocs/spot/en/#query-margin-account-39-s-trade-list-user_data

    :param symbol: required
    :type symbol: str
    :param isIsolated: set to 'TRUE' for isolated margin (default 'FALSE')
    :type isIsolated: str
    :param fromId: optional
    :type fromId: str
    :param startTime: optional
    :type startTime: str
    :param endTime: optional
    :type endTime: str
    :param limit: Default 500; max 1000
    :type limit: int
    :param recvWindow: the number of milliseconds the request is valid for
    :type recvWindow: int

    :returns: API response, e.g.

    .. code-block:: python

        [
            {
                "commission": "0.00006000",
                "commissionAsset": "BTC",
                "id": 34,
                "isBestMatch": true,
                "isBuyer": false,
                "isMaker": false,
                "orderId": 39324,
                "price": "0.02000000",
                "qty": "3.00000000",
                "symbol": "BNBBTC",
                "time": 1561973357171
            }
        ]

    :raises: BinanceRequestException, BinanceAPIException
    """
    return self._request_margin_api('get', 'margin/myTrades', signed=True, data=params)
def get_max_margin_loan(self, **params):
    """Return the maximum amount of an asset that can currently be borrowed.

    https://binance-docs.github.io/apidocs/spot/en/#query-max-borrow-user_data

    :param asset: required
    :type asset: str
    :param isolatedSymbol: isolated symbol (if querying isolated margin)
    :type isolatedSymbol: str
    :param recvWindow: the number of milliseconds the request is valid for
    :type recvWindow: int

    :returns: API response, e.g. ``{"amount": "1.69248805"}``
    :raises: BinanceRequestException, BinanceAPIException
    """
    return self._request_margin_api('get', 'margin/maxBorrowable', signed=True, data=params)
def get_max_margin_transfer(self, **params):
    """Return the maximum amount of an asset that can be transferred out.

    https://binance-docs.github.io/apidocs/spot/en/#query-max-transfer-out-amount-user_data

    :param asset: required
    :type asset: str
    :param isolatedSymbol: isolated symbol (if querying isolated margin)
    :type isolatedSymbol: str
    :param recvWindow: the number of milliseconds the request is valid for
    :type recvWindow: int

    :returns: API response, e.g. ``{"amount": "3.59498107"}``
    :raises: BinanceRequestException, BinanceAPIException
    """
    return self._request_margin_api('get', 'margin/maxTransferable', signed=True, data=params)
# Cross-margin
def margin_stream_get_listen_key(self):
    """Open a cross-margin user data stream and return its listen key.

    If a stream already exists the same key is returned; if the stream became
    invalid a new key is issued. Calling this also keeps the stream alive.

    https://binance-docs.github.io/apidocs/spot/en/#listen-key-margin

    :returns: the listen key string, e.g.
        ``"pqia91ma19a5s61cv6a81va65sdf19v8a65a1a5s61cv6a81va65sdf19v8a65a1"``
    :raises: BinanceRequestException, BinanceAPIException
    """
    # API-key-only endpoint: authenticated via header, no request signature.
    res = self._request_margin_api('post', 'userDataStream', signed=False, data={})
    return res['listenKey']
def margin_stream_keepalive(self, listenKey):
    """Ping a cross-margin user data stream so it does not time out.

    https://binance-docs.github.io/apidocs/spot/en/#listen-key-margin

    :param listenKey: required
    :type listenKey: str

    :returns: API response (an empty dict on success)
    :raises: BinanceRequestException, BinanceAPIException
    """
    payload = {'listenKey': listenKey}
    return self._request_margin_api('put', 'userDataStream', signed=False, data=payload)
def margin_stream_close(self, listenKey):
    """Close a cross-margin user data stream.

    https://binance-docs.github.io/apidocs/spot/en/#listen-key-margin

    :param listenKey: required
    :type listenKey: str

    :returns: API response (an empty dict on success)
    :raises: BinanceRequestException, BinanceAPIException
    """
    payload = {'listenKey': listenKey}
    return self._request_margin_api('delete', 'userDataStream', signed=False, data=payload)
# Isolated margin
def isolated_margin_stream_get_listen_key(self, symbol):
    """Open an isolated-margin user data stream and return its listen key.

    If a stream already exists the same key is returned; if the stream became
    invalid a new key is issued. Calling this also keeps the stream alive.

    https://binance-docs.github.io/apidocs/spot/en/#listen-key-isolated-margin

    :param symbol: required - symbol of the isolated margin account
    :type symbol: str

    :returns: the listen key string, e.g.
        ``"T3ee22BIYuWqmvne0HNq2A2WsFlEtLhvWCtItw6ffhhdmjifQ2tRbuKkTHhr"``
    :raises: BinanceRequestException, BinanceAPIException
    """
    payload = {'symbol': symbol}
    res = self._request_margin_api('post', 'userDataStream/isolated', signed=False, data=payload)
    return res['listenKey']
def isolated_margin_stream_keepalive(self, symbol, listenKey):
    """Ping an isolated-margin user data stream so it does not time out.

    https://binance-docs.github.io/apidocs/spot/en/#listen-key-isolated-margin

    :param symbol: required - symbol of the isolated margin account
    :type symbol: str
    :param listenKey: required
    :type listenKey: str

    :returns: API response (an empty dict on success)
    :raises: BinanceRequestException, BinanceAPIException
    """
    payload = {'symbol': symbol, 'listenKey': listenKey}
    return self._request_margin_api('put', 'userDataStream/isolated', signed=False, data=payload)
def isolated_margin_stream_close(self, symbol, listenKey):
    """Close an isolated-margin user data stream.

    https://binance-docs.github.io/apidocs/spot/en/#listen-key-isolated-margin

    :param symbol: required - symbol of the isolated margin account
    :type symbol: str
    :param listenKey: required
    :type listenKey: str

    :returns: API response (an empty dict on success)
    :raises: BinanceRequestException, BinanceAPIException
    """
    payload = {'symbol': symbol, 'listenKey': listenKey}
    return self._request_margin_api('delete', 'userDataStream/isolated', signed=False, data=payload)
# Lending Endpoints
def get_lending_product_list(self, **params):
    """Fetch the list of flexible savings products.

    https://binance-docs.github.io/apidocs/spot/en/#get-flexible-product-list-user_data

    :returns: API response
    :raises: BinanceRequestException, BinanceAPIException
    """
    return self._request_margin_api('get', 'lending/daily/product/list', signed=True, data=params)
def get_lending_daily_quota_left(self, **params):
    """Fetch the remaining daily purchase quota of a flexible product.

    https://binance-docs.github.io/apidocs/spot/en/#get-left-daily-purchase-quota-of-flexible-product-user_data

    :returns: API response
    :raises: BinanceRequestException, BinanceAPIException
    """
    return self._request_margin_api('get', 'lending/daily/userLeftQuota', signed=True, data=params)
def purchase_lending_product(self, **params):
    """Subscribe to a flexible savings product.

    https://binance-docs.github.io/apidocs/spot/en/#purchase-flexible-product-user_data

    :returns: API response
    :raises: BinanceRequestException, BinanceAPIException
    """
    return self._request_margin_api('post', 'lending/daily/purchase', signed=True, data=params)
def get_lending_daily_redemption_quota(self, **params):
    """Fetch the remaining daily redemption quota of a flexible product.

    https://binance-docs.github.io/apidocs/spot/en/#get-left-daily-redemption-quota-of-flexible-product-user_data

    :returns: API response
    :raises: BinanceRequestException, BinanceAPIException
    """
    return self._request_margin_api('get', 'lending/daily/userRedemptionQuota', signed=True, data=params)
def redeem_lending_product(self, **params):
    """Redeem holdings of a flexible savings product.

    https://binance-docs.github.io/apidocs/spot/en/#redeem-flexible-product-user_data

    :returns: API response
    :raises: BinanceRequestException, BinanceAPIException
    """
    return self._request_margin_api('post', 'lending/daily/redeem', signed=True, data=params)
def get_lending_position(self, **params):
    """Fetch current positions in flexible savings products.

    https://binance-docs.github.io/apidocs/spot/en/#get-flexible-product-position-user_data

    :returns: API response
    :raises: BinanceRequestException, BinanceAPIException
    """
    return self._request_margin_api('get', 'lending/daily/token/position', signed=True, data=params)
def get_fixed_activity_project_list(self, **params):
    """Fetch the list of fixed-term and activity savings projects.

    https://binance-docs.github.io/apidocs/spot/en/#get-fixed-and-activity-project-list-user_data

    :param asset: optional
    :type asset: str
    :param type: required - "ACTIVITY" or "CUSTOMIZED_FIXED"
    :type type: str
    :param status: optional - "ALL", "SUBSCRIBABLE" or "UNSUBSCRIBABLE"; default "ALL"
    :type status: str
    :param sortBy: optional - "START_TIME", "LOT_SIZE", "INTEREST_RATE" or
        "DURATION"; default "START_TIME"
    :type sortBy: str
    :param current: optional - page to query, starting from 1 (default 1)
    :type current: int
    :param size: optional - page size; default 10, max 100
    :type size: int
    :param recvWindow: the number of milliseconds the request is valid for
    :type recvWindow: int

    :returns: API response, e.g.

    .. code-block:: python

        [
            {
                "asset": "USDT",
                "displayPriority": 1,
                "duration": 90,
                "interestPerLot": "1.35810000",
                "interestRate": "0.05510000",
                "lotSize": "100.00000000",
                "lotsLowLimit": 1,
                "lotsPurchased": 74155,
                "lotsUpLimit": 80000,
                "maxLotsPerUser": 2000,
                "needKyc": False,
                "projectId": "CUSDT90DAYSS001",
                "projectName": "USDT",
                "status": "PURCHASING",
                "type": "CUSTOMIZED_FIXED",
                "withAreaLimitation": False
            }
        ]

    :raises: BinanceRequestException, BinanceAPIException
    """
    return self._request_margin_api('get', 'lending/project/list', signed=True, data=params)
def get_lending_account(self, **params):
    """Fetch details of the lending (savings) account.

    https://binance-docs.github.io/apidocs/spot/en/#lending-account-user_data

    :returns: API response
    :raises: BinanceRequestException, BinanceAPIException
    """
    return self._request_margin_api('get', 'lending/union/account', signed=True, data=params)
def get_lending_purchase_history(self, **params):
    """Fetch the lending purchase record.

    https://binance-docs.github.io/apidocs/spot/en/#get-purchase-record-user_data

    :returns: API response
    :raises: BinanceRequestException, BinanceAPIException
    """
    return self._request_margin_api('get', 'lending/union/purchaseRecord', signed=True, data=params)
def get_lending_redemption_history(self, **params):
    """Fetch the lending redemption record.

    https://binance-docs.github.io/apidocs/spot/en/#get-redemption-record-user_data

    :returns: API response
    :raises: BinanceRequestException, BinanceAPIException
    """
    return self._request_margin_api('get', 'lending/union/redemptionRecord', signed=True, data=params)
def get_lending_interest_history(self, **params):
    """Fetch the lending interest history.

    https://binance-docs.github.io/apidocs/spot/en/#get-interest-history-user_data-2

    :returns: API response
    :raises: BinanceRequestException, BinanceAPIException
    """
    return self._request_margin_api('get', 'lending/union/interestHistory', signed=True, data=params)
def change_fixed_activity_to_daily_position(self, **params):
    """Convert a fixed/activity savings position into a daily (flexible) position.

    https://binance-docs.github.io/apidocs/spot/en/#change-fixed-activity-position-to-daily-position-user_data

    :returns: API response
    :raises: BinanceRequestException, BinanceAPIException
    """
    return self._request_margin_api('post', 'lending/positionChanged', signed=True, data=params)
# Sub Accounts
def get_sub_account_list(self, **params):
    """Query the list of sub-accounts (master account only).

    https://binance-docs.github.io/apidocs/spot/en/#query-sub-account-list-for-master-account

    :param email: optional
    :type email: str
    :param startTime: optional
    :type startTime: int
    :param endTime: optional
    :type endTime: int
    :param page: optional
    :type page: int
    :param limit: optional
    :type limit: int
    :param recvWindow: optional
    :type recvWindow: int

    :returns: API response, e.g.

    .. code-block:: python

        {
            "success":true,
            "subAccounts":[
                {
                    "email":"123@test.com",
                    "status":"enabled",
                    "activated":true,
                    "mobile":"91605290",
                    "gAuth":true,
                    "createTime":1544433328000
                }
            ]
        }

    :raises: BinanceRequestException, BinanceAPIException
    """
    return self._request_withdraw_api('get', 'sub-account/list.html', True, data=params)
def get_sub_account_transfer_history(self, **params):
    """Query the spot asset transfer history of a sub-account (master account only).

    https://binance-docs.github.io/apidocs/spot/en/#query-sub-account-spot-asset-transfer-history-for-master-account

    :param email: required
    :type email: str
    :param startTime: optional
    :type startTime: int
    :param endTime: optional
    :type endTime: int
    :param page: optional
    :type page: int
    :param limit: optional
    :type limit: int
    :param recvWindow: optional
    :type recvWindow: int

    :returns: API response, e.g.

    .. code-block:: python

        {
            "success":true,
            "transfers":[
                {
                    "from":"aaa@test.com",
                    "to":"bbb@test.com",
                    "asset":"BTC",
                    "qty":"1",
                    "time":1544433328000
                }
            ]
        }

    :raises: BinanceRequestException, BinanceAPIException
    """
    return self._request_withdraw_api('get', 'sub-account/transfer/history.html', True, data=params)
def create_sub_account_transfer(self, **params):
    """Execute a spot asset transfer between sub-accounts (master account only).

    https://binance-docs.github.io/apidocs/spot/en/#sub-account-spot-asset-transfer-for-master-account

    :param fromEmail: required - sender email
    :type fromEmail: str
    :param toEmail: required - recipient email
    :type toEmail: str
    :param asset: required
    :type asset: str
    :param amount: required
    :type amount: decimal
    :param recvWindow: optional
    :type recvWindow: int

    :returns: API response, e.g. ``{"success": true, "txnId": "2966662589"}``
    :raises: BinanceRequestException, BinanceAPIException
    """
    return self._request_withdraw_api('post', 'sub-account/transfer.html', True, data=params)
def get_sub_account_futures_transfer_history(self, **params):
    """Query the futures asset transfer history of a sub-account (master account only).

    https://binance-docs.github.io/apidocs/spot/en/#query-sub-account-futures-asset-transfer-history-for-master-account

    :param email: required
    :type email: str
    :param futuresType: required
    :type futuresType: int
    :param startTime: optional
    :type startTime: int
    :param endTime: optional
    :type endTime: int
    :param page: optional
    :type page: int
    :param limit: optional
    :type limit: int
    :param recvWindow: optional
    :type recvWindow: int

    :returns: API response, e.g.

    .. code-block:: python

        {
            "success":true,
            "futuresType": 2,
            "transfers":[
                {
                    "from":"aaa@test.com",
                    "to":"bbb@test.com",
                    "asset":"BTC",
                    "qty":"1",
                    "time":1544433328000
                }
            ]
        }

    :raises: BinanceRequestException, BinanceAPIException
    """
    return self._request_margin_api('get', 'sub-account/futures/internalTransfer', True, data=params)
def create_sub_account_futures_transfer(self, **params):
    """Execute a futures asset transfer between sub-accounts (master account only).

    https://github.com/binance-exchange/binance-official-api-docs/blob/9dbe0e961b80557bb19708a707c7fad08842b28e/wapi-api.md#sub-account-transferfor-master-account

    :param fromEmail: required - sender email
    :type fromEmail: str
    :param toEmail: required - recipient email
    :type toEmail: str
    :param futuresType: required
    :type futuresType: int
    :param asset: required
    :type asset: str
    :param amount: required
    :type amount: decimal
    :param recvWindow: optional
    :type recvWindow: int

    :returns: API response, e.g. ``{"success": true, "txnId": "2934662589"}``
    :raises: BinanceRequestException, BinanceAPIException
    """
    return self._request_margin_api('post', 'sub-account/futures/internalTransfer', True, data=params)
def get_sub_account_assets(self, **params):
    """Fetch the asset balances of a sub-account (master account only).

    https://binance-docs.github.io/apidocs/spot/en/#query-sub-account-assets-for-master-account

    :param email: required
    :type email: str
    :param symbol: optional
    :type symbol: str
    :param recvWindow: optional
    :type recvWindow: int

    :returns: API response, e.g.

    .. code-block:: python

        {
            "success":true,
            "balances":[
                {
                    "asset":"ADA",
                    "free":10000,
                    "locked":0
                },
                {
                    "asset":"BTC",
                    "free":11467.6399,
                    "locked":0
                }
            ]
        }

    :raises: BinanceRequestException, BinanceAPIException
    """
    return self._request_withdraw_api('get', 'sub-account/assets.html', True, data=params)
def query_subaccount_spot_summary(self, **params):
    """Query a BTC-valued summary of sub-account spot assets (master account only).

    https://binance-docs.github.io/apidocs/spot/en/#query-sub-account-spot-assets-summary-for-master-account

    :param email: optional - sub-account email
    :type email: str
    :param page: optional - default 1
    :type page: int
    :param size: optional - default 10, max 20
    :type size: int
    :param recvWindow: optional
    :type recvWindow: int

    :returns: API response, e.g.

    .. code-block:: python

        {
            "totalCount":2,
            "masterAccountTotalAsset": "0.23231201",
            "spotSubUserAssetBtcVoList":[
                {
                    "email":"sub123@test.com",
                    "totalAsset":"9999.00000000"
                },
                {
                    "email":"test456@test.com",
                    "totalAsset":"0.00000000"
                }
            ]
        }

    :raises: BinanceRequestException, BinanceAPIException
    """
    return self._request_margin_api('get', 'sub-account/spotSummary', True, data=params)
def get_subaccount_deposit_address(self, **params):
    """Fetch the deposit address of a sub-account (master account only).

    https://binance-docs.github.io/apidocs/spot/en/#get-sub-account-deposit-address-for-master-account

    :param email: required - sub-account email
    :type email: str
    :param coin: required
    :type coin: str
    :param network: optional
    :type network: str
    :param recvWindow: optional
    :type recvWindow: int

    :returns: API response, e.g.

    .. code-block:: python

        {
            "address":"TDunhSa7jkTNuKrusUTU1MUHtqXoBPKETV",
            "coin":"USDT",
            "tag":"",
            "url":"https://tronscan.org/#/address/TDunhSa7jkTNuKrusUTU1MUHtqXoBPKETV"
        }

    :raises: BinanceRequestException, BinanceAPIException
    """
    return self._request_margin_api('get', 'capital/deposit/subAddress', True, data=params)
def get_subaccount_deposit_history(self, **params):
    """Fetch the deposit history of a sub-account (master account only).

    https://binance-docs.github.io/apidocs/spot/en/#get-sub-account-deposit-history-for-master-account

    :param email: required - sub-account email
    :type email: str
    :param coin: optional
    :type coin: str
    :param status: optional - (0:pending, 6:credited but cannot withdraw, 1:success)
    :type status: int
    :param startTime: optional
    :type startTime: int
    :param endTime: optional
    :type endTime: int
    :param limit: optional
    :type limit: int
    :param offset: optional - default:0
    :type offset: int
    :param recvWindow: optional
    :type recvWindow: int

    :returns: API response

    .. code-block:: python

        [
            {
                "amount":"0.00999800",
                "coin":"PAXG",
                "network":"ETH",
                "status":1,
                "address":"0x788cabe9236ce061e5a892e1a59395a81fc8d62c",
                "addressTag":"",
                "txId":"0xaad4654a3234aa6118af9b4b335f5ae81c360b2394721c019b5d1e75328b09f3",
                "insertTime":1599621997000,
                "transferType":0,
                "confirmTimes":"12/12"
            },
            {
                "amount":"0.50000000",
                "coin":"IOTA",
                "network":"IOTA",
                "status":1,
                "address":"SIZ9VLMHWATXKV99LH99CIGFJFUMLEHGWVZVNNZXRJJVWBPHYWPPBOSDORZ9EQSHCZAMPVAPGFYQAUUV9DROOXJLNW",
                "addressTag":"",
                "txId":"ESBFVQUTPIWQNJSPXFNHNYHSQNTGKRVKPRABQWTAXCDWOAKDKYWPTVG9BGXNVNKTLEJGESAVXIKIZ9999",
                "insertTime":1599620082000,
                "transferType":0,
                "confirmTimes":"1/1"
            }
        ]

    :raises: BinanceRequestException, BinanceAPIException
    """
    return self._request_margin_api('get', 'capital/deposit/subHisrec', True, data=params)
def get_subaccount_futures_margin_status(self, **params):
    """Get Sub-account's Status on Margin/Futures (For Master Account).

    https://binance-docs.github.io/apidocs/spot/en/#get-sub-account-39-s-status-on-margin-futures-for-master-account

    :param email: optional - Sub account email
    :type email: str
    :param recvWindow: optional
    :type recvWindow: int

    :returns: API response - a list of per-sub-account status entries
        (margin/futures enablement flags, create time, mobile); see the
        docs link above for the exact payload layout
    :raises: BinanceRequestException, BinanceAPIException
    """
    return self._request_margin_api('get', 'sub-account/status', signed=True, data=params)
def enable_subaccount_margin(self, **params):
    """Enable Margin for Sub-account (For Master Account).

    https://binance-docs.github.io/apidocs/spot/en/#enable-margin-for-sub-account-for-master-account

    :param email: required - Sub account email
    :type email: str
    :param recvWindow: optional
    :type recvWindow: int

    :returns: API response - echoes the email and the new
        ``isMarginEnabled`` flag
    :raises: BinanceRequestException, BinanceAPIException
    """
    return self._request_margin_api('post', 'sub-account/margin/enable', signed=True, data=params)
def get_subaccount_margin_details(self, **params):
    """Get Detail on Sub-account's Margin Account (For Master Account).

    https://binance-docs.github.io/apidocs/spot/en/#get-detail-on-sub-account-39-s-margin-account-for-master-account

    :param email: required - Sub account email
    :type email: str
    :param recvWindow: optional
    :type recvWindow: int

    :returns: API response - margin level, BTC-denominated totals and a
        ``marginUserAssetVoList`` per-asset breakdown; see the docs link
        above for the exact payload layout
    :raises: BinanceRequestException, BinanceAPIException
    """
    return self._request_margin_api('get', 'sub-account/margin/account', signed=True, data=params)
def get_subaccount_margin_summary(self, **params):
    """Get Summary of Sub-account's Margin Account (For Master Account).

    https://binance-docs.github.io/apidocs/spot/en/#get-summary-of-sub-account-39-s-margin-account-for-master-account

    :param recvWindow: optional
    :type recvWindow: int

    :returns: API response - BTC-denominated totals plus a
        ``subAccountList`` per-account breakdown; see the docs link above
        for the exact payload layout
    :raises: BinanceRequestException, BinanceAPIException
    """
    return self._request_margin_api('get', 'sub-account/margin/accountSummary', signed=True, data=params)
def enable_subaccount_futures(self, **params):
    """Enable Futures for Sub-account (For Master Account).

    https://binance-docs.github.io/apidocs/spot/en/#enable-futures-for-sub-account-for-master-account

    :param email: required - Sub account email
    :type email: str
    :param recvWindow: optional
    :type recvWindow: int

    :returns: API response - echoes the email and the new
        ``isFuturesEnabled`` flag
    :raises: BinanceRequestException, BinanceAPIException
    """
    return self._request_margin_api('post', 'sub-account/futures/enable', signed=True, data=params)
def get_subaccount_futures_details(self, **params):
    """Get Detail on Sub-account's Futures Account (For Master Account).

    https://binance-docs.github.io/apidocs/spot/en/#get-detail-on-sub-account-39-s-futures-account-for-master-account

    :param email: required - Sub account email
    :type email: str
    :param recvWindow: optional
    :type recvWindow: int

    :returns: API response - futures account snapshot (per-asset margins,
        balances, permissions, fee tier); see the docs link above for the
        exact payload layout
    :raises: BinanceRequestException, BinanceAPIException
    """
    return self._request_margin_api('get', 'sub-account/futures/account', signed=True, data=params)
def get_subaccount_futures_summary(self, **params):
    """Get Summary of Sub-account's Futures Account (For Master Account).

    https://binance-docs.github.io/apidocs/spot/en/#get-summary-of-sub-account-39-s-futures-account-for-master-account

    :param recvWindow: optional
    :type recvWindow: int

    :returns: API response - aggregate futures totals plus a
        ``subAccountList`` per-account breakdown; see the docs link above
        for the exact payload layout
    :raises: BinanceRequestException, BinanceAPIException
    """
    return self._request_margin_api('get', 'sub-account/futures/accountSummary', signed=True, data=params)
def get_subaccount_futures_positionrisk(self, **params):
    """Get Futures Position-Risk of Sub-account (For Master Account).

    https://binance-docs.github.io/apidocs/spot/en/#get-futures-position-risk-of-sub-account-for-master-account

    :param email: required - Sub account email
    :type email: str
    :param recvWindow: optional
    :type recvWindow: int

    :returns: API response - a list of open-position risk entries (entry
        price, leverage, liquidation price, mark price, ...); see the docs
        link above for the exact payload layout
    :raises: BinanceRequestException, BinanceAPIException
    """
    return self._request_margin_api('get', 'sub-account/futures/positionRisk', signed=True, data=params)
def make_subaccount_futures_transfer(self, **params):
    """Futures Transfer for Sub-account (For Master Account).

    https://binance-docs.github.io/apidocs/spot/en/#futures-transfer-for-sub-account-for-master-account

    :param email: required - Sub account email
    :type email: str
    :param asset: required - The asset being transferred, e.g., USDT
    :type asset: str
    :param amount: required - The amount to be transferred
    :type amount: float
    :param type: required - transfer direction:
        1: sub spot account -> sub USDT-margined futures account
        2: sub USDT-margined futures account -> sub spot account
        3: sub spot account -> sub COIN-margined futures account
        4: sub COIN-margined futures account -> sub spot account
    :type type: int

    :returns: API response - contains the transaction id (``txnId``)
    :raises: BinanceRequestException, BinanceAPIException
    """
    return self._request_margin_api('post', 'sub-account/futures/transfer', signed=True, data=params)
def make_subaccount_margin_transfer(self, **params):
    """Margin Transfer for Sub-account (For Master Account).

    https://binance-docs.github.io/apidocs/spot/en/#margin-transfer-for-sub-account-for-master-account

    :param email: required - Sub account email
    :type email: str
    :param asset: required - The asset being transferred, e.g., USDT
    :type asset: str
    :param amount: required - The amount to be transferred
    :type amount: float
    :param type: required - transfer direction:
        1: sub spot account -> sub margin account
        2: sub margin account -> sub spot account
    :type type: int

    :returns: API response - contains the transaction id (``txnId``)
    :raises: BinanceRequestException, BinanceAPIException
    """
    return self._request_margin_api('post', 'sub-account/margin/transfer', signed=True, data=params)
def make_subaccount_to_subaccount_transfer(self, **params):
    """Transfer to Sub-account of Same Master (For Sub-account).

    https://binance-docs.github.io/apidocs/spot/en/#transfer-to-sub-account-of-same-master-for-sub-account

    :param toEmail: required - Sub account email
    :type toEmail: str
    :param asset: required - The asset being transferred, e.g., USDT
    :type asset: str
    :param amount: required - The amount to be transferred
    :type amount: float
    :param recvWindow: optional
    :type recvWindow: int

    :returns: API response - contains the transaction id (``txnId``)
    :raises: BinanceRequestException, BinanceAPIException
    """
    return self._request_margin_api('post', 'sub-account/transfer/subToSub', signed=True, data=params)
def make_subaccount_to_master_transfer(self, **params):
    """Transfer to Master (For Sub-account).

    https://binance-docs.github.io/apidocs/spot/en/#transfer-to-master-for-sub-account

    :param asset: required - The asset being transferred, e.g., USDT
    :type asset: str
    :param amount: required - The amount to be transferred
    :type amount: float
    :param recvWindow: optional
    :type recvWindow: int

    :returns: API response - contains the transaction id (``txnId``)
    :raises: BinanceRequestException, BinanceAPIException
    """
    return self._request_margin_api('post', 'sub-account/transfer/subToMaster', signed=True, data=params)
def get_subaccount_transfer_history(self, **params):
    """Sub-account Transfer History (For Sub-account).

    https://binance-docs.github.io/apidocs/spot/en/#sub-account-transfer-history-for-sub-account

    :param asset: required - The asset being transferred, e.g., USDT
    :type asset: str
    :param type: optional - 1: transfer in, 2: transfer out
    :type type: int
    :param startTime: optional
    :type startTime: int
    :param endTime: optional
    :type endTime: int
    :param limit: optional - Default 500
    :type limit: int
    :param recvWindow: optional
    :type recvWindow: int

    :returns: API response - a list of transfer records (counterparty,
        email, direction, asset, qty, status, tranId, time); see the docs
        link above for the exact payload layout
    :raises: BinanceRequestException, BinanceAPIException
    """
    return self._request_margin_api('get', 'sub-account/transfer/subUserHistory', signed=True, data=params)
def make_universal_transfer(self, **params):
    """Universal Transfer (For Master Account).

    https://binance-docs.github.io/apidocs/spot/en/#universal-transfer-for-master-account

    :param fromEmail: optional
    :type fromEmail: str
    :param toEmail: optional
    :type toEmail: str
    :param fromAccountType: required
    :type fromAccountType: str
    :param toAccountType: required
    :type toAccountType: str
    :param asset: required - The asset being transferred, e.g., USDT
    :type asset: str
    :param amount: required
    :type amount: float
    :param recvWindow: optional
    :type recvWindow: int

    :returns: API response - contains the transaction id (``tranId``)
    :raises: BinanceRequestException, BinanceAPIException
    """
    return self._request_margin_api('post', 'sub-account/universalTransfer', signed=True, data=params)
def get_universal_transfer_history(self, **params):
    """Query Universal Transfer History (For Master Account).

    https://binance-docs.github.io/apidocs/spot/en/#query-universal-transfer-history

    :param fromEmail: optional
    :type fromEmail: str
    :param toEmail: optional
    :type toEmail: str
    :param startTime: optional
    :type startTime: int
    :param endTime: optional
    :type endTime: int
    :param page: optional
    :type page: int
    :param limit: optional
    :type limit: int
    :param recvWindow: optional
    :type recvWindow: int

    :returns: API response - a list of transfer records (tranId, emails,
        asset, amount, account types, status, timestamp); see the docs
        link above for the exact payload layout
    :raises: BinanceRequestException, BinanceAPIException
    """
    return self._request_margin_api('get', 'sub-account/universalTransfer', signed=True, data=params)
# Futures API
def futures_ping(self):
    """Ping the futures REST API to test connectivity.

    https://binance-docs.github.io/apidocs/futures/en/#test-connectivity
    """
    return self._request_futures_api('get', 'ping')
def futures_time(self):
    """Fetch the current futures server time (also a connectivity check).

    https://binance-docs.github.io/apidocs/futures/en/#check-server-time
    """
    return self._request_futures_api('get', 'time')
def futures_exchange_info(self):
    """Fetch current futures exchange trading rules and symbol information.

    https://binance-docs.github.io/apidocs/futures/en/#exchange-information-market_data
    """
    return self._request_futures_api('get', 'exchangeInfo')
def futures_order_book(self, **params):
    """Fetch the futures order book for a market.

    https://binance-docs.github.io/apidocs/futures/en/#order-book-market_data
    """
    return self._request_futures_api('get', 'depth', data=params)
def futures_recent_trades(self, **params):
    """Fetch recent futures trades (up to the last 500).

    https://binance-docs.github.io/apidocs/futures/en/#recent-trades-list-market_data
    """
    return self._request_futures_api('get', 'trades', data=params)
def futures_historical_trades(self, **params):
    """Fetch older futures market trades.

    https://binance-docs.github.io/apidocs/futures/en/#old-trades-lookup-market_data
    """
    return self._request_futures_api('get', 'historicalTrades', data=params)
def futures_aggregate_trades(self, **params):
    """Fetch compressed, aggregate futures trades.

    Trades filling at the same time, from the same order, at the same
    price are reported with their quantity aggregated.

    https://binance-docs.github.io/apidocs/futures/en/#compressed-aggregate-trades-list-market_data
    """
    return self._request_futures_api('get', 'aggTrades', data=params)
def futures_klines(self, **params):
    """Fetch kline/candlestick bars for a futures symbol.

    Klines are uniquely identified by their open time.

    https://binance-docs.github.io/apidocs/futures/en/#kline-candlestick-data-market_data
    """
    return self._request_futures_api('get', 'klines', data=params)
def futures_historical_klines(self, symbol, interval, start_str, end_str=None,
                              limit=500):
    """Get Historical Futures Klines from Binance.

    :param symbol: Name of symbol pair e.g BNBBTC
    :type symbol: str
    :param interval: Binance Kline interval
    :type interval: str
    :param start_str: Start date string in UTC format or timestamp in milliseconds
    :type start_str: str|int
    :param end_str: optional - end date string in UTC format or timestamp in
        milliseconds (default will fetch everything up to now)
    :type end_str: str|int
    :param limit: Default 500; max 1000.
    :type limit: int
    :return: list of OHLCV values
    """
    # Bug fix: the original hard-coded ``end_str=None, limit=500`` in the
    # delegated call, silently discarding the caller's arguments. Forward
    # them instead (mirrors futures_historical_klines_generator below).
    return self._historical_klines(symbol, interval, start_str, end_str=end_str, limit=limit, spot=False)
def futures_historical_klines_generator(self, symbol, interval, start_str, end_str=None):
    """Get a Historical Futures Kline generator from Binance.

    :param symbol: Name of symbol pair e.g BNBBTC
    :type symbol: str
    :param interval: Binance Kline interval
    :type interval: str
    :param start_str: Start date string in UTC format or timestamp in milliseconds
    :type start_str: str|int
    :param end_str: optional - end date string in UTC format or timestamp in
        milliseconds (default will fetch everything up to now)
    :type end_str: str|int
    :return: generator of OHLCV values
    """
    # Delegate to the shared kline generator, flagged as a futures request.
    return self._historical_klines_generator(symbol, interval, start_str, end_str=end_str, spot=False)
def futures_mark_price(self, **params):
    """Fetch the mark price and funding rate for a futures symbol.

    https://binance-docs.github.io/apidocs/futures/en/#mark-price-market_data
    """
    return self._request_futures_api('get', 'premiumIndex', data=params)
def futures_funding_rate(self, **params):
    """Fetch the funding rate history for a futures symbol.

    https://binance-docs.github.io/apidocs/futures/en/#get-funding-rate-history-market_data
    """
    return self._request_futures_api('get', 'fundingRate', data=params)
def futures_ticker(self, **params):
    """Fetch 24 hour rolling window price change statistics.

    https://binance-docs.github.io/apidocs/futures/en/#24hr-ticker-price-change-statistics-market_data
    """
    return self._request_futures_api('get', 'ticker/24hr', data=params)
def futures_symbol_ticker(self, **params):
    """Fetch the latest price for a symbol or all symbols.

    https://binance-docs.github.io/apidocs/futures/en/#symbol-price-ticker-market_data
    """
    return self._request_futures_api('get', 'ticker/price', data=params)
def futures_orderbook_ticker(self, **params):
    """Fetch the best bid/ask price and quantity for a symbol or all symbols.

    https://binance-docs.github.io/apidocs/futures/en/#symbol-order-book-ticker-market_data
    """
    return self._request_futures_api('get', 'ticker/bookTicker', data=params)
def futures_liquidation_orders(self, **params):
    """Get all liquidation orders.

    https://binance-docs.github.io/apidocs/futures/en/#get-all-liquidation-orders-market_data
    """
    # Bug fix: the endpoint is GET /fapi/v1/allForceOrders; the previous
    # 'ticker/allForceOrders' path does not exist on the futures API.
    return self._request_futures_api('get', 'allForceOrders', data=params)
def futures_open_interest(self, **params):
    """Get present open interest of a specific symbol.

    https://binance-docs.github.io/apidocs/futures/en/#open-interest-market_data
    """
    # Bug fix: the endpoint is GET /fapi/v1/openInterest; the previous
    # 'ticker/openInterest' path does not exist on the futures API.
    return self._request_futures_api('get', 'openInterest', data=params)
def futures_open_interest_hist(self, **params):
    """Fetch open interest statistics of a specific symbol.

    Note: this goes through the futures *data* API, not the trading API.

    https://binance-docs.github.io/apidocs/futures/en/#open-interest-statistics
    """
    return self._request_futures_data_api('get', 'openInterestHist', data=params)
def futures_leverage_bracket(self, **params):
    """Fetch the notional and leverage brackets (signed request).

    https://binance-docs.github.io/apidocs/futures/en/#notional-and-leverage-brackets-market_data
    """
    return self._request_futures_api('get', 'leverageBracket', signed=True, data=params)
def futures_account_transfer(self, **params):
    """Execute a transfer between the spot account and futures account.

    https://binance-docs.github.io/apidocs/futures/en/#new-future-account-transfer
    """
    return self._request_margin_api('post', 'futures/transfer', signed=True, data=params)
def transfer_history(self, **params):
    """Fetch the futures account transaction history list.

    https://binance-docs.github.io/apidocs/futures/en/#get-future-account-transaction-history-list-user_data
    """
    return self._request_margin_api('get', 'futures/transfer', signed=True, data=params)
def futures_create_order(self, **params):
    """Send in a new futures order.

    https://binance-docs.github.io/apidocs/futures/en/#new-order-trade
    """
    return self._request_futures_api('post', 'order', signed=True, data=params)
def futures_get_order(self, **params):
    """Check a futures order's status.

    https://binance-docs.github.io/apidocs/futures/en/#query-order-user_data
    """
    return self._request_futures_api('get', 'order', signed=True, data=params)
def futures_get_open_orders(self, **params):
    """Fetch all open futures orders on a symbol.

    https://binance-docs.github.io/apidocs/futures/en/#current-open-orders-user_data
    """
    return self._request_futures_api('get', 'openOrders', signed=True, data=params)
def futures_get_all_orders(self, **params):
    """Fetch all futures account orders; active, canceled, or filled.

    https://binance-docs.github.io/apidocs/futures/en/#all-orders-user_data
    """
    return self._request_futures_api('get', 'allOrders', signed=True, data=params)
def futures_cancel_order(self, **params):
    """Cancel an active futures order.

    https://binance-docs.github.io/apidocs/futures/en/#cancel-order-trade
    """
    return self._request_futures_api('delete', 'order', signed=True, data=params)
def futures_cancel_all_open_orders(self, **params):
    """Cancel all open futures orders.

    https://binance-docs.github.io/apidocs/futures/en/#cancel-all-open-orders-trade
    """
    return self._request_futures_api('delete', 'allOpenOrders', signed=True, data=params)
def futures_cancel_orders(self, **params):
    """Cancel multiple futures orders in one request.

    https://binance-docs.github.io/apidocs/futures/en/#cancel-multiple-orders-trade
    """
    return self._request_futures_api('delete', 'batchOrders', signed=True, data=params)
def futures_account_balance(self, **params):
    """Fetch the futures account balance.

    https://binance-docs.github.io/apidocs/futures/en/#future-account-balance-user_data
    """
    return self._request_futures_api('get', 'balance', signed=True, data=params)
def futures_account(self, **params):
    """Fetch current futures account information.

    https://binance-docs.github.io/apidocs/futures/en/#account-information-user_data
    """
    return self._request_futures_api('get', 'account', signed=True, data=params)
def futures_change_leverage(self, **params):
    """Change the user's initial leverage for a specific symbol market.

    https://binance-docs.github.io/apidocs/futures/en/#change-initial-leverage-trade
    """
    return self._request_futures_api('post', 'leverage', signed=True, data=params)
def futures_change_margin_type(self, **params):
    """Change the margin type (isolated/crossed) for a symbol.

    https://binance-docs.github.io/apidocs/futures/en/#change-margin-type-trade
    """
    return self._request_futures_api('post', 'marginType', signed=True, data=params)
def futures_change_position_margin(self, **params):
    """Modify the isolated position margin for a symbol.

    https://binance-docs.github.io/apidocs/futures/en/#modify-isolated-position-margin-trade
    """
    return self._request_futures_api('post', 'positionMargin', signed=True, data=params)
def futures_position_margin_history(self, **params):
    """Fetch the position margin change history.

    https://binance-docs.github.io/apidocs/futures/en/#get-postion-margin-change-history-trade
    """
    return self._request_futures_api('get', 'positionMargin/history', signed=True, data=params)
def futures_position_information(self, **params):
    """Fetch position information for the account.

    https://binance-docs.github.io/apidocs/futures/en/#position-information-user_data
    """
    return self._request_futures_api('get', 'positionRisk', signed=True, data=params)
def futures_account_trades(self, **params):
    """Fetch trades for the authenticated futures account and symbol.

    https://binance-docs.github.io/apidocs/futures/en/#account-trade-list-user_data
    """
    return self._request_futures_api('get', 'userTrades', signed=True, data=params)
def futures_income_history(self, **params):
    """Fetch income history for the authenticated futures account.

    https://binance-docs.github.io/apidocs/futures/en/#get-income-history-user_data
    """
    return self._request_futures_api('get', 'income', signed=True, data=params)
def futures_change_position_mode(self, **params):
    """Change the position mode (hedge/one-way) for the account.

    https://binance-docs.github.io/apidocs/futures/en/#change-position-mode-trade
    """
    return self._request_futures_api('post', 'positionSide/dual', signed=True, data=params)
def futures_get_position_mode(self, **params):
    """Fetch the current position mode (hedge/one-way) for the account.

    https://binance-docs.github.io/apidocs/futures/en/#get-current-position-mode-user_data
    """
    return self._request_futures_api('get', 'positionSide/dual', signed=True, data=params)
# COIN Futures API
def futures_coin_ping(self):
    """Ping the COIN-M futures REST API to test connectivity.

    https://binance-docs.github.io/apidocs/delivery/en/#test-connectivity
    """
    return self._request_futures_coin_api("get", "ping")
def futures_coin_time(self):
    """Fetch the current COIN-M futures server time (connectivity check).

    https://binance-docs.github.io/apidocs/delivery/en/#check-server-time
    """
    return self._request_futures_coin_api("get", "time")
def futures_coin_exchange_info(self):
    """Fetch current COIN-M futures exchange rules and symbol information.

    https://binance-docs.github.io/apidocs/delivery/en/#exchange-information
    """
    return self._request_futures_coin_api("get", "exchangeInfo")
def futures_coin_order_book(self, **params):
    """Fetch the COIN-M futures order book for a market.

    https://binance-docs.github.io/apidocs/delivery/en/#order-book
    """
    return self._request_futures_coin_api("get", "depth", data=params)
def futures_coin_recent_trades(self, **params):
    """Fetch recent COIN-M futures trades (up to the last 500).

    https://binance-docs.github.io/apidocs/delivery/en/#recent-trades-list
    """
    return self._request_futures_coin_api("get", "trades", data=params)
def futures_coin_historical_trades(self, **params):
    """Fetch older COIN-M futures market trades.

    https://binance-docs.github.io/apidocs/delivery/en/#old-trades-lookup-market_data
    """
    return self._request_futures_coin_api("get", "historicalTrades", data=params)
def futures_coin_aggregate_trades(self, **params):
    """Fetch compressed, aggregate COIN-M futures trades.

    Trades filling at the same time, from the same order, at the same
    price are reported with their quantity aggregated.

    https://binance-docs.github.io/apidocs/delivery/en/#compressed-aggregate-trades-list
    """
    return self._request_futures_coin_api("get", "aggTrades", data=params)
def futures_coin_klines(self, **params):
    """Fetch kline/candlestick bars for a COIN-M futures symbol.

    Klines are uniquely identified by their open time.

    https://binance-docs.github.io/apidocs/delivery/en/#kline-candlestick-data
    """
    return self._request_futures_coin_api("get", "klines", data=params)
# NOTE(review): method name misspells "continuous"; kept for backward
# compatibility since callers already depend on it.
def futures_coin_continous_klines(self, **params):
    """Fetch kline/candlestick bars for a specific contract type.

    Klines are uniquely identified by their open time.

    https://binance-docs.github.io/apidocs/delivery/en/#continuous-contract-kline-candlestick-data
    """
    return self._request_futures_coin_api("get", "continuousKlines", data=params)
def futures_coin_index_price_klines(self, **params):
    """Fetch kline/candlestick bars for the index price of a pair.

    https://binance-docs.github.io/apidocs/delivery/en/#index-price-kline-candlestick-data
    """
    return self._request_futures_coin_api("get", "indexPriceKlines", data=params)
def futures_coin_mark_price_klines(self, **params):
    """Fetch kline/candlestick bars for the mark price of a pair.

    https://binance-docs.github.io/apidocs/delivery/en/#mark-price-kline-candlestick-data
    """
    return self._request_futures_coin_api("get", "markPriceKlines", data=params)
def futures_coin_mark_price(self, **params):
    """Fetch the index price and mark price for a COIN-M futures symbol.

    https://binance-docs.github.io/apidocs/delivery/en/#index-price-and-mark-price
    """
    return self._request_futures_coin_api("get", "premiumIndex", data=params)
def futures_coin_funding_rate(self, **params):
    """Fetch the funding rate history of perpetual COIN-M futures.

    https://binance-docs.github.io/apidocs/delivery/en/#get-funding-rate-history-of-perpetual-futures
    """
    return self._request_futures_coin_api("get", "fundingRate", data=params)
def futures_coin_ticker(self, **params):
    """Fetch 24 hour rolling window price change statistics (COIN-M).

    https://binance-docs.github.io/apidocs/delivery/en/#24hr-ticker-price-change-statistics
    """
    return self._request_futures_coin_api("get", "ticker/24hr", data=params)
def futures_coin_symbol_ticker(self, **params):
    """Fetch the latest price for a COIN-M symbol or all symbols.

    https://binance-docs.github.io/apidocs/delivery/en/#symbol-price-ticker
    """
    return self._request_futures_coin_api("get", "ticker/price", data=params)
def futures_coin_orderbook_ticker(self, **params):
    """Fetch the best bid/ask price and quantity (COIN-M) for symbols.

    https://binance-docs.github.io/apidocs/delivery/en/#symbol-order-book-ticker
    """
    return self._request_futures_coin_api("get", "ticker/bookTicker", data=params)
def futures_coin_liquidation_orders(self, **params):
    """Fetch all COIN-M futures liquidation orders.

    https://binance-docs.github.io/apidocs/delivery/en/#get-all-liquidation-orders
    """
    return self._request_futures_coin_api("get", "allForceOrders", data=params)
def futures_coin_open_interest(self, **params):
    """Fetch the present open interest of a specific COIN-M symbol.

    https://binance-docs.github.io/apidocs/delivery/en/#open-interest
    """
    return self._request_futures_coin_api("get", "openInterest", data=params)
def futures_coin_open_interest_hist(self, **params):
    """Fetch open interest statistics of a specific COIN-M symbol.

    Note: this goes through the COIN-M *data* API, not the trading API.

    https://binance-docs.github.io/apidocs/delivery/en/#open-interest-statistics-market-data
    """
    return self._request_futures_coin_data_api("get", "openInterestHist", data=params)
def futures_coin_leverage_bracket(self, **params):
    """Fetch the notional brackets for a pair (signed, v2 endpoint).

    https://binance-docs.github.io/apidocs/delivery/en/#notional-bracket-for-pair-user_data
    """
    return self._request_futures_coin_api("get", "leverageBracket", version=2, signed=True, data=params)
def new_transfer_history(self, **params):
    """Fetch the futures account transaction history list.

    Uses the newer sapi ``asset/transfer`` endpoint rather than the
    legacy ``futures/transfer`` one (see ``transfer_history``).

    https://binance-docs.github.io/apidocs/delivery/en/#new-future-account-transfer
    """
    return self._request_margin_api("get", "asset/transfer", signed=True, data=params)
def universal_transfer(self, **params):
    """Universal transfer API across the different Binance account types.

    https://binance-docs.github.io/apidocs/spot/en/#user-universal-transfer
    """
    return self._request_margin_api("post", "asset/transfer", signed=True, data=params)
def futures_coin_create_order(self, **params):
    """Send in a new COIN-M futures order.

    https://binance-docs.github.io/apidocs/delivery/en/#new-order-trade
    """
    return self._request_futures_coin_api("post", "order", signed=True, data=params)
def futures_coin_get_order(self, **params):
    """Check a COIN-M futures order's status.

    https://binance-docs.github.io/apidocs/delivery/en/#query-order-user_data
    """
    return self._request_futures_coin_api("get", "order", signed=True, data=params)
def futures_coin_get_open_orders(self, **params):
    """Fetch all open COIN-M futures orders on a symbol.

    https://binance-docs.github.io/apidocs/delivery/en/#current-all-open-orders-user_data
    """
    return self._request_futures_coin_api("get", "openOrders", signed=True, data=params)
def futures_coin_get_all_orders(self, **params):
    """Fetch all COIN-M futures orders; active, canceled, or filled.

    https://binance-docs.github.io/apidocs/delivery/en/#all-orders-user_data
    """
    return self._request_futures_coin_api("get", "allOrders", signed=True, data=params)
def futures_coin_cancel_order(self, **params):
    """Cancel an active COIN-M futures order.

    https://binance-docs.github.io/apidocs/delivery/en/#cancel-order-trade
    """
    return self._request_futures_coin_api("delete", "order", signed=True, data=params)
def futures_coin_cancel_all_open_orders(self, **params):
    """Cancel all open COIN-M futures orders.

    https://binance-docs.github.io/apidocs/delivery/en/#cancel-all-open-orders-trade
    """
    return self._request_futures_coin_api("delete", "allOpenOrders", signed=True, data=params)
    def futures_coin_cancel_orders(self, **params):
        """Cancel multiple coin-margined futures orders in one request.

        https://binance-docs.github.io/apidocs/delivery/en/#cancel-multiple-orders-trade

        :returns: API response
        :raises: BinanceRequestException, BinanceAPIException
        """
        return self._request_futures_coin_api(
            "delete", "batchOrders", True, data=params
        )
    def futures_coin_account_balance(self, **params):
        """Get coin-margined futures account balance.

        https://binance-docs.github.io/apidocs/delivery/en/#futures-account-balance-user_data

        :returns: API response
        :raises: BinanceRequestException, BinanceAPIException
        """
        return self._request_futures_coin_api(
            "get", "balance", signed=True, data=params
        )
    def futures_coin_account(self, **params):
        """Get current coin-margined futures account information.

        https://binance-docs.github.io/apidocs/delivery/en/#account-information-user_data

        :returns: API response
        :raises: BinanceRequestException, BinanceAPIException
        """
        return self._request_futures_coin_api(
            "get", "account", signed=True, data=params
        )
    def futures_coin_change_leverage(self, **params):
        """Change user's initial leverage of specific symbol market.

        https://binance-docs.github.io/apidocs/delivery/en/#change-initial-leverage-trade

        :returns: API response
        :raises: BinanceRequestException, BinanceAPIException
        """
        return self._request_futures_coin_api(
            "post", "leverage", signed=True, data=params
        )
    def futures_coin_change_margin_type(self, **params):
        """Change the margin type (ISOLATED/CROSSED) for a symbol.

        https://binance-docs.github.io/apidocs/delivery/en/#change-margin-type-trade

        :returns: API response
        :raises: BinanceRequestException, BinanceAPIException
        """
        return self._request_futures_coin_api(
            "post", "marginType", signed=True, data=params
        )
    def futures_coin_change_position_margin(self, **params):
        """Change the isolated position margin for a symbol.

        https://binance-docs.github.io/apidocs/delivery/en/#modify-isolated-position-margin-trade

        :returns: API response
        :raises: BinanceRequestException, BinanceAPIException
        """
        return self._request_futures_coin_api(
            "post", "positionMargin", True, data=params
        )
    def futures_coin_position_margin_history(self, **params):
        """Get position margin change history.

        https://binance-docs.github.io/apidocs/delivery/en/#get-position-margin-change-history-trade

        :returns: API response
        :raises: BinanceRequestException, BinanceAPIException
        """
        return self._request_futures_coin_api(
            "get", "positionMargin/history", True, data=params
        )
    def futures_coin_position_information(self, **params):
        """Get current coin-margined futures position information.

        https://binance-docs.github.io/apidocs/delivery/en/#position-information-user_data

        :returns: API response
        :raises: BinanceRequestException, BinanceAPIException
        """
        return self._request_futures_coin_api("get", "positionRisk", True, data=params)
    def futures_coin_account_trades(self, **params):
        """Get trades for the authenticated account and symbol.

        https://binance-docs.github.io/apidocs/delivery/en/#account-trade-list-user_data

        :returns: API response
        :raises: BinanceRequestException, BinanceAPIException
        """
        return self._request_futures_coin_api("get", "userTrades", True, data=params)
    def futures_coin_income_history(self, **params):
        """Get income history for the authenticated account.

        https://binance-docs.github.io/apidocs/delivery/en/#get-income-history-user_data

        :returns: API response
        :raises: BinanceRequestException, BinanceAPIException
        """
        return self._request_futures_coin_api("get", "income", True, data=params)
    def futures_coin_change_position_mode(self, **params):
        """Change user's position mode (Hedge Mode or One-way Mode) on EVERY symbol.

        https://binance-docs.github.io/apidocs/delivery/en/#change-position-mode-trade

        :returns: API response
        :raises: BinanceRequestException, BinanceAPIException
        """
        return self._request_futures_coin_api("post", "positionSide/dual", True, data=params)
    def futures_coin_get_position_mode(self, **params):
        """Get user's position mode (Hedge Mode or One-way Mode) on EVERY symbol.

        https://binance-docs.github.io/apidocs/delivery/en/#get-current-position-mode-user_data

        :returns: API response
        :raises: BinanceRequestException, BinanceAPIException
        """
        return self._request_futures_coin_api("get", "positionSide/dual", True, data=params)
def get_all_coins_info(self, **params):
"""Get information of coins (available for deposit and withdraw) for user.
https://binance-docs.github.io/apidocs/spot/en/#all-coins-39-information-user_data
:param recvWindow: optional
:type recvWindow: int
:returns: API response
.. code-block:: python
{
"coin": "BTC",
"depositAllEnable": true,
"withdrawAllEnable": true,
"name": "Bitcoin",
"free": "0",
"locked": "0",
"freeze": "0",
"withdrawing": "0",
"ipoing": "0",
"ipoable": "0",
"storage": "0",
"isLegalMoney": false,
"trading": true,
"networkList": [
{
"network": "BNB",
"coin": "BTC",
"withdrawIntegerMultiple": "0.00000001",
"isDefault": false,
"depositEnable": true,
"withdrawEnable": true,
"depositDesc": "",
"withdrawDesc": "",
"specialTips": "Both a MEMO and an Address are required to successfully deposit your BEP2-BTCB tokens to Binance.",
"name": "BEP2",
"resetAddressStatus": false,
"addressRegex": "^(bnb1)[0-9a-z]{38}$",
"memoRegex": "^[0-9A-Za-z-_]{1,120}$",
"withdrawFee": "0.0000026",
"withdrawMin": "0.0000052",
"withdrawMax": "0",
"minConfirm": 1,
"unLockConfirm": 0
},
{
"network": "BTC",
"coin": "BTC",
"withdrawIntegerMultiple": "0.00000001",
"isDefault": true,
"depositEnable": true,
"withdrawEnable": true,
"depositDesc": "",
"withdrawDesc": "",
"specialTips": "",
"name": "BTC",
"resetAddressStatus": false,
"addressRegex": "^[13][a-km-zA-HJ-NP-Z1-9]{25,34}$|^(bc1)[0-9A-Za-z]{39,59}$",
"memoRegex": "",
"withdrawFee": "0.0005",
"withdrawMin": "0.001",
"withdrawMax": "0",
"minConfirm": 1,
"unLockConfirm": 2
}
]
}
:raises: BinanceRequestException, BinanceAPIException
"""
return self._request_margin_api('get', 'capital/config/getall', True, data=params)
def get_account_snapshot(self, **params):
"""Get daily account snapshot of specific type.
https://binance-docs.github.io/apidocs/spot/en/#daily-account-snapshot-user_data
:param type: required. Valid values are SPOT/MARGIN/FUTURES.
:type type: string
:param startTime: optional
:type startTime: int
:param endTime: optional
:type endTime: int
:param limit: optional
:type limit: int
:param recvWindow: optional
:type recvWindow: int
:returns: API response
.. code-block:: python
{
"code":200, // 200 for success; others are error codes
"msg":"", // error message
"snapshotVos":[
{
"data":{
"balances":[
{
"asset":"BTC",
"free":"0.09905021",
"locked":"0.00000000"
},
{
"asset":"USDT",
"free":"1.89109409",
"locked":"0.00000000"
}
],
"totalAssetOfBtc":"0.09942700"
},
"type":"spot",
"updateTime":1576281599000
}
]
}
OR
.. code-block:: python
{
"code":200, // 200 for success; others are error codes
"msg":"", // error message
"snapshotVos":[
{
"data":{
"marginLevel":"2748.02909813",
"totalAssetOfBtc":"0.00274803",
"totalLiabilityOfBtc":"0.00000100",
"totalNetAssetOfBtc":"0.00274750",
"userAssets":[
{
"asset":"XRP",
"borrowed":"0.00000000",
"free":"1.00000000",
"interest":"0.00000000",
"locked":"0.00000000",
"netAsset":"1.00000000"
}
]
},
"type":"margin",
"updateTime":1576281599000
}
]
}
OR
.. code-block:: python
{
"code":200, // 200 for success; others are error codes
"msg":"", // error message
"snapshotVos":[
{
"data":{
"assets":[
{
"asset":"USDT",
"marginBalance":"118.99782335",
"walletBalance":"120.23811389"
}
],
"position":[
{
"entryPrice":"7130.41000000",
"markPrice":"7257.66239673",
"positionAmt":"0.01000000",
"symbol":"BTCUSDT",
"unRealizedProfit":"1.24029054"
}
]
},
"type":"futures",
"updateTime":1576281599000
}
]
}
:raises: BinanceRequestException, BinanceAPIException
"""
return self._request_margin_api('get', 'accountSnapshot', True, data=params)
    def disable_fast_withdraw_switch(self, **params):
        """Disable Fast Withdraw Switch.

        https://binance-docs.github.io/apidocs/spot/en/#disable-fast-withdraw-switch-user_data

        :param recvWindow: optional
        :type recvWindow: int
        :returns: API response
        :raises: BinanceRequestException, BinanceAPIException
        """
        # POST carries no payload beyond the signed recvWindow/timestamp
        return self._request_margin_api('post', 'disableFastWithdrawSwitch', True, data=params)
    def enable_fast_withdraw_switch(self, **params):
        """Enable Fast Withdraw Switch.

        https://binance-docs.github.io/apidocs/spot/en/#enable-fast-withdraw-switch-user_data

        :param recvWindow: optional
        :type recvWindow: int
        :returns: API response
        :raises: BinanceRequestException, BinanceAPIException
        """
        # POST carries no payload beyond the signed recvWindow/timestamp
        return self._request_margin_api('post', 'enableFastWithdrawSwitch', True, data=params)
# improved documentation
# coding=utf-8
import hashlib
import hmac
import requests
import time
from operator import itemgetter
from .helpers import date_to_milliseconds, interval_to_milliseconds
from .exceptions import BinanceAPIException, BinanceRequestException, BinanceWithdrawException
class Client(object):
API_URL = 'https://api.binance.{}/api'
WITHDRAW_API_URL = 'https://api.binance.{}/wapi'
MARGIN_API_URL = 'https://api.binance.{}/sapi'
WEBSITE_URL = 'https://www.binance.{}'
FUTURES_URL = 'https://fapi.binance.{}/fapi'
FUTURES_DATA_URL = 'https://fapi.binance.{}/futures/data'
FUTURES_COIN_URL = "https://dapi.binance.{}/dapi"
FUTURES_COIN_DATA_URL = "https://dapi.binance.{}/futures/data"
PUBLIC_API_VERSION = 'v1'
PRIVATE_API_VERSION = 'v3'
WITHDRAW_API_VERSION = 'v3'
MARGIN_API_VERSION = 'v1'
FUTURES_API_VERSION = 'v1'
FUTURES_API_VERSION2 = "v2"
SYMBOL_TYPE_SPOT = 'SPOT'
ORDER_STATUS_NEW = 'NEW'
ORDER_STATUS_PARTIALLY_FILLED = 'PARTIALLY_FILLED'
ORDER_STATUS_FILLED = 'FILLED'
ORDER_STATUS_CANCELED = 'CANCELED'
ORDER_STATUS_PENDING_CANCEL = 'PENDING_CANCEL'
ORDER_STATUS_REJECTED = 'REJECTED'
ORDER_STATUS_EXPIRED = 'EXPIRED'
KLINE_INTERVAL_1MINUTE = '1m'
KLINE_INTERVAL_3MINUTE = '3m'
KLINE_INTERVAL_5MINUTE = '5m'
KLINE_INTERVAL_15MINUTE = '15m'
KLINE_INTERVAL_30MINUTE = '30m'
KLINE_INTERVAL_1HOUR = '1h'
KLINE_INTERVAL_2HOUR = '2h'
KLINE_INTERVAL_4HOUR = '4h'
KLINE_INTERVAL_6HOUR = '6h'
KLINE_INTERVAL_8HOUR = '8h'
KLINE_INTERVAL_12HOUR = '12h'
KLINE_INTERVAL_1DAY = '1d'
KLINE_INTERVAL_3DAY = '3d'
KLINE_INTERVAL_1WEEK = '1w'
KLINE_INTERVAL_1MONTH = '1M'
SIDE_BUY = 'BUY'
SIDE_SELL = 'SELL'
ORDER_TYPE_LIMIT = 'LIMIT'
ORDER_TYPE_MARKET = 'MARKET'
ORDER_TYPE_STOP_LOSS = 'STOP_LOSS'
ORDER_TYPE_STOP_LOSS_LIMIT = 'STOP_LOSS_LIMIT'
ORDER_TYPE_TAKE_PROFIT = 'TAKE_PROFIT'
ORDER_TYPE_TAKE_PROFIT_LIMIT = 'TAKE_PROFIT_LIMIT'
ORDER_TYPE_LIMIT_MAKER = 'LIMIT_MAKER'
TIME_IN_FORCE_GTC = 'GTC' # Good till cancelled
TIME_IN_FORCE_IOC = 'IOC' # Immediate or cancel
TIME_IN_FORCE_FOK = 'FOK' # Fill or kill
ORDER_RESP_TYPE_ACK = 'ACK'
ORDER_RESP_TYPE_RESULT = 'RESULT'
ORDER_RESP_TYPE_FULL = 'FULL'
# For accessing the data returned by Client.aggregate_trades().
AGG_ID = 'a'
AGG_PRICE = 'p'
AGG_QUANTITY = 'q'
AGG_FIRST_TRADE_ID = 'f'
AGG_LAST_TRADE_ID = 'l'
AGG_TIME = 'T'
AGG_BUYER_MAKES = 'm'
AGG_BEST_MATCH = 'M'
# new asset transfer api enum
SPOT_TO_FIAT = "MAIN_C2C"
SPOT_TO_USDT_FUTURE = "MAIN_UMFUTURE"
SPOT_TO_COIN_FUTURE = "MAIN_CMFUTURE"
SPOT_TO_MARGIN_CROSS = "MAIN_MARGIN"
SPOT_TO_MINING = "MAIN_MINING"
FIAT_TO_SPOT = "C2C_MAIN"
FIAT_TO_USDT_FUTURE = "C2C_UMFUTURE"
FIAT_TO_MINING = "C2C_MINING"
USDT_FUTURE_TO_SPOT = "UMFUTURE_MAIN"
USDT_FUTURE_TO_FIAT = "UMFUTURE_C2C"
USDT_FUTURE_TO_MARGIN_CROSS = "UMFUTURE_MARGIN"
COIN_FUTURE_TO_SPOT = "CMFUTURE_MAIN"
MARGIN_CROSS_TO_SPOT = "MARGIN_MAIN"
MARGIN_CROSS_TO_USDT_FUTURE = "MARGIN_UMFUTURE"
MINING_TO_SPOT = "MINING_MAIN"
MINING_TO_USDT_FUTURE = "MINING_UMFUTURE"
MINING_TO_FIAT = "MINING_C2C"
    def __init__(self, api_key=None, api_secret=None, requests_params=None, tld='com'):
        """Binance API Client constructor

        :param api_key: Api Key
        :type api_key: str.
        :param api_secret: Api Secret
        :type api_secret: str.
        :param requests_params: optional - Dictionary of requests params to use for all calls
        :type requests_params: dict.
        :param tld: optional - top-level domain substituted into every endpoint
            URL template (e.g. 'com', 'us')
        :type tld: str.

        .. note:: construction performs two live HTTP calls (``ping`` and
            ``get_server_time``), so instantiating the client requires
            network connectivity.
        """
        self.API_URL = self.API_URL.format(tld)
        self.WITHDRAW_API_URL = self.WITHDRAW_API_URL.format(tld)
        self.MARGIN_API_URL = self.MARGIN_API_URL.format(tld)
        self.WEBSITE_URL = self.WEBSITE_URL.format(tld)
        self.FUTURES_URL = self.FUTURES_URL.format(tld)
        self.FUTURES_DATA_URL = self.FUTURES_DATA_URL.format(tld)
        self.FUTURES_COIN_URL = self.FUTURES_COIN_URL.format(tld)
        self.FUTURES_COIN_DATA_URL = self.FUTURES_COIN_DATA_URL.format(tld)
        self.API_KEY = api_key
        self.API_SECRET = api_secret
        self.session = self._init_session()
        self._requests_params = requests_params
        self.response = None  # last requests.Response, kept for debugging
        self.timestamp_offset = 0
        # init DNS and SSL cert
        self.ping()
        # calculate timestamp offset between local and binance server
        res = self.get_server_time()
        self.timestamp_offset = res['serverTime'] - int(time.time() * 1000)
    def _init_session(self):
        """Create the shared HTTP session with JSON and API-key headers.

        :return: configured ``requests.Session``
        """
        session = requests.session()
        session.headers.update({'Accept': 'application/json',
                                'User-Agent': 'binance/python',
                                'X-MBX-APIKEY': self.API_KEY})
        return session
def _create_api_uri(self, path, signed=True, version=PUBLIC_API_VERSION):
v = self.PRIVATE_API_VERSION if signed else version
return self.API_URL + '/' + v + '/' + path
def _create_withdraw_api_uri(self, path):
return self.WITHDRAW_API_URL + '/' + self.WITHDRAW_API_VERSION + '/' + path
def _create_margin_api_uri(self, path):
return self.MARGIN_API_URL + '/' + self.MARGIN_API_VERSION + '/' + path
def _create_website_uri(self, path):
return self.WEBSITE_URL + '/' + path
def _create_futures_api_uri(self, path):
return self.FUTURES_URL + '/' + self.FUTURES_API_VERSION + '/' + path
def _create_futures_data_api_uri(self, path):
return self.FUTURES_DATA_URL + '/' + path
def _create_futures_coin_api_url(self, path, version=1):
options = {1: self.FUTURES_API_VERSION, 2: self.FUTURES_API_VERSION2}
return self.FUTURES_COIN_URL + "/" + options[version] + "/" + path
    def _create_futures_coin_data_api_url(self, path, version=1):
        """Build a coin-margined futures data endpoint URL.

        NOTE(review): the ``version`` argument is accepted for signature
        symmetry with ``_create_futures_coin_api_url`` but is not used —
        the futures data endpoints appear to be unversioned; confirm
        against the dapi docs before adding a version segment.
        """
        return self.FUTURES_COIN_DATA_URL + "/" + path
def _generate_signature(self, data):
ordered_data = self._order_params(data)
query_string = '&'.join(["{}={}".format(d[0], d[1]) for d in ordered_data])
m = hmac.new(self.API_SECRET.encode('utf-8'), query_string.encode('utf-8'), hashlib.sha256)
return m.hexdigest()
def _order_params(self, data):
"""Convert params to list with signature as last element
:param data:
:return:
"""
has_signature = False
params = []
for key, value in data.items():
if key == 'signature':
has_signature = True
else:
params.append((key, value))
# sort parameters by key
params.sort(key=itemgetter(0))
if has_signature:
params.append(('signature', data['signature']))
return params
    def _request(self, method, uri, signed, force_params=False, **kwargs):
        """Core HTTP dispatcher: sign, order and strip params, then send.

        :param method: lower-case HTTP verb matching a ``requests.Session``
            method name ('get', 'post', 'put', 'delete')
        :param uri: fully-built endpoint URL
        :param signed: add timestamp + HMAC signature to the params
        :param force_params: send params on the query string even for
            non-GET requests (required by several Binance endpoint families)
        :return: decoded JSON response via ``_handle_response``
        """
        # set default requests timeout
        kwargs['timeout'] = 10
        # add our global requests params
        if self._requests_params:
            kwargs.update(self._requests_params)
        data = kwargs.get('data', None)
        if data and isinstance(data, dict):
            kwargs['data'] = data
            # find any requests params passed and apply them
            if 'requests_params' in kwargs['data']:
                # merge requests params into kwargs
                kwargs.update(kwargs['data']['requests_params'])
                del(kwargs['data']['requests_params'])
        if signed:
            # generate signature
            # NOTE: timestamp must be added BEFORE signing so it is covered
            # by the signature; timestamp_offset corrects local clock drift.
            # Signed callers are expected to always pass a 'data' dict.
            kwargs['data']['timestamp'] = int(time.time() * 1000 + self.timestamp_offset)
            kwargs['data']['signature'] = self._generate_signature(kwargs['data'])
        # sort get and post params to match signature order
        if data:
            # sort post params
            kwargs['data'] = self._order_params(kwargs['data'])
            # Remove any arguments with values of None.
            null_args = [i for i, (key, value) in enumerate(kwargs['data']) if value is None]
            for i in reversed(null_args):
                del kwargs['data'][i]
        # if get request assign data array to params value for requests lib
        if data and (method == 'get' or force_params):
            kwargs['params'] = '&'.join('%s=%s' % (data[0], data[1]) for data in kwargs['data'])
            del(kwargs['data'])
        # keep the raw response on the instance for post-mortem inspection
        self.response = getattr(self.session, method)(uri, **kwargs)
        return self._handle_response()
    def _request_api(self, method, path, signed=False, version=PUBLIC_API_VERSION, **kwargs):
        """Dispatch a request to the main /api endpoints."""
        uri = self._create_api_uri(path, signed, version)
        return self._request(method, uri, signed, **kwargs)
    def _request_withdraw_api(self, method, path, signed=False, **kwargs):
        """Dispatch a request to the /wapi endpoints.

        force_params=True: parameters always travel on the query string.
        """
        uri = self._create_withdraw_api_uri(path)
        return self._request(method, uri, signed, True, **kwargs)
    def _request_margin_api(self, method, path, signed=False, **kwargs):
        """Dispatch a request to the /sapi (margin and misc account) endpoints."""
        uri = self._create_margin_api_uri(path)
        return self._request(method, uri, signed, **kwargs)
    def _request_website(self, method, path, signed=False, **kwargs):
        """Dispatch a request to the binance.com website (non-REST) endpoints."""
        uri = self._create_website_uri(path)
        return self._request(method, uri, signed, **kwargs)
    def _request_futures_api(self, method, path, signed=False, **kwargs):
        """Dispatch a request to the USDT-margined futures /fapi endpoints.

        force_params=True: parameters always travel on the query string.
        """
        uri = self._create_futures_api_uri(path)
        return self._request(method, uri, signed, True, **kwargs)
    def _request_futures_data_api(self, method, path, signed=False, **kwargs):
        """Dispatch a request to the futures data endpoints.

        force_params=True: parameters always travel on the query string.
        """
        uri = self._create_futures_data_api_uri(path)
        return self._request(method, uri, signed, True, **kwargs)
    def _request_futures_coin_api(self, method, path, signed=False, version=1, **kwargs):
        """Dispatch a request to the coin-margined futures /dapi endpoints.

        force_params=True: parameters always travel on the query string.
        """
        uri = self._create_futures_coin_api_url(path, version=version)
        return self._request(method, uri, signed, True, **kwargs)
    def _request_futures_coin_data_api(self, method, path, signed=False, version=1, **kwargs):
        """Dispatch a request to the coin-margined futures data endpoints.

        force_params=True: parameters always travel on the query string.
        """
        uri = self._create_futures_coin_data_api_url(path, version=version)
        return self._request(method, uri, signed, True, **kwargs)
def _handle_response(self):
"""Internal helper for handling API responses from the Binance server.
Raises the appropriate exceptions when necessary; otherwise, returns the
response.
"""
if not (200 <= self.response.status_code < 300):
raise BinanceAPIException(self.response)
try:
return self.response.json()
except ValueError:
raise BinanceRequestException('Invalid Response: %s' % self.response.text)
    def _get(self, path, signed=False, version=PUBLIC_API_VERSION, **kwargs):
        """Convenience wrapper for GET requests against the main API."""
        return self._request_api('get', path, signed, version, **kwargs)
    def _post(self, path, signed=False, version=PUBLIC_API_VERSION, **kwargs):
        """Convenience wrapper for POST requests against the main API."""
        return self._request_api('post', path, signed, version, **kwargs)
    def _put(self, path, signed=False, version=PUBLIC_API_VERSION, **kwargs):
        """Convenience wrapper for PUT requests against the main API."""
        return self._request_api('put', path, signed, version, **kwargs)
    def _delete(self, path, signed=False, version=PUBLIC_API_VERSION, **kwargs):
        """Convenience wrapper for DELETE requests against the main API."""
        return self._request_api('delete', path, signed, version, **kwargs)
# Exchange Endpoints
    def get_products(self):
        """Return list of products currently listed on Binance

        Use get_exchange_info() call instead

        :returns: list - List of product dictionaries
        :raises: BinanceRequestException, BinanceAPIException
        """
        # undocumented website endpoint, not part of the official REST API
        products = self._request_website('get', 'exchange-api/v1/public/asset-service/product/get-products')
        return products
def get_exchange_info(self):
"""Return rate limits and list of symbols
:returns: list - List of product dictionaries
.. code-block:: python
{
"timezone": "UTC",
"serverTime": 1508631584636,
"rateLimits": [
{
"rateLimitType": "REQUESTS",
"interval": "MINUTE",
"limit": 1200
},
{
"rateLimitType": "ORDERS",
"interval": "SECOND",
"limit": 10
},
{
"rateLimitType": "ORDERS",
"interval": "DAY",
"limit": 100000
}
],
"exchangeFilters": [],
"symbols": [
{
"symbol": "ETHBTC",
"status": "TRADING",
"baseAsset": "ETH",
"baseAssetPrecision": 8,
"quoteAsset": "BTC",
"quotePrecision": 8,
"orderTypes": ["LIMIT", "MARKET"],
"icebergAllowed": false,
"filters": [
{
"filterType": "PRICE_FILTER",
"minPrice": "0.00000100",
"maxPrice": "100000.00000000",
"tickSize": "0.00000100"
}, {
"filterType": "LOT_SIZE",
"minQty": "0.00100000",
"maxQty": "100000.00000000",
"stepSize": "0.00100000"
}, {
"filterType": "MIN_NOTIONAL",
"minNotional": "0.00100000"
}
]
}
]
}
:raises: BinanceRequestException, BinanceAPIException
"""
return self._get('exchangeInfo', version=self.PRIVATE_API_VERSION)
def get_symbol_info(self, symbol):
"""Return information about a symbol
:param symbol: required e.g BNBBTC
:type symbol: str
:returns: Dict if found, None if not
.. code-block:: python
{
"symbol": "ETHBTC",
"status": "TRADING",
"baseAsset": "ETH",
"baseAssetPrecision": 8,
"quoteAsset": "BTC",
"quotePrecision": 8,
"orderTypes": ["LIMIT", "MARKET"],
"icebergAllowed": false,
"filters": [
{
"filterType": "PRICE_FILTER",
"minPrice": "0.00000100",
"maxPrice": "100000.00000000",
"tickSize": "0.00000100"
}, {
"filterType": "LOT_SIZE",
"minQty": "0.00100000",
"maxQty": "100000.00000000",
"stepSize": "0.00100000"
}, {
"filterType": "MIN_NOTIONAL",
"minNotional": "0.00100000"
}
]
}
:raises: BinanceRequestException, BinanceAPIException
"""
res = self._get('exchangeInfo', version=self.PRIVATE_API_VERSION)
for item in res['symbols']:
if item['symbol'] == symbol.upper():
return item
return None
# General Endpoints
def ping(self):
"""Test connectivity to the Rest API.
https://github.com/binance/binance-spot-api-docs/blob/master/rest-api.md#test-connectivity
:returns: Empty array
.. code-block:: python
{}
:raises: BinanceRequestException, BinanceAPIException
"""
return self._get('ping', version=self.PRIVATE_API_VERSION)
def get_server_time(self):
"""Test connectivity to the Rest API and get the current server time.
https://github.com/binance/binance-spot-api-docs/blob/master/rest-api.md#check-server-time
:returns: Current server time
.. code-block:: python
{
"serverTime": 1499827319559
}
:raises: BinanceRequestException, BinanceAPIException
"""
return self._get('time', version=self.PRIVATE_API_VERSION)
# Market Data Endpoints
def get_all_tickers(self):
"""Latest price for all symbols.
https://www.binance.com/restapipub.html#symbols-price-ticker
:returns: List of market tickers
.. code-block:: python
[
{
"symbol": "LTCBTC",
"price": "4.00000200"
},
{
"symbol": "ETHBTC",
"price": "0.07946600"
}
]
:raises: BinanceRequestException, BinanceAPIException
"""
return self._get('ticker/price', version=self.PRIVATE_API_VERSION)
def get_orderbook_tickers(self):
"""Best price/qty on the order book for all symbols.
https://www.binance.com/restapipub.html#symbols-order-book-ticker
:returns: List of order book market entries
.. code-block:: python
[
{
"symbol": "LTCBTC",
"bidPrice": "4.00000000",
"bidQty": "431.00000000",
"askPrice": "4.00000200",
"askQty": "9.00000000"
},
{
"symbol": "ETHBTC",
"bidPrice": "0.07946700",
"bidQty": "9.00000000",
"askPrice": "100000.00000000",
"askQty": "1000.00000000"
}
]
:raises: BinanceRequestException, BinanceAPIException
"""
return self._get('ticker/bookTicker', version=self.PRIVATE_API_VERSION)
def get_order_book(self, **params):
"""Get the Order Book for the market
https://github.com/binance/binance-spot-api-docs/blob/master/rest-api.md#order-book
:param symbol: required
:type symbol: str
:param limit: Default 100; max 1000
:type limit: int
:returns: API response
.. code-block:: python
{
"lastUpdateId": 1027024,
"bids": [
[
"4.00000000", # PRICE
"431.00000000", # QTY
[] # Can be ignored
]
],
"asks": [
[
"4.00000200",
"12.00000000",
[]
]
]
}
:raises: BinanceRequestException, BinanceAPIException
"""
return self._get('depth', data=params, version=self.PRIVATE_API_VERSION)
def get_recent_trades(self, **params):
"""Get recent trades (up to last 500).
https://github.com/binance/binance-spot-api-docs/blob/master/rest-api.md#recent-trades-list
:param symbol: required
:type symbol: str
:param limit: Default 500; max 500.
:type limit: int
:returns: API response
.. code-block:: python
[
{
"id": 28457,
"price": "4.00000100",
"qty": "12.00000000",
"time": 1499865549590,
"isBuyerMaker": true,
"isBestMatch": true
}
]
:raises: BinanceRequestException, BinanceAPIException
"""
return self._get('trades', data=params)
def get_historical_trades(self, **params):
"""Get older trades.
https://github.com/binance/binance-spot-api-docs/blob/master/rest-api.md#recent-trades-list
:param symbol: required
:type symbol: str
:param limit: Default 500; max 500.
:type limit: int
:param fromId: TradeId to fetch from. Default gets most recent trades.
:type fromId: str
:returns: API response
.. code-block:: python
[
{
"id": 28457,
"price": "4.00000100",
"qty": "12.00000000",
"time": 1499865549590,
"isBuyerMaker": true,
"isBestMatch": true
}
]
:raises: BinanceRequestException, BinanceAPIException
"""
return self._get('historicalTrades', data=params, version=self.PRIVATE_API_VERSION)
def get_aggregate_trades(self, **params):
"""Get compressed, aggregate trades. Trades that fill at the time,
from the same order, with the same price will have the quantity aggregated.
https://github.com/binance/binance-spot-api-docs/blob/master/rest-api.md#compressedaggregate-trades-list
:param symbol: required
:type symbol: str
:param fromId: ID to get aggregate trades from INCLUSIVE.
:type fromId: str
:param startTime: Timestamp in ms to get aggregate trades from INCLUSIVE.
:type startTime: int
:param endTime: Timestamp in ms to get aggregate trades until INCLUSIVE.
:type endTime: int
:param limit: Default 500; max 500.
:type limit: int
:returns: API response
.. code-block:: python
[
{
"a": 26129, # Aggregate tradeId
"p": "0.01633102", # Price
"q": "4.70443515", # Quantity
"f": 27781, # First tradeId
"l": 27781, # Last tradeId
"T": 1498793709153, # Timestamp
"m": true, # Was the buyer the maker?
"M": true # Was the trade the best price match?
}
]
:raises: BinanceRequestException, BinanceAPIException
"""
return self._get('aggTrades', data=params, version=self.PRIVATE_API_VERSION)
    def aggregate_trade_iter(self, symbol, start_str=None, last_id=None):
        """Iterate over aggregate trade data from (start_time or last_id) to
        the end of the history so far.

        If start_time is specified, start with the first trade after
        start_time. Meant to initialise a local cache of trade data.

        If last_id is specified, start with the trade after it. This is meant
        for updating a pre-existing local trade data cache.

        Only allows start_str or last_id—not both. Not guaranteed to work
        right if you're running more than one of these simultaneously. You
        will probably hit your rate limit.

        See dateparser docs for valid start and end string formats http://dateparser.readthedocs.io/en/latest/

        If using offset strings for dates add "UTC" to date string e.g. "now UTC", "11 hours ago UTC"

        :param symbol: Symbol string e.g. ETHBTC
        :type symbol: str
        :param start_str: Start date string in UTC format or timestamp in milliseconds. The iterator will
            return the first trade occurring later than this time.
        :type start_str: str|int
        :param last_id: aggregate trade ID of the last known aggregate trade.
            Not a regular trade ID. See https://github.com/binance/binance-spot-api-docs/blob/master/rest-api.md#compressedaggregate-trades-list.
        :type last_id: int
        :returns: an iterator of JSON objects, one per trade. The format of
            each object is identical to Client.aggregate_trades().
        """
        if start_str is not None and last_id is not None:
            raise ValueError(
                'start_time and last_id may not be simultaneously specified.')
        # If there's no last_id, get one.
        if last_id is None:
            # Without a last_id, we actually need the first trade. Normally,
            # we'd get rid of it. See the next loop.
            if start_str is None:
                trades = self.get_aggregate_trades(symbol=symbol, fromId=0)
            else:
                # The difference between startTime and endTime should be less
                # or equal than an hour and the result set should contain at
                # least one trade.
                if type(start_str) == int:
                    start_ts = start_str
                else:
                    start_ts = date_to_milliseconds(start_str)
                # If the resulting set is empty (i.e. no trades in that interval)
                # then we just move forward hour by hour until we find at least one
                # trade or reach present moment
                while True:
                    end_ts = start_ts + (60 * 60 * 1000)
                    trades = self.get_aggregate_trades(
                        symbol=symbol,
                        startTime=start_ts,
                        endTime=end_ts)
                    if len(trades) > 0:
                        break
                    # If we reach present moment and find no trades then there is
                    # nothing to iterate, so we're done
                    if end_ts > int(time.time() * 1000):
                        return
                    start_ts = end_ts
            for t in trades:
                yield t
            last_id = trades[-1][self.AGG_ID]
        while True:
            # There is no need to wait between queries to avoid hitting the
            # rate limit: with blocking IO and a single caller, Binance
            # paces the responses server-side, so the HTTP call itself
            # throttles this loop.
            trades = self.get_aggregate_trades(symbol=symbol, fromId=last_id)
            # fromId=n returns a set starting with id n, but we already have
            # that one. So get rid of the first item in the result set.
            trades = trades[1:]
            if len(trades) == 0:
                return
            for t in trades:
                yield t
            last_id = trades[-1][self.AGG_ID]
def get_klines(self, **params):
"""Kline/candlestick bars for a symbol. Klines are uniquely identified by their open time.
https://github.com/binance/binance-spot-api-docs/blob/master/rest-api.md#klinecandlestick-data
:param symbol: required
:type symbol: str
:param interval: -
:type interval: str
:param limit: - Default 500; max 500.
:type limit: int
:param startTime:
:type startTime: int
:param endTime:
:type endTime: int
:returns: API response
.. code-block:: python
[
[
1499040000000, # Open time
"0.01634790", # Open
"0.80000000", # High
"0.01575800", # Low
"0.01577100", # Close
"148976.11427815", # Volume
1499644799999, # Close time
"2434.19055334", # Quote asset volume
308, # Number of trades
"1756.87402397", # Taker buy base asset volume
"28.46694368", # Taker buy quote asset volume
"17928899.62484339" # Can be ignored
]
]
:raises: BinanceRequestException, BinanceAPIException
"""
return self._get('klines', data=params, version=self.PRIVATE_API_VERSION)
def _klines(self, spot=True, **params):
"""Get klines of spot (get_klines) or futures (futures_klines) endpoints.
:param spot: Spot klines functions, otherwise futures
:type spot: bool
:return: klines, see get_klines
"""
if spot:
return self.get_klines(**params)
else:
return self.futures_klines(**params)
def _get_earliest_valid_timestamp(self, symbol, interval, spot):
"""Get earliest valid open timestamp from Binance
:param symbol: Name of symbol pair e.g BNBBTC
:type symbol: str
:param interval: Binance Kline interval
:type interval: str
:param spot: Spot endpoint, otherwise futures
:type spot: bool
:return: first valid timestamp
"""
kline = self._klines(spot=spot,
symbol=symbol,
interval=interval,
limit=1,
startTime=0,
endTime=None
)
return kline[0][0]
def get_historical_klines(self, symbol, interval, start_str, end_str=None,
limit=500):
"""Get Historical Klines from Binance
:param symbol: Name of symbol pair e.g BNBBTC
:type symbol: str
:param interval: Binance Kline interval
:type interval: str
:param start_str: Start date string in UTC format or timestamp in milliseconds
:type start_str: str|int
:param end_str: optional - end date string in UTC format or timestamp in milliseconds (default will fetch everything up to now)
:type end_str: str|int
:param limit: Default 500; max 1000.
:type limit: int
:return: list of OHLCV values
"""
return self._historical_klines(symbol, interval, start_str, end_str=None, limit=500, spot=True)
def _historical_klines(self, symbol, interval, start_str, end_str=None,
limit=500, spot=True):
"""Get Historical Klines from Binance (spot or futures)
See dateparser docs for valid start and end string formats http://dateparser.readthedocs.io/en/latest/
If using offset strings for dates add "UTC" to date string e.g. "now UTC", "11 hours ago UTC"
:param symbol: Name of symbol pair e.g BNBBTC
:type symbol: str
:param interval: Binance Kline interval
:type interval: str
:param start_str: Start date string in UTC format or timestamp in milliseconds
:type start_str: str|int
:param end_str: optional - end date string in UTC format or timestamp in milliseconds (default will fetch everything up to now)
:type end_str: str|int
:param limit: Default 500; max 1000.
:type limit: int
:param limit: Default 500; max 1000.
:type limit: int
:param spot: Historical klines from spot endpoint, otherwise futures
:type spot: bool
:return: list of OHLCV values
"""
# init our list
output_data = []
# setup the max limit
limit = limit
# convert interval to useful value in seconds
timeframe = interval_to_milliseconds(interval)
# convert our date strings to milliseconds
if type(start_str) == int:
start_ts = start_str
else:
start_ts = date_to_milliseconds(start_str)
# establish first available start timestamp
first_valid_ts = self._get_earliest_valid_timestamp(symbol, interval, spot)
start_ts = max(start_ts, first_valid_ts)
# if an end time was passed convert it
end_ts = None
if end_str:
if type(end_str) == int:
end_ts = end_str
else:
end_ts = date_to_milliseconds(end_str)
idx = 0
while True:
# fetch the klines from start_ts up to max 500 entries or the end_ts if set
temp_data = self._klines(
spot=spot,
symbol=symbol,
interval=interval,
limit=limit,
startTime=start_ts,
endTime=end_ts
)
# handle the case where exactly the limit amount of data was returned last loop
if not len(temp_data):
break
# append this loops data to our output data
output_data += temp_data
# set our start timestamp using the last value in the array
start_ts = temp_data[-1][0]
idx += 1
# check if we received less than the required limit and exit the loop
if len(temp_data) < limit:
# exit the while loop
break
# increment next call by our timeframe
start_ts += timeframe
# sleep after every 3rd call to be kind to the API
if idx % 3 == 0:
time.sleep(1)
return output_data
def get_historical_klines_generator(self, symbol, interval, start_str, end_str=None):
"""Get Historical Klines generator from Binance
:param symbol: Name of symbol pair e.g BNBBTC
:type symbol: str
:param interval: Binance Kline interval
:type interval: str
:param start_str: Start date string in UTC format or timestamp in milliseconds
:type start_str: str|int
:param end_str: optional - end date string in UTC format or timestamp in milliseconds (default will fetch everything up to now)
:type end_str: str|int
:return: generator of OHLCV values
"""
return self._historical_klines_generator(symbol, interval, start_str, end_str=end_str, spot=True)
def _historical_klines_generator(self, symbol, interval, start_str, end_str=None, spot=True):
"""Get Historical Klines generator from Binance (spot or futures)
See dateparser docs for valid start and end string formats http://dateparser.readthedocs.io/en/latest/
If using offset strings for dates add "UTC" to date string e.g. "now UTC", "11 hours ago UTC"
:param symbol: Name of symbol pair e.g BNBBTC
:type symbol: str
:param interval: Binance Kline interval
:type interval: str
:param start_str: Start date string in UTC format or timestamp in milliseconds
:type start_str: str|int
:param end_str: optional - end date string in UTC format or timestamp in milliseconds (default will fetch everything up to now)
:type end_str: str|int
:param spot: Historical klines generator from spot endpoint, otherwise futures
:type spot: bool
:return: generator of OHLCV values
"""
# setup the max limit
limit = 500
# convert interval to useful value in seconds
timeframe = interval_to_milliseconds(interval)
# convert our date strings to milliseconds
if type(start_str) == int:
start_ts = start_str
else:
start_ts = date_to_milliseconds(start_str)
# establish first available start timestamp
first_valid_ts = self._get_earliest_valid_timestamp(symbol, interval, spot)
start_ts = max(start_ts, first_valid_ts)
# if an end time was passed convert it
end_ts = None
if end_str:
if type(end_str) == int:
end_ts = end_str
else:
end_ts = date_to_milliseconds(end_str)
idx = 0
while True:
# fetch the klines from start_ts up to max 500 entries or the end_ts if set
output_data = self.get_klines(
spot=spot,
symbol=symbol,
interval=interval,
limit=limit,
startTime=start_ts,
endTime=end_ts
)
# handle the case where exactly the limit amount of data was returned last loop
if not len(output_data):
break
# yield data
for o in output_data:
yield o
# set our start timestamp using the last value in the array
start_ts = output_data[-1][0]
idx += 1
# check if we received less than the required limit and exit the loop
if len(output_data) < limit:
# exit the while loop
break
# increment next call by our timeframe
start_ts += timeframe
# sleep after every 3rd call to be kind to the API
if idx % 3 == 0:
time.sleep(1)
def get_avg_price(self, **params):
"""Current average price for a symbol.
https://github.com/binance/binance-spot-api-docs/blob/master/rest-api.md#current-average-price
:param symbol:
:type symbol: str
:returns: API response
.. code-block:: python
{
"mins": 5,
"price": "9.35751834"
}
"""
return self._get('avgPrice', data=params, version=self.PRIVATE_API_VERSION)
def get_ticker(self, **params):
"""24 hour price change statistics.
https://github.com/binance/binance-spot-api-docs/blob/master/rest-api.md#24hr-ticker-price-change-statistics
:param symbol:
:type symbol: str
:returns: API response
.. code-block:: python
{
"priceChange": "-94.99999800",
"priceChangePercent": "-95.960",
"weightedAvgPrice": "0.29628482",
"prevClosePrice": "0.10002000",
"lastPrice": "4.00000200",
"bidPrice": "4.00000000",
"askPrice": "4.00000200",
"openPrice": "99.00000000",
"highPrice": "100.00000000",
"lowPrice": "0.10000000",
"volume": "8913.30000000",
"openTime": 1499783499040,
"closeTime": 1499869899040,
"fristId": 28385, # First tradeId
"lastId": 28460, # Last tradeId
"count": 76 # Trade count
}
OR
.. code-block:: python
[
{
"priceChange": "-94.99999800",
"priceChangePercent": "-95.960",
"weightedAvgPrice": "0.29628482",
"prevClosePrice": "0.10002000",
"lastPrice": "4.00000200",
"bidPrice": "4.00000000",
"askPrice": "4.00000200",
"openPrice": "99.00000000",
"highPrice": "100.00000000",
"lowPrice": "0.10000000",
"volume": "8913.30000000",
"openTime": 1499783499040,
"closeTime": 1499869899040,
"fristId": 28385, # First tradeId
"lastId": 28460, # Last tradeId
"count": 76 # Trade count
}
]
:raises: BinanceRequestException, BinanceAPIException
"""
return self._get('ticker/24hr', data=params, version=self.PRIVATE_API_VERSION)
def get_symbol_ticker(self, **params):
"""Latest price for a symbol or symbols.
https://github.com/binance/binance-spot-api-docs/blob/master/rest-api.md#24hr-ticker-price-change-statistics
:param symbol:
:type symbol: str
:returns: API response
.. code-block:: python
{
"symbol": "LTCBTC",
"price": "4.00000200"
}
OR
.. code-block:: python
[
{
"symbol": "LTCBTC",
"price": "4.00000200"
},
{
"symbol": "ETHBTC",
"price": "0.07946600"
}
]
:raises: BinanceRequestException, BinanceAPIException
"""
return self._get('ticker/price', data=params, version=self.PRIVATE_API_VERSION)
def get_orderbook_ticker(self, **params):
"""Latest price for a symbol or symbols.
https://github.com/binance/binance-spot-api-docs/blob/master/rest-api.md#symbol-order-book-ticker
:param symbol:
:type symbol: str
:returns: API response
.. code-block:: python
{
"symbol": "LTCBTC",
"bidPrice": "4.00000000",
"bidQty": "431.00000000",
"askPrice": "4.00000200",
"askQty": "9.00000000"
}
OR
.. code-block:: python
[
{
"symbol": "LTCBTC",
"bidPrice": "4.00000000",
"bidQty": "431.00000000",
"askPrice": "4.00000200",
"askQty": "9.00000000"
},
{
"symbol": "ETHBTC",
"bidPrice": "0.07946700",
"bidQty": "9.00000000",
"askPrice": "100000.00000000",
"askQty": "1000.00000000"
}
]
:raises: BinanceRequestException, BinanceAPIException
"""
return self._get('ticker/bookTicker', data=params, version=self.PRIVATE_API_VERSION)
# Account Endpoints
def create_order(self, **params):
"""Send in a new order
Any order with an icebergQty MUST have timeInForce set to GTC.
https://github.com/binance/binance-spot-api-docs/blob/master/rest-api.md#new-order--trade
:param symbol: required
:type symbol: str
:param side: required
:type side: str
:param type: required
:type type: str
:param timeInForce: required if limit order
:type timeInForce: str
:param quantity: required
:type quantity: decimal
:param quoteOrderQty: amount the user wants to spend (when buying) or receive (when selling)
of the quote asset, applicable to MARKET orders
:type quoteOrderQty: decimal
:param price: required
:type price: str
:param newClientOrderId: A unique id for the order. Automatically generated if not sent.
:type newClientOrderId: str
:param icebergQty: Used with LIMIT, STOP_LOSS_LIMIT, and TAKE_PROFIT_LIMIT to create an iceberg order.
:type icebergQty: decimal
:param newOrderRespType: Set the response JSON. ACK, RESULT, or FULL; default: RESULT.
:type newOrderRespType: str
:param recvWindow: the number of milliseconds the request is valid for
:type recvWindow: int
:returns: API response
Response ACK:
.. code-block:: python
{
"symbol":"LTCBTC",
"orderId": 1,
"clientOrderId": "myOrder1" # Will be newClientOrderId
"transactTime": 1499827319559
}
Response RESULT:
.. code-block:: python
{
"symbol": "BTCUSDT",
"orderId": 28,
"clientOrderId": "6gCrw2kRUAF9CvJDGP16IP",
"transactTime": 1507725176595,
"price": "0.00000000",
"origQty": "10.00000000",
"executedQty": "10.00000000",
"status": "FILLED",
"timeInForce": "GTC",
"type": "MARKET",
"side": "SELL"
}
Response FULL:
.. code-block:: python
{
"symbol": "BTCUSDT",
"orderId": 28,
"clientOrderId": "6gCrw2kRUAF9CvJDGP16IP",
"transactTime": 1507725176595,
"price": "0.00000000",
"origQty": "10.00000000",
"executedQty": "10.00000000",
"status": "FILLED",
"timeInForce": "GTC",
"type": "MARKET",
"side": "SELL",
"fills": [
{
"price": "4000.00000000",
"qty": "1.00000000",
"commission": "4.00000000",
"commissionAsset": "USDT"
},
{
"price": "3999.00000000",
"qty": "5.00000000",
"commission": "19.99500000",
"commissionAsset": "USDT"
},
{
"price": "3998.00000000",
"qty": "2.00000000",
"commission": "7.99600000",
"commissionAsset": "USDT"
},
{
"price": "3997.00000000",
"qty": "1.00000000",
"commission": "3.99700000",
"commissionAsset": "USDT"
},
{
"price": "3995.00000000",
"qty": "1.00000000",
"commission": "3.99500000",
"commissionAsset": "USDT"
}
]
}
:raises: BinanceRequestException, BinanceAPIException, BinanceOrderException, BinanceOrderMinAmountException, BinanceOrderMinPriceException, BinanceOrderMinTotalException, BinanceOrderUnknownSymbolException, BinanceOrderInactiveSymbolException
"""
return self._post('order', True, data=params)
def order_limit(self, timeInForce=TIME_IN_FORCE_GTC, **params):
"""Send in a new limit order
Any order with an icebergQty MUST have timeInForce set to GTC.
:param symbol: required
:type symbol: str
:param side: required
:type side: str
:param quantity: required
:type quantity: decimal
:param price: required
:type price: str
:param timeInForce: default Good till cancelled
:type timeInForce: str
:param newClientOrderId: A unique id for the order. Automatically generated if not sent.
:type newClientOrderId: str
:param icebergQty: Used with LIMIT, STOP_LOSS_LIMIT, and TAKE_PROFIT_LIMIT to create an iceberg order.
:type icebergQty: decimal
:param newOrderRespType: Set the response JSON. ACK, RESULT, or FULL; default: RESULT.
:type newOrderRespType: str
:param recvWindow: the number of milliseconds the request is valid for
:type recvWindow: int
:returns: API response
See order endpoint for full response options
:raises: BinanceRequestException, BinanceAPIException, BinanceOrderException, BinanceOrderMinAmountException, BinanceOrderMinPriceException, BinanceOrderMinTotalException, BinanceOrderUnknownSymbolException, BinanceOrderInactiveSymbolException
"""
params.update({
'type': self.ORDER_TYPE_LIMIT,
'timeInForce': timeInForce
})
return self.create_order(**params)
def order_limit_buy(self, timeInForce=TIME_IN_FORCE_GTC, **params):
"""Send in a new limit buy order
Any order with an icebergQty MUST have timeInForce set to GTC.
:param symbol: required
:type symbol: str
:param quantity: required
:type quantity: decimal
:param price: required
:type price: str
:param timeInForce: default Good till cancelled
:type timeInForce: str
:param newClientOrderId: A unique id for the order. Automatically generated if not sent.
:type newClientOrderId: str
:param stopPrice: Used with stop orders
:type stopPrice: decimal
:param icebergQty: Used with iceberg orders
:type icebergQty: decimal
:param newOrderRespType: Set the response JSON. ACK, RESULT, or FULL; default: RESULT.
:type newOrderRespType: str
:param recvWindow: the number of milliseconds the request is valid for
:type recvWindow: int
:returns: API response
See order endpoint for full response options
:raises: BinanceRequestException, BinanceAPIException, BinanceOrderException, BinanceOrderMinAmountException, BinanceOrderMinPriceException, BinanceOrderMinTotalException, BinanceOrderUnknownSymbolException, BinanceOrderInactiveSymbolException
"""
params.update({
'side': self.SIDE_BUY,
})
return self.order_limit(timeInForce=timeInForce, **params)
def order_limit_sell(self, timeInForce=TIME_IN_FORCE_GTC, **params):
"""Send in a new limit sell order
:param symbol: required
:type symbol: str
:param quantity: required
:type quantity: decimal
:param price: required
:type price: str
:param timeInForce: default Good till cancelled
:type timeInForce: str
:param newClientOrderId: A unique id for the order. Automatically generated if not sent.
:type newClientOrderId: str
:param stopPrice: Used with stop orders
:type stopPrice: decimal
:param icebergQty: Used with iceberg orders
:type icebergQty: decimal
:param newOrderRespType: Set the response JSON. ACK, RESULT, or FULL; default: RESULT.
:type newOrderRespType: str
:param recvWindow: the number of milliseconds the request is valid for
:type recvWindow: int
:returns: API response
See order endpoint for full response options
:raises: BinanceRequestException, BinanceAPIException, BinanceOrderException, BinanceOrderMinAmountException, BinanceOrderMinPriceException, BinanceOrderMinTotalException, BinanceOrderUnknownSymbolException, BinanceOrderInactiveSymbolException
"""
params.update({
'side': self.SIDE_SELL
})
return self.order_limit(timeInForce=timeInForce, **params)
def order_market(self, **params):
"""Send in a new market order
:param symbol: required
:type symbol: str
:param side: required
:type side: str
:param quantity: required
:type quantity: decimal
:param quoteOrderQty: amount the user wants to spend (when buying) or receive (when selling)
of the quote asset
:type quoteOrderQty: decimal
:param newClientOrderId: A unique id for the order. Automatically generated if not sent.
:type newClientOrderId: str
:param newOrderRespType: Set the response JSON. ACK, RESULT, or FULL; default: RESULT.
:type newOrderRespType: str
:param recvWindow: the number of milliseconds the request is valid for
:type recvWindow: int
:returns: API response
See order endpoint for full response options
:raises: BinanceRequestException, BinanceAPIException, BinanceOrderException, BinanceOrderMinAmountException, BinanceOrderMinPriceException, BinanceOrderMinTotalException, BinanceOrderUnknownSymbolException, BinanceOrderInactiveSymbolException
"""
params.update({
'type': self.ORDER_TYPE_MARKET
})
return self.create_order(**params)
def order_market_buy(self, **params):
"""Send in a new market buy order
:param symbol: required
:type symbol: str
:param quantity: required
:type quantity: decimal
:param quoteOrderQty: the amount the user wants to spend of the quote asset
:type quoteOrderQty: decimal
:param newClientOrderId: A unique id for the order. Automatically generated if not sent.
:type newClientOrderId: str
:param newOrderRespType: Set the response JSON. ACK, RESULT, or FULL; default: RESULT.
:type newOrderRespType: str
:param recvWindow: the number of milliseconds the request is valid for
:type recvWindow: int
:returns: API response
See order endpoint for full response options
:raises: BinanceRequestException, BinanceAPIException, BinanceOrderException, BinanceOrderMinAmountException, BinanceOrderMinPriceException, BinanceOrderMinTotalException, BinanceOrderUnknownSymbolException, BinanceOrderInactiveSymbolException
"""
params.update({
'side': self.SIDE_BUY
})
return self.order_market(**params)
def order_market_sell(self, **params):
"""Send in a new market sell order
:param symbol: required
:type symbol: str
:param quantity: required
:type quantity: decimal
:param quoteOrderQty: the amount the user wants to receive of the quote asset
:type quoteOrderQty: decimal
:param newClientOrderId: A unique id for the order. Automatically generated if not sent.
:type newClientOrderId: str
:param newOrderRespType: Set the response JSON. ACK, RESULT, or FULL; default: RESULT.
:type newOrderRespType: str
:param recvWindow: the number of milliseconds the request is valid for
:type recvWindow: int
:returns: API response
See order endpoint for full response options
:raises: BinanceRequestException, BinanceAPIException, BinanceOrderException, BinanceOrderMinAmountException, BinanceOrderMinPriceException, BinanceOrderMinTotalException, BinanceOrderUnknownSymbolException, BinanceOrderInactiveSymbolException
"""
params.update({
'side': self.SIDE_SELL
})
return self.order_market(**params)
def create_oco_order(self, **params):
"""Send in a new OCO order
https://github.com/binance/binance-spot-api-docs/blob/master/rest-api.md#new-oco-trade
:param symbol: required
:type symbol: str
:param listClientOrderId: A unique id for the list order. Automatically generated if not sent.
:type listClientOrderId: str
:param side: required
:type side: str
:param quantity: required
:type quantity: decimal
:param limitClientOrderId: A unique id for the limit order. Automatically generated if not sent.
:type limitClientOrderId: str
:param price: required
:type price: str
:param limitIcebergQty: Used to make the LIMIT_MAKER leg an iceberg order.
:type limitIcebergQty: decimal
:param stopClientOrderId: A unique id for the stop order. Automatically generated if not sent.
:type stopClientOrderId: str
:param stopPrice: required
:type stopPrice: str
:param stopLimitPrice: If provided, stopLimitTimeInForce is required.
:type stopLimitPrice: str
:param stopIcebergQty: Used with STOP_LOSS_LIMIT leg to make an iceberg order.
:type stopIcebergQty: decimal
:param stopLimitTimeInForce: Valid values are GTC/FOK/IOC.
:type stopLimitTimeInForce: str
:param newOrderRespType: Set the response JSON. ACK, RESULT, or FULL; default: RESULT.
:type newOrderRespType: str
:param recvWindow: the number of milliseconds the request is valid for
:type recvWindow: int
:returns: API response
Response ACK:
.. code-block:: python
{
}
Response RESULT:
.. code-block:: python
{
}
Response FULL:
.. code-block:: python
{
}
:raises: BinanceRequestException, BinanceAPIException, BinanceOrderException, BinanceOrderMinAmountException, BinanceOrderMinPriceException, BinanceOrderMinTotalException, BinanceOrderUnknownSymbolException, BinanceOrderInactiveSymbolException
"""
return self._post('order/oco', True, data=params)
def order_oco_buy(self, **params):
"""Send in a new OCO buy order
:param symbol: required
:type symbol: str
:param listClientOrderId: A unique id for the list order. Automatically generated if not sent.
:type listClientOrderId: str
:param quantity: required
:type quantity: decimal
:param limitClientOrderId: A unique id for the limit order. Automatically generated if not sent.
:type limitClientOrderId: str
:param price: required
:type price: str
:param limitIcebergQty: Used to make the LIMIT_MAKER leg an iceberg order.
:type limitIcebergQty: decimal
:param stopClientOrderId: A unique id for the stop order. Automatically generated if not sent.
:type stopClientOrderId: str
:param stopPrice: required
:type stopPrice: str
:param stopLimitPrice: If provided, stopLimitTimeInForce is required.
:type stopLimitPrice: str
:param stopIcebergQty: Used with STOP_LOSS_LIMIT leg to make an iceberg order.
:type stopIcebergQty: decimal
:param stopLimitTimeInForce: Valid values are GTC/FOK/IOC.
:type stopLimitTimeInForce: str
:param newOrderRespType: Set the response JSON. ACK, RESULT, or FULL; default: RESULT.
:type newOrderRespType: str
:param recvWindow: the number of milliseconds the request is valid for
:type recvWindow: int
:returns: API response
See OCO order endpoint for full response options
:raises: BinanceRequestException, BinanceAPIException, BinanceOrderException, BinanceOrderMinAmountException, BinanceOrderMinPriceException, BinanceOrderMinTotalException, BinanceOrderUnknownSymbolException, BinanceOrderInactiveSymbolException
"""
params.update({
'side': self.SIDE_BUY
})
return self.create_oco_order(**params)
def order_oco_sell(self, **params):
"""Send in a new OCO sell order
:param symbol: required
:type symbol: str
:param listClientOrderId: A unique id for the list order. Automatically generated if not sent.
:type listClientOrderId: str
:param quantity: required
:type quantity: decimal
:param limitClientOrderId: A unique id for the limit order. Automatically generated if not sent.
:type limitClientOrderId: str
:param price: required
:type price: str
:param limitIcebergQty: Used to make the LIMIT_MAKER leg an iceberg order.
:type limitIcebergQty: decimal
:param stopClientOrderId: A unique id for the stop order. Automatically generated if not sent.
:type stopClientOrderId: str
:param stopPrice: required
:type stopPrice: str
:param stopLimitPrice: If provided, stopLimitTimeInForce is required.
:type stopLimitPrice: str
:param stopIcebergQty: Used with STOP_LOSS_LIMIT leg to make an iceberg order.
:type stopIcebergQty: decimal
:param stopLimitTimeInForce: Valid values are GTC/FOK/IOC.
:type stopLimitTimeInForce: str
:param newOrderRespType: Set the response JSON. ACK, RESULT, or FULL; default: RESULT.
:type newOrderRespType: str
:param recvWindow: the number of milliseconds the request is valid for
:type recvWindow: int
:returns: API response
See OCO order endpoint for full response options
:raises: BinanceRequestException, BinanceAPIException, BinanceOrderException, BinanceOrderMinAmountException, BinanceOrderMinPriceException, BinanceOrderMinTotalException, BinanceOrderUnknownSymbolException, BinanceOrderInactiveSymbolException
"""
params.update({
'side': self.SIDE_SELL
})
return self.create_oco_order(**params)
def create_test_order(self, **params):
"""Test new order creation and signature/recvWindow long. Creates and validates a new order but does not send it into the matching engine.
https://github.com/binance/binance-spot-api-docs/blob/master/rest-api.md#test-new-order-trade
:param symbol: required
:type symbol: str
:param side: required
:type side: str
:param type: required
:type type: str
:param timeInForce: required if limit order
:type timeInForce: str
:param quantity: required
:type quantity: decimal
:param price: required
:type price: str
:param newClientOrderId: A unique id for the order. Automatically generated if not sent.
:type newClientOrderId: str
:param icebergQty: Used with iceberg orders
:type icebergQty: decimal
:param newOrderRespType: Set the response JSON. ACK, RESULT, or FULL; default: RESULT.
:type newOrderRespType: str
:param recvWindow: The number of milliseconds the request is valid for
:type recvWindow: int
:returns: API response
.. code-block:: python
{}
:raises: BinanceRequestException, BinanceAPIException, BinanceOrderException, BinanceOrderMinAmountException, BinanceOrderMinPriceException, BinanceOrderMinTotalException, BinanceOrderUnknownSymbolException, BinanceOrderInactiveSymbolException
"""
return self._post('order/test', True, data=params)
def get_order(self, **params):
"""Check an order's status. Either orderId or origClientOrderId must be sent.
https://github.com/binance/binance-spot-api-docs/blob/master/rest-api.md#query-order-user_data
:param symbol: required
:type symbol: str
:param orderId: The unique order id
:type orderId: int
:param origClientOrderId: optional
:type origClientOrderId: str
:param recvWindow: the number of milliseconds the request is valid for
:type recvWindow: int
:returns: API response
.. code-block:: python
{
"symbol": "LTCBTC",
"orderId": 1,
"clientOrderId": "myOrder1",
"price": "0.1",
"origQty": "1.0",
"executedQty": "0.0",
"status": "NEW",
"timeInForce": "GTC",
"type": "LIMIT",
"side": "BUY",
"stopPrice": "0.0",
"icebergQty": "0.0",
"time": 1499827319559
}
:raises: BinanceRequestException, BinanceAPIException
"""
return self._get('order', True, data=params)
def get_all_orders(self, **params):
"""Get all account orders; active, canceled, or filled.
https://github.com/binance/binance-spot-api-docs/blob/master/rest-api.md#all-orders-user_data
:param symbol: required
:type symbol: str
:param orderId: The unique order id
:type orderId: int
:param limit: Default 500; max 500.
:type limit: int
:param recvWindow: the number of milliseconds the request is valid for
:type recvWindow: int
:returns: API response
.. code-block:: python
[
{
"symbol": "LTCBTC",
"orderId": 1,
"clientOrderId": "myOrder1",
"price": "0.1",
"origQty": "1.0",
"executedQty": "0.0",
"status": "NEW",
"timeInForce": "GTC",
"type": "LIMIT",
"side": "BUY",
"stopPrice": "0.0",
"icebergQty": "0.0",
"time": 1499827319559
}
]
:raises: BinanceRequestException, BinanceAPIException
"""
return self._get('allOrders', True, data=params)
def cancel_order(self, **params):
"""Cancel an active order. Either orderId or origClientOrderId must be sent.
https://github.com/binance/binance-spot-api-docs/blob/master/rest-api.md#cancel-order-trade
:param symbol: required
:type symbol: str
:param orderId: The unique order id
:type orderId: int
:param origClientOrderId: optional
:type origClientOrderId: str
:param newClientOrderId: Used to uniquely identify this cancel. Automatically generated by default.
:type newClientOrderId: str
:param recvWindow: the number of milliseconds the request is valid for
:type recvWindow: int
:returns: API response
.. code-block:: python
{
"symbol": "LTCBTC",
"origClientOrderId": "myOrder1",
"orderId": 1,
"clientOrderId": "cancelMyOrder1"
}
:raises: BinanceRequestException, BinanceAPIException
"""
return self._delete('order', True, data=params)
def get_open_orders(self, **params):
"""Get all open orders on a symbol.
https://github.com/binance/binance-spot-api-docs/blob/master/rest-api.md#current-open-orders-user_data
:param symbol: optional
:type symbol: str
:param recvWindow: the number of milliseconds the request is valid for
:type recvWindow: int
:returns: API response
.. code-block:: python
[
{
"symbol": "LTCBTC",
"orderId": 1,
"clientOrderId": "myOrder1",
"price": "0.1",
"origQty": "1.0",
"executedQty": "0.0",
"status": "NEW",
"timeInForce": "GTC",
"type": "LIMIT",
"side": "BUY",
"stopPrice": "0.0",
"icebergQty": "0.0",
"time": 1499827319559
}
]
:raises: BinanceRequestException, BinanceAPIException
"""
return self._get('openOrders', True, data=params)
# User Stream Endpoints
def get_account(self, **params):
"""Get current account information.
https://github.com/binance/binance-spot-api-docs/blob/master/rest-api.md#account-information-user_data
:param recvWindow: the number of milliseconds the request is valid for
:type recvWindow: int
:returns: API response
.. code-block:: python
{
"makerCommission": 15,
"takerCommission": 15,
"buyerCommission": 0,
"sellerCommission": 0,
"canTrade": true,
"canWithdraw": true,
"canDeposit": true,
"balances": [
{
"asset": "BTC",
"free": "4723846.89208129",
"locked": "0.00000000"
},
{
"asset": "LTC",
"free": "4763368.68006011",
"locked": "0.00000000"
}
]
}
:raises: BinanceRequestException, BinanceAPIException
"""
return self._get('account', True, data=params)
def get_asset_balance(self, asset, **params):
"""Get current asset balance.
https://github.com/binance/binance-spot-api-docs/blob/master/rest-api.md#account-information-user_data
:param asset: required
:type asset: str
:param recvWindow: the number of milliseconds the request is valid for
:type recvWindow: int
:returns: dictionary or None if not found
.. code-block:: python
{
"asset": "BTC",
"free": "4723846.89208129",
"locked": "0.00000000"
}
:raises: BinanceRequestException, BinanceAPIException
"""
res = self.get_account(**params)
# find asset balance in list of balances
if "balances" in res:
for bal in res['balances']:
if bal['asset'].lower() == asset.lower():
return bal
return None
def get_my_trades(self, **params):
"""Get trades for a specific symbol.
https://github.com/binance/binance-spot-api-docs/blob/master/rest-api.md#account-trade-list-user_data
:param symbol: required
:type symbol: str
:param limit: Default 500; max 500.
:type limit: int
:param fromId: TradeId to fetch from. Default gets most recent trades.
:type fromId: int
:param recvWindow: the number of milliseconds the request is valid for
:type recvWindow: int
:returns: API response
.. code-block:: python
[
{
"id": 28457,
"price": "4.00000100",
"qty": "12.00000000",
"commission": "10.10000000",
"commissionAsset": "BNB",
"time": 1499865549590,
"isBuyer": true,
"isMaker": false,
"isBestMatch": true
}
]
:raises: BinanceRequestException, BinanceAPIException
"""
return self._get('myTrades', True, data=params)
def get_system_status(self):
"""Get system status detail.
https://binance-docs.github.io/apidocs/spot/en/#system-status-system
:returns: API response
.. code-block:: python
{
"status": 0, # 0: normal,1:system maintenance
"msg": "normal" # normal or System maintenance.
}
:raises: BinanceAPIException
"""
return self._request_withdraw_api('get', 'systemStatus.html')
def get_account_status(self, **params):
"""Get account status detail.
https://binance-docs.github.io/apidocs/spot/en/#account-status-user_data
:param recvWindow: the number of milliseconds the request is valid for
:type recvWindow: int
:returns: API response
.. code-block:: python
{
"msg": "Order failed:Low Order fill rate! Will be reactivated after 5 minutes.",
"success": true,
"objs": [
"5"
]
}
:raises: BinanceWithdrawException
"""
res = self._request_withdraw_api('get', 'accountStatus.html', True, data=params)
if not res.get('success'):
raise BinanceWithdrawException(res['msg'])
return res
def get_account_api_trading_status(self, **params):
"""Fetch account api trading status detail.
https://binance-docs.github.io/apidocs/spot/en/#account-api-trading-status-user_data
:param recvWindow: the number of milliseconds the request is valid for
:type recvWindow: int
:returns: API response
.. code-block:: python
{
"success": true, // Query result
"status": { // API trading status detail
"isLocked": false, // API trading function is locked or not
"plannedRecoverTime": 0, // If API trading function is locked, this is the planned recover time
"triggerCondition": {
"gcr": 150, // Number of GTC orders
"ifer": 150, // Number of FOK/IOC orders
"ufr": 300 // Number of orders
},
"indicators": { // The indicators updated every 30 seconds
"BTCUSDT": [ // The symbol
{
"i": "UFR", // Unfilled Ratio (UFR)
"c": 20, // Count of all orders
"v": 0.05, // Current UFR value
"t": 0.995 // Trigger UFR value
},
{
"i": "IFER", // IOC/FOK Expiration Ratio (IFER)
"c": 20, // Count of FOK/IOC orders
"v": 0.99, // Current IFER value
"t": 0.99 // Trigger IFER value
},
{
"i": "GCR", // GTC Cancellation Ratio (GCR)
"c": 20, // Count of GTC orders
"v": 0.99, // Current GCR value
"t": 0.99 // Trigger GCR value
}
],
"ETHUSDT": [
{
"i": "UFR",
"c": 20,
"v": 0.05,
"t": 0.995
},
{
"i": "IFER",
"c": 20,
"v": 0.99,
"t": 0.99
},
{
"i": "GCR",
"c": 20,
"v": 0.99,
"t": 0.99
}
]
},
"updateTime": 1547630471725 // The query result return time
}
}
:raises: BinanceWithdrawException
"""
res = self._request_withdraw_api('get', 'apiTradingStatus.html', True, data=params)
if not res.get('success'):
raise BinanceWithdrawException(res['msg'])
return res
def get_dust_log(self, **params):
"""Get log of small amounts exchanged for BNB.
https://binance-docs.github.io/apidocs/spot/en/#dustlog-user_data
:param recvWindow: the number of milliseconds the request is valid for
:type recvWindow: int
:returns: API response
.. code-block:: python
{
"success": true,
"results": {
"total": 2, //Total counts of exchange
"rows": [
{
"transfered_total": "0.00132256", # Total transfered BNB amount for this exchange.
"service_charge_total": "0.00002699", # Total service charge amount for this exchange.
"tran_id": 4359321,
"logs": [ # Details of this exchange.
{
"tranId": 4359321,
"serviceChargeAmount": "0.000009",
"uid": "10000015",
"amount": "0.0009",
"operateTime": "2018-05-03 17:07:04",
"transferedAmount": "0.000441",
"fromAsset": "USDT"
},
{
"tranId": 4359321,
"serviceChargeAmount": "0.00001799",
"uid": "10000015",
"amount": "0.0009",
"operateTime": "2018-05-03 17:07:04",
"transferedAmount": "0.00088156",
"fromAsset": "ETH"
}
],
"operate_time": "2018-05-03 17:07:04" //The time of this exchange.
},
{
"transfered_total": "0.00058795",
"service_charge_total": "0.000012",
"tran_id": 4357015,
"logs": [ // Details of this exchange.
{
"tranId": 4357015,
"serviceChargeAmount": "0.00001",
"uid": "10000015",
"amount": "0.001",
"operateTime": "2018-05-02 13:52:24",
"transferedAmount": "0.00049",
"fromAsset": "USDT"
},
{
"tranId": 4357015,
"serviceChargeAmount": "0.000002",
"uid": "10000015",
"amount": "0.0001",
"operateTime": "2018-05-02 13:51:11",
"transferedAmount": "0.00009795",
"fromAsset": "ETH"
}
],
"operate_time": "2018-05-02 13:51:11"
}
]
}
}
:raises: BinanceWithdrawException
"""
res = self._request_withdraw_api('get', 'userAssetDribbletLog.html', True, data=params)
if not res.get('success'):
raise BinanceWithdrawException(res['msg'])
return res
def transfer_dust(self, **params):
    """Convert dust assets to BNB.

    https://binance-docs.github.io/apidocs/spot/en/#dust-transfer-user_data

    :param asset: the asset being converted, e.g. 'ONE'
    :type asset: str
    :param recvWindow: the number of milliseconds the request is valid for
    :type recvWindow: int

    .. code:: python

        result = client.transfer_dust(asset='ONE')

    :returns: API response — total service charge, total transferred amount
        and the per-asset transfer results

    :raises: BinanceRequestException, BinanceAPIException
    """
    response = self._request_margin_api('post', 'asset/dust', True, data=params)
    return response
def get_asset_dividend_history(self, **params):
    """Query the asset dividend record.

    https://binance-docs.github.io/apidocs/spot/en/#asset-dividend-record-user_data

    :param asset: optional
    :type asset: str
    :param startTime: optional
    :type startTime: long
    :param endTime: optional
    :type endTime: long
    :param recvWindow: the number of milliseconds the request is valid for
    :type recvWindow: int

    .. code:: python

        result = client.get_asset_dividend_history()

    :returns: API response — ``rows`` of dividend entries plus a ``total`` count

    :raises: BinanceRequestException, BinanceAPIException
    """
    response = self._request_margin_api('get', 'asset/assetDividend', True, data=params)
    return response
def make_universal_transfer(self, **params):
    """Perform a user universal transfer between Binance account types.

    https://binance-docs.github.io/apidocs/spot/en/#user-universal-transfer

    :param type: required, transfer direction enum (e.g. 'MAIN_UMFUTURE')
    :type type: str (ENUM)
    :param asset: required
    :type asset: str
    :param amount: required
    :type amount: str
    :param recvWindow: the number of milliseconds the request is valid for
    :type recvWindow: int

    :returns: API response — dict containing the transaction id ``tranId``

    :raises: BinanceRequestException, BinanceAPIException
    """
    response = self._request_margin_api('post', 'asset/transfer', signed=True, data=params)
    return response
def query_universal_transfer_history(self, **params):
    """Query the user universal transfer history.

    https://binance-docs.github.io/apidocs/spot/en/#query-user-universal-transfer-history

    :param type: required, transfer direction enum (e.g. 'MAIN_UMFUTURE')
    :type type: str (ENUM)
    :param startTime: optional
    :type startTime: int
    :param endTime: optional
    :type endTime: int
    :param current: optional - Default 1
    :type current: int
    :param size: required - Default 10, Max 100
    :type size: int
    :param recvWindow: the number of milliseconds the request is valid for
    :type recvWindow: int

    :returns: API response — ``total`` count and ``rows`` of transfer records

    :raises: BinanceRequestException, BinanceAPIException
    """
    response = self._request_margin_api('get', 'asset/transfer', signed=True, data=params)
    return response
def get_trade_fee(self, **params):
    """Fetch maker/taker trade fees, optionally for a single symbol.

    https://binance-docs.github.io/apidocs/spot/en/#trade-fee-user_data

    :param symbol: optional
    :type symbol: str
    :param recvWindow: the number of milliseconds the request is valid for
    :type recvWindow: int

    :returns: API response — a ``tradeFee`` list of per-symbol maker/taker rates

    :raises: BinanceWithdrawException
    """
    response = self._request_withdraw_api('get', 'tradeFee.html', True, data=params)
    if response.get('success'):
        return response
    raise BinanceWithdrawException(response['msg'])
def get_asset_details(self, **params):
    """Fetch per-asset details (withdraw fee/status, deposit status, minimums).

    https://binance-docs.github.io/apidocs/spot/en/#asset-detail-user_data

    :param recvWindow: the number of milliseconds the request is valid for
    :type recvWindow: int

    :returns: API response — an ``assetDetail`` mapping keyed by asset symbol

    :raises: BinanceWithdrawException
    """
    response = self._request_withdraw_api('get', 'assetDetail.html', True, data=params)
    if response.get('success'):
        return response
    raise BinanceWithdrawException(response['msg'])
# Withdraw Endpoints
def withdraw(self, **params):
    """Submit a withdraw request.

    https://www.binance.com/restapipub.html

    Assumptions:

    - You must have Withdraw permissions enabled on your API key
    - You must have withdrawn to the address specified through the website and approved the transaction via email

    :param asset: required
    :type asset: str
    :param address: required
    :type address: str
    :param addressTag: optional - Secondary address identifier for coins like XRP,XMR etc.
    :type addressTag: str
    :param amount: required
    :type amount: decimal
    :param name: optional - Description of the address, default asset value passed will be used
    :type name: str
    :param recvWindow: the number of milliseconds the request is valid for
    :type recvWindow: int

    :returns: API response — success flag, message and withdrawal id

    :raises: BinanceRequestException, BinanceAPIException, BinanceWithdrawException
    """
    # Default the withdrawal description to the asset name when not supplied.
    if 'asset' in params:
        params.setdefault('name', params['asset'])
    res = self._request_withdraw_api('post', 'withdraw.html', True, data=params)
    if not res.get('success'):
        raise BinanceWithdrawException(res['msg'])
    return res
def get_deposit_history(self, **params):
    """Fetch deposit history.

    https://www.binance.com/restapipub.html

    :param asset: optional
    :type asset: str
    :param status: optional - 0 (pending) or 1 (success)
    :type status: int
    :param startTime: optional
    :type startTime: long
    :param endTime: optional
    :type endTime: long
    :param recvWindow: the number of milliseconds the request is valid for
    :type recvWindow: int

    :returns: API response — a ``depositList`` of deposit records

    :raises: BinanceRequestException, BinanceAPIException
    """
    response = self._request_withdraw_api('get', 'depositHistory.html', True, data=params)
    return response
def get_withdraw_history(self, **params):
    """Fetch withdraw history.

    https://www.binance.com/restapipub.html

    :param asset: optional
    :type asset: str
    :param status: optional - 0:Email Sent, 1:Cancelled, 2:Awaiting Approval,
        3:Rejected, 4:Processing, 5:Failure, 6:Completed
    :type status: int
    :param startTime: optional
    :type startTime: long
    :param endTime: optional
    :type endTime: long
    :param recvWindow: the number of milliseconds the request is valid for
    :type recvWindow: int

    :returns: API response — a ``withdrawList`` of withdrawal records

    :raises: BinanceRequestException, BinanceAPIException
    """
    response = self._request_withdraw_api('get', 'withdrawHistory.html', True, data=params)
    return response
def get_withdraw_history_id(self, withdraw_id, **params):
    """Fetch a single withdraw-history entry by its withdraw id.

    https://www.binance.com/restapipub.html

    :param withdraw_id: required
    :type withdraw_id: str
    :param asset: optional
    :type asset: str
    :param status: optional - 0:Email Sent, 1:Cancelled, 2:Awaiting Approval,
        3:Rejected, 4:Processing, 5:Failure, 6:Completed
    :type status: int
    :param startTime: optional
    :type startTime: long
    :param endTime: optional
    :type endTime: long
    :param recvWindow: the number of milliseconds the request is valid for
    :type recvWindow: int

    :returns: the matching withdrawal record dict

    :raises: BinanceRequestException, BinanceAPIException,
        Exception when no entry matches ``withdraw_id``
    """
    result = self._request_withdraw_api('get', 'withdrawHistory.html', True, data=params)
    for entry in result['withdrawList']:
        # .get() handles entries without an 'id' key in a single lookup
        if entry.get('id') == withdraw_id:
            return entry
    raise Exception("There is no entry with withdraw id", result)
def get_deposit_address(self, **params):
    """Fetch the deposit address for an asset.

    https://www.binance.com/restapipub.html

    :param asset: required
    :type asset: str
    :param recvWindow: the number of milliseconds the request is valid for
    :type recvWindow: int

    :returns: API response — address, optional address tag, asset and success flag

    :raises: BinanceRequestException, BinanceAPIException
    """
    response = self._request_withdraw_api('get', 'depositAddress.html', True, data=params)
    return response
# User Stream Endpoints
def stream_get_listen_key(self):
    """Start a user data stream and return the listen key.

    If a stream already exists the same key is returned; if the stream became
    invalid a fresh key is returned, so the call doubles as a keepalive.

    https://github.com/binance/binance-spot-api-docs/blob/master/rest-api.md#start-user-data-stream-user_stream

    :returns: the listen key string

    :raises: BinanceRequestException, BinanceAPIException
    """
    response = self._post('userDataStream', False, data={}, version=self.PRIVATE_API_VERSION)
    return response['listenKey']
def stream_keepalive(self, listenKey):
    """PING a user data stream to prevent a timeout.

    https://github.com/binance/binance-spot-api-docs/blob/master/rest-api.md#keepalive-user-data-stream-user_stream

    :param listenKey: required
    :type listenKey: str

    :returns: API response (empty dict on success)

    :raises: BinanceRequestException, BinanceAPIException
    """
    payload = {
        'listenKey': listenKey
    }
    return self._put('userDataStream', False, data=payload, version=self.PRIVATE_API_VERSION)
def stream_close(self, listenKey):
    """Close out a user data stream.

    https://github.com/binance/binance-spot-api-docs/blob/master/rest-api.md#close-user-data-stream-user_stream

    :param listenKey: required
    :type listenKey: str

    :returns: API response (empty dict on success)

    :raises: BinanceRequestException, BinanceAPIException
    """
    payload = {
        'listenKey': listenKey
    }
    return self._delete('userDataStream', False, data=payload, version=self.PRIVATE_API_VERSION)
# Margin Trading Endpoints
def get_margin_account(self, **params):
    """Query cross-margin account details.

    https://binance-docs.github.io/apidocs/spot/en/#query-cross-margin-account-details-user_data

    :returns: API response — margin level, BTC-denominated totals, trade/
        transfer/borrow flags and the ``userAssets`` list of per-asset balances

    :raises: BinanceRequestException, BinanceAPIException
    """
    response = self._request_margin_api('get', 'margin/account', True, data=params)
    return response
def get_isolated_margin_account(self, **params):
    """Query isolated-margin account details.

    https://binance-docs.github.io/apidocs/spot/en/#query-isolated-margin-account-info-user_data

    :param symbols: optional, up to 5 margin pairs as a comma separated string
    :type symbols: str

    .. code:: python

        account_info = client.get_isolated_margin_account()
        account_info = client.get_isolated_margin_account(symbols="BTCUSDT,ETHUSDT")

    :returns: API response — an ``assets`` list with base/quote balances,
        margin level/ratio and liquidation data per pair; when ``symbols``
        is omitted the BTC-denominated account totals are included as well

    :raises: BinanceRequestException, BinanceAPIException
    """
    response = self._request_margin_api('get', 'margin/isolated/account', True, data=params)
    return response
def get_margin_asset(self, **params):
    """Query cross-margin asset info.

    https://binance-docs.github.io/apidocs/spot/en/#query-margin-asset-market_data

    :param asset: name of the asset
    :type asset: str

    .. code:: python

        asset_details = client.get_margin_asset(asset='BNB')

    :returns: API response — full/short asset name, borrowable/mortgageable
        flags and the user minimum borrow/repay amounts

    :raises: BinanceRequestException, BinanceAPIException
    """
    response = self._request_margin_api('get', 'margin/asset', data=params)
    return response
def get_margin_symbol(self, **params):
    """Query cross-margin symbol (pair) info.

    https://binance-docs.github.io/apidocs/spot/en/#query-cross-margin-pair-market_data

    :param symbol: name of the symbol pair
    :type symbol: str

    .. code:: python

        pair_details = client.get_margin_symbol(symbol='BTCUSDT')

    :returns: API response — pair id, base/quote assets and trade permission flags

    :raises: BinanceRequestException, BinanceAPIException
    """
    response = self._request_margin_api('get', 'margin/pair', data=params)
    return response
def create_isolated_margin_account(self, **params):
    """Create an isolated-margin account for a symbol.

    https://binance-docs.github.io/apidocs/spot/en/#create-isolated-margin-account-margin

    :param base: Base asset of symbol
    :type base: str
    :param quote: Quote asset of symbol
    :type quote: str

    .. code:: python

        pair_details = client.create_isolated_margin_account(base='USDT', quote='BTC')

    :returns: API response — success flag and the created symbol

    :raises: BinanceRequestException, BinanceAPIException
    """
    response = self._request_margin_api('post', 'margin/isolated/create', signed=True, data=params)
    return response
def get_isolated_margin_symbol(self, **params):
    """Query isolated-margin symbol info.

    https://binance-docs.github.io/apidocs/spot/en/#query-isolated-margin-symbol-user_data

    :param symbol: name of the symbol pair
    :type symbol: str

    .. code:: python

        pair_details = client.get_isolated_margin_symbol(symbol='BTCUSDT')

    :returns: API response — base/quote assets and trade permission flags

    :raises: BinanceRequestException, BinanceAPIException
    """
    response = self._request_margin_api('get', 'margin/isolated/pair', signed=True, data=params)
    return response
def get_all_isolated_margin_symbols(self, **params):
    """Query isolated-margin symbol info for all pairs.

    https://binance-docs.github.io/apidocs/spot/en/#get-all-isolated-margin-symbol-user_data

    .. code:: python

        pair_details = client.get_all_isolated_margin_symbols()

    :returns: API response — a list of pair dicts with base/quote assets
        and trade permission flags

    :raises: BinanceRequestException, BinanceAPIException
    """
    response = self._request_margin_api('get', 'margin/isolated/allPairs', signed=True, data=params)
    return response
def toggle_bnb_burn_spot_margin(self, **params):
    """Toggle BNB burn on spot trading fees and margin loan interest.

    https://binance-docs.github.io/apidocs/spot/en/#toggle-bnb-burn-on-spot-trade-and-margin-interest-user_data

    :param spotBNBBurn: whether to pay SPOT trading fees with BNB
    :type spotBNBBurn: bool
    :param interestBNBBurn: whether to pay margin loan interest with BNB
    :type interestBNBBurn: bool

    .. code:: python

        response = client.toggle_bnb_burn_spot_margin()

    :returns: API response — the resulting ``spotBNBBurn``/``interestBNBBurn`` flags

    :raises: BinanceRequestException, BinanceAPIException
    """
    response = self._request_margin_api('post', 'bnbBurn', signed=True, data=params)
    return response
def get_bnb_burn_spot_margin(self, **params):
    """Get the current BNB burn status.

    https://binance-docs.github.io/apidocs/spot/en/#get-bnb-burn-status-user_data

    .. code:: python

        status = client.get_bnb_burn_spot_margin()

    :returns: API response — the ``spotBNBBurn``/``interestBNBBurn`` flags

    :raises: BinanceRequestException, BinanceAPIException
    """
    response = self._request_margin_api('get', 'bnbBurn', signed=True, data=params)
    return response
def get_margin_price_index(self, **params):
    """Query the margin price index for a pair.

    https://binance-docs.github.io/apidocs/spot/en/#query-margin-priceindex-market_data

    :param symbol: name of the symbol pair
    :type symbol: str

    .. code:: python

        price_index_details = client.get_margin_price_index(symbol='BTCUSDT')

    :returns: API response — calculation time, price and symbol

    :raises: BinanceRequestException, BinanceAPIException
    """
    response = self._request_margin_api('get', 'margin/priceIndex', data=params)
    return response
def transfer_margin_to_spot(self, **params):
    """Transfer funds from the cross-margin account to the spot account.

    https://binance-docs.github.io/apidocs/spot/en/#cross-margin-account-transfer-margin

    :param asset: name of the asset
    :type asset: str
    :param amount: amount to transfer
    :type amount: str
    :param recvWindow: the number of milliseconds the request is valid for
    :type recvWindow: int

    .. code:: python

        transfer = client.transfer_margin_to_spot(asset='BTC', amount='1.1')

    :returns: API response — dict containing the transaction id ``tranId``

    :raises: BinanceRequestException, BinanceAPIException
    """
    # type 2 = margin -> spot on this endpoint
    params['type'] = 2
    return self._request_margin_api('post', 'margin/transfer', signed=True, data=params)
def transfer_spot_to_margin(self, **params):
    """Transfer funds from the spot account to the cross-margin account.

    https://binance-docs.github.io/apidocs/spot/en/#cross-margin-account-transfer-margin

    :param asset: name of the asset
    :type asset: str
    :param amount: amount to transfer
    :type amount: str
    :param recvWindow: the number of milliseconds the request is valid for
    :type recvWindow: int

    .. code:: python

        transfer = client.transfer_spot_to_margin(asset='BTC', amount='1.1')

    :returns: API response — dict containing the transaction id ``tranId``

    :raises: BinanceRequestException, BinanceAPIException
    """
    # type 1 = spot -> margin on this endpoint
    params['type'] = 1
    return self._request_margin_api('post', 'margin/transfer', signed=True, data=params)
def transfer_isolated_margin_to_spot(self, **params):
    """Transfer funds from an isolated-margin account to the spot account.

    https://binance-docs.github.io/apidocs/spot/en/#isolated-margin-account-transfer-margin

    :param asset: name of the asset
    :type asset: str
    :param symbol: pair symbol
    :type symbol: str
    :param amount: amount to transfer
    :type amount: str
    :param recvWindow: the number of milliseconds the request is valid for
    :type recvWindow: int

    .. code:: python

        transfer = client.transfer_isolated_margin_to_spot(asset='BTC',
                                                           symbol='ETHBTC', amount='1.1')

    :returns: API response — dict containing the transaction id ``tranId``

    :raises: BinanceRequestException, BinanceAPIException
    """
    params['transFrom'] = "ISOLATED_MARGIN"
    params['transTo'] = "SPOT"
    return self._request_margin_api('post', 'margin/isolated/transfer', signed=True, data=params)
def transfer_spot_to_isolated_margin(self, **params):
    """Transfer funds from the spot account to an isolated-margin account.

    https://binance-docs.github.io/apidocs/spot/en/#isolated-margin-account-transfer-margin

    :param asset: name of the asset
    :type asset: str
    :param symbol: pair symbol
    :type symbol: str
    :param amount: amount to transfer
    :type amount: str
    :param recvWindow: the number of milliseconds the request is valid for
    :type recvWindow: int

    .. code:: python

        transfer = client.transfer_spot_to_isolated_margin(asset='BTC',
                                                           symbol='ETHBTC', amount='1.1')

    :returns: API response — dict containing the transaction id ``tranId``

    :raises: BinanceRequestException, BinanceAPIException
    """
    params['transFrom'] = "SPOT"
    params['transTo'] = "ISOLATED_MARGIN"
    return self._request_margin_api('post', 'margin/isolated/transfer', signed=True, data=params)
def create_margin_loan(self, **params):
    """Apply for a loan in a cross-margin or isolated-margin account.

    https://binance-docs.github.io/apidocs/spot/en/#margin-account-borrow-margin

    :param asset: name of the asset
    :type asset: str
    :param amount: amount to borrow
    :type amount: str
    :param isIsolated: set to 'TRUE' for isolated margin (default 'FALSE')
    :type isIsolated: str
    :param symbol: Isolated margin symbol (default blank for cross-margin)
    :type symbol: str
    :param recvWindow: the number of milliseconds the request is valid for
    :type recvWindow: int

    .. code:: python

        transaction = client.margin_create_loan(asset='BTC', amount='1.1')
        transaction = client.margin_create_loan(asset='BTC', amount='1.1',
                                                isIsolated='TRUE', symbol='ETHBTC')

    :returns: API response — dict containing the transaction id ``tranId``

    :raises: BinanceRequestException, BinanceAPIException
    """
    response = self._request_margin_api('post', 'margin/loan', signed=True, data=params)
    return response
def repay_margin_loan(self, **params):
    """Repay a loan in a cross-margin or isolated-margin account.

    If the amount exceeds the outstanding borrow, the full loan is repaid.

    https://binance-docs.github.io/apidocs/spot/en/#margin-account-repay-margin

    :param asset: name of the asset
    :type asset: str
    :param amount: amount to repay
    :type amount: str
    :param isIsolated: set to 'TRUE' for isolated margin (default 'FALSE')
    :type isIsolated: str
    :param symbol: Isolated margin symbol (default blank for cross-margin)
    :type symbol: str
    :param recvWindow: the number of milliseconds the request is valid for
    :type recvWindow: int

    .. code:: python

        transaction = client.margin_repay_loan(asset='BTC', amount='1.1')
        transaction = client.margin_repay_loan(asset='BTC', amount='1.1',
                                               isIsolated='TRUE', symbol='ETHBTC')

    :returns: API response — dict containing the transaction id ``tranId``

    :raises: BinanceRequestException, BinanceAPIException
    """
    response = self._request_margin_api('post', 'margin/repay', signed=True, data=params)
    return response
def create_margin_order(self, **params):
    """Post a new order for a margin account.

    https://binance-docs.github.io/apidocs/spot/en/#margin-account-new-order-trade

    :param symbol: required
    :type symbol: str
    :param isIsolated: set to 'TRUE' for isolated margin (default 'FALSE')
    :type isIsolated: str
    :param side: required
    :type side: str
    :param type: required
    :type type: str
    :param quantity: required
    :type quantity: decimal
    :param price: required
    :type price: str
    :param stopPrice: Used with STOP_LOSS, STOP_LOSS_LIMIT, TAKE_PROFIT, and TAKE_PROFIT_LIMIT orders.
    :type stopPrice: str
    :param timeInForce: required if limit order GTC,IOC,FOK
    :type timeInForce: str
    :param newClientOrderId: A unique id for the order. Automatically generated if not sent.
    :type newClientOrderId: str
    :param icebergQty: Used with LIMIT, STOP_LOSS_LIMIT, and TAKE_PROFIT_LIMIT to create an iceberg order.
    :type icebergQty: str
    :param newOrderRespType: Set the response JSON. ACK, RESULT, or FULL; MARKET and LIMIT order types default to
        FULL, all other orders default to ACK.
    :type newOrderRespType: str
    :param recvWindow: the number of milliseconds the request is valid for
    :type recvWindow: int

    :returns: API response — shape depends on ``newOrderRespType``: ACK returns
        only the order/client ids and transact time; RESULT adds price,
        quantities and status; FULL additionally includes the ``fills`` list

    :raises: BinanceRequestException, BinanceAPIException, BinanceOrderException, BinanceOrderMinAmountException,
        BinanceOrderMinPriceException, BinanceOrderMinTotalException, BinanceOrderUnknownSymbolException,
        BinanceOrderInactiveSymbolException
    """
    response = self._request_margin_api('post', 'margin/order', signed=True, data=params)
    return response
def cancel_margin_order(self, **params):
    """Cancel an active margin-account order.

    Either ``orderId`` or ``origClientOrderId`` must be sent.

    https://binance-docs.github.io/apidocs/spot/en/#margin-account-cancel-order-trade

    :param symbol: required
    :type symbol: str
    :param isIsolated: set to 'TRUE' for isolated margin (default 'FALSE')
    :type isIsolated: str
    :param orderId: optional
    :type orderId: str
    :param origClientOrderId: optional
    :type origClientOrderId: str
    :param newClientOrderId: Used to uniquely identify this cancel. Automatically generated by default.
    :type newClientOrderId: str
    :param recvWindow: the number of milliseconds the request is valid for
    :type recvWindow: int

    :returns: API response — the cancelled order's ids, prices, quantities
        and final status

    :raises: BinanceRequestException, BinanceAPIException
    """
    response = self._request_margin_api('delete', 'margin/order', signed=True, data=params)
    return response
def get_margin_loan_details(self, **params):
    """Query a margin loan record.

    ``txId`` or ``startTime`` must be sent; ``txId`` takes precedence.

    https://binance-docs.github.io/apidocs/spot/en/#query-loan-record-user_data

    :param asset: required
    :type asset: str
    :param isolatedSymbol: isolated symbol (if querying isolated margin)
    :type isolatedSymbol: str
    :param txId: the tranId of the created loan
    :type txId: str
    :param startTime: earliest timestamp to filter transactions
    :type startTime: str
    :param endTime: latest timestamp to filter transactions
    :type endTime: str
    :param current: Currently querying page. Start from 1. Default:1
    :type current: str
    :param size: Default:10 Max:100
    :type size: int
    :param recvWindow: the number of milliseconds the request is valid for
    :type recvWindow: int

    :returns: API response — ``rows`` of loan records (status is one of
        PENDING, CONFIRMED, FAILED) and a ``total`` count

    :raises: BinanceRequestException, BinanceAPIException
    """
    response = self._request_margin_api('get', 'margin/loan', signed=True, data=params)
    return response
def get_margin_repay_details(self, **params):
    """Query a margin repay record.

    ``txId`` or ``startTime`` must be sent; ``txId`` takes precedence.

    https://binance-docs.github.io/apidocs/spot/en/#query-repay-record-user_data

    :param asset: required
    :type asset: str
    :param isolatedSymbol: isolated symbol (if querying isolated margin)
    :type isolatedSymbol: str
    :param txId: the tranId of the created loan
    :type txId: str
    :param startTime: earliest timestamp to filter transactions
    :type startTime: str
    :param endTime: latest timestamp to filter transactions
    :type endTime: str
    :param current: Currently querying page. Start from 1. Default:1
    :type current: str
    :param size: Default:10 Max:100
    :type size: int
    :param recvWindow: the number of milliseconds the request is valid for
    :type recvWindow: int

    :returns: API response — ``rows`` of repay records (total amount,
        interest and principal repaid; status is one of PENDING, CONFIRMED,
        FAILED) and a ``total`` count

    :raises: BinanceRequestException, BinanceAPIException
    """
    response = self._request_margin_api('get', 'margin/repay', signed=True, data=params)
    return response
def get_margin_order(self, **params):
"""Query margin accounts order
Either orderId or origClientOrderId must be sent.
For some historical orders cummulativeQuoteQty will be < 0, meaning the data is not available at this time.
https://binance-docs.github.io/apidocs/spot/en/#query-margin-account-39-s-order-user_data
:param symbol: required
:type symbol: str
:param isIsolated: set to 'TRUE' for isolated margin (default 'FALSE')
:type isIsolated: str
:param orderId:
:type orderId: str
:param origClientOrderId:
:type origClientOrderId: str
:param recvWindow: the number of milliseconds the request is valid for
:type recvWindow: int
:returns: API response
{
"clientOrderId": "ZwfQzuDIGpceVhKW5DvCmO",
"cummulativeQuoteQty": "0.00000000",
"executedQty": "0.00000000",
"icebergQty": "0.00000000",
"isWorking": true,
"orderId": 213205622,
"origQty": "0.30000000",
"price": "0.00493630",
"side": "SELL",
"status": "NEW",
"stopPrice": "0.00000000",
"symbol": "BNBBTC",
"time": 1562133008725,
"timeInForce": "GTC",
"type": "LIMIT",
"updateTime": 1562133008725
}
:raises: BinanceRequestException, BinanceAPIException
"""
return self._request_margin_api('get', 'margin/order', signed=True, data=params)
def get_open_margin_orders(self, **params):
"""Query margin accounts open orders
If the symbol is not sent, orders for all symbols will be returned in an array (cross-margin only).
If querying isolated margin orders, both the isIsolated='TRUE' and symbol=symbol_name must be set.
When all symbols are returned, the number of requests counted against the rate limiter is equal to the number
of symbols currently trading on the exchange.
https://binance-docs.github.io/apidocs/spot/en/#query-margin-account-39-s-open-order-user_data
:param symbol: optional
:type symbol: str
:param isIsolated: set to 'TRUE' for isolated margin (default 'FALSE')
:type isIsolated: str
:param recvWindow: the number of milliseconds the request is valid for
:type recvWindow: int
:returns: API response
[
{
"clientOrderId": "qhcZw71gAkCCTv0t0k8LUK",
"cummulativeQuoteQty": "0.00000000",
"executedQty": "0.00000000",
"icebergQty": "0.00000000",
"isWorking": true,
"orderId": 211842552,
"origQty": "0.30000000",
"price": "0.00475010",
"side": "SELL",
"status": "NEW",
"stopPrice": "0.00000000",
"symbol": "BNBBTC",
"time": 1562040170089,
"timeInForce": "GTC",
"type": "LIMIT",
"updateTime": 1562040170089
}
]
:raises: BinanceRequestException, BinanceAPIException
"""
return self._request_margin_api('get', 'margin/openOrders', signed=True, data=params)
def get_all_margin_orders(self, **params):
"""Query all margin accounts orders
If orderId is set, it will get orders >= that orderId. Otherwise most recent orders are returned.
For some historical orders cummulativeQuoteQty will be < 0, meaning the data is not available at this time.
https://binance-docs.github.io/apidocs/spot/en/#query-margin-account-39-s-all-order-user_data
:param symbol: required
:type symbol: str
:param isIsolated: set to 'TRUE' for isolated margin (default 'FALSE')
:type isIsolated: str
:param orderId: optional
:type orderId: str
:param startTime: optional
:type startTime: str
:param endTime: optional
:type endTime: str
:param limit: Default 500; max 1000
:type limit: int
:param recvWindow: the number of milliseconds the request is valid for
:type recvWindow: int
:returns: API response
[
{
"id": 43123876,
"price": "0.00395740",
"qty": "4.06000000",
"quoteQty": "0.01606704",
"symbol": "BNBBTC",
"time": 1556089977693
},
{
"id": 43123877,
"price": "0.00395740",
"qty": "0.77000000",
"quoteQty": "0.00304719",
"symbol": "BNBBTC",
"time": 1556089977693
},
{
"id": 43253549,
"price": "0.00428930",
"qty": "23.30000000",
"quoteQty": "0.09994069",
"symbol": "BNBBTC",
"time": 1556163963504
}
]
:raises: BinanceRequestException, BinanceAPIException
"""
return self._request_margin_api('get', 'margin/allOrders', signed=True, data=params)
def get_margin_trades(self, **params):
"""Query margin accounts trades
If fromId is set, it will get orders >= that fromId. Otherwise most recent orders are returned.
https://binance-docs.github.io/apidocs/spot/en/#query-margin-account-39-s-trade-list-user_data
:param symbol: required
:type symbol: str
:param isIsolated: set to 'TRUE' for isolated margin (default 'FALSE')
:type isIsolated: str
:param fromId: optional
:type fromId: str
:param startTime: optional
:type startTime: str
:param endTime: optional
:type endTime: str
:param limit: Default 500; max 1000
:type limit: int
:param recvWindow: the number of milliseconds the request is valid for
:type recvWindow: int
:returns: API response
[
{
"commission": "0.00006000",
"commissionAsset": "BTC",
"id": 34,
"isBestMatch": true,
"isBuyer": false,
"isMaker": false,
"orderId": 39324,
"price": "0.02000000",
"qty": "3.00000000",
"symbol": "BNBBTC",
"time": 1561973357171
}, {
"commission": "0.00002950",
"commissionAsset": "BTC",
"id": 32,
"isBestMatch": true,
"isBuyer": false,
"isMaker": true,
"orderId": 39319,
"price": "0.00590000",
"qty": "5.00000000",
"symbol": "BNBBTC",
"time": 1561964645345
}
]
:raises: BinanceRequestException, BinanceAPIException
"""
return self._request_margin_api('get', 'margin/myTrades', signed=True, data=params)
def get_max_margin_loan(self, **params):
"""Query max borrow amount for an asset
https://binance-docs.github.io/apidocs/spot/en/#query-max-borrow-user_data
:param asset: required
:type asset: str
:param isolatedSymbol: isolated symbol (if querying isolated margin)
:type isolatedSymbol: str
:param recvWindow: the number of milliseconds the request is valid for
:type recvWindow: int
:returns: API response
{
"amount": "1.69248805"
}
:raises: BinanceRequestException, BinanceAPIException
"""
return self._request_margin_api('get', 'margin/maxBorrowable', signed=True, data=params)
def get_max_margin_transfer(self, **params):
"""Query max transfer-out amount
https://binance-docs.github.io/apidocs/spot/en/#query-max-transfer-out-amount-user_data
:param asset: required
:type asset: str
:param isolatedSymbol: isolated symbol (if querying isolated margin)
:type isolatedSymbol: str
:param recvWindow: the number of milliseconds the request is valid for
:type recvWindow: int
:returns: API response
{
"amount": "3.59498107"
}
:raises: BinanceRequestException, BinanceAPIException
"""
return self._request_margin_api('get', 'margin/maxTransferable', signed=True, data=params)
# Cross-margin
def margin_stream_get_listen_key(self):
"""Start a new cross-margin data stream and return the listen key
If a stream already exists it should return the same key.
If the stream becomes invalid a new key is returned.
Can be used to keep the stream alive.
https://binance-docs.github.io/apidocs/spot/en/#listen-key-margin
:returns: API response
.. code-block:: python
{
"listenKey": "pqia91ma19a5s61cv6a81va65sdf19v8a65a1a5s61cv6a81va65sdf19v8a65a1"
}
:raises: BinanceRequestException, BinanceAPIException
"""
res = self._request_margin_api('post', 'userDataStream', signed=False, data={})
return res['listenKey']
def margin_stream_keepalive(self, listenKey):
"""PING a cross-margin data stream to prevent a time out.
https://binance-docs.github.io/apidocs/spot/en/#listen-key-margin
:param listenKey: required
:type listenKey: str
:returns: API response
.. code-block:: python
{}
:raises: BinanceRequestException, BinanceAPIException
"""
params = {
'listenKey': listenKey
}
return self._request_margin_api('put', 'userDataStream', signed=False, data=params)
def margin_stream_close(self, listenKey):
"""Close out a cross-margin data stream.
https://binance-docs.github.io/apidocs/spot/en/#listen-key-margin
:param listenKey: required
:type listenKey: str
:returns: API response
.. code-block:: python
{}
:raises: BinanceRequestException, BinanceAPIException
"""
params = {
'listenKey': listenKey
}
return self._request_margin_api('delete', 'userDataStream', signed=False, data=params)
# Isolated margin
def isolated_margin_stream_get_listen_key(self, symbol):
"""Start a new isolated margin data stream and return the listen key
If a stream already exists it should return the same key.
If the stream becomes invalid a new key is returned.
Can be used to keep the stream alive.
https://binance-docs.github.io/apidocs/spot/en/#listen-key-isolated-margin
:param symbol: required - symbol for the isolated margin account
:type symbol: str
:returns: API response
.. code-block:: python
{
"listenKey": "T3ee22BIYuWqmvne0HNq2A2WsFlEtLhvWCtItw6ffhhdmjifQ2tRbuKkTHhr"
}
:raises: BinanceRequestException, BinanceAPIException
"""
params = {
'symbol': symbol
}
res = self._request_margin_api('post', 'userDataStream/isolated', signed=False, data=params)
return res['listenKey']
def isolated_margin_stream_keepalive(self, symbol, listenKey):
"""PING an isolated margin data stream to prevent a time out.
https://binance-docs.github.io/apidocs/spot/en/#listen-key-isolated-margin
:param symbol: required - symbol for the isolated margin account
:type symbol: str
:param listenKey: required
:type listenKey: str
:returns: API response
.. code-block:: python
{}
:raises: BinanceRequestException, BinanceAPIException
"""
params = {
'symbol': symbol,
'listenKey': listenKey
}
return self._request_margin_api('put', 'userDataStream/isolated', signed=False, data=params)
def isolated_margin_stream_close(self, symbol, listenKey):
"""Close out an isolated margin data stream.
https://binance-docs.github.io/apidocs/spot/en/#listen-key-isolated-margin
:param symbol: required - symbol for the isolated margin account
:type symbol: str
:param listenKey: required
:type listenKey: str
:returns: API response
.. code-block:: python
{}
:raises: BinanceRequestException, BinanceAPIException
"""
params = {
'symbol': symbol,
'listenKey': listenKey
}
return self._request_margin_api('delete', 'userDataStream/isolated', signed=False, data=params)
# Lending Endpoints
def get_lending_product_list(self, **params):
"""Get Lending Product List
https://binance-docs.github.io/apidocs/spot/en/#get-flexible-product-list-user_data
"""
return self._request_margin_api('get', 'lending/daily/product/list', signed=True, data=params)
def get_lending_daily_quota_left(self, **params):
"""Get Left Daily Purchase Quota of Flexible Product.
https://binance-docs.github.io/apidocs/spot/en/#get-left-daily-purchase-quota-of-flexible-product-user_data
"""
return self._request_margin_api('get', 'lending/daily/userLeftQuota', signed=True, data=params)
def purchase_lending_product(self, **params):
"""Purchase Flexible Product
https://binance-docs.github.io/apidocs/spot/en/#purchase-flexible-product-user_data
"""
return self._request_margin_api('post', 'lending/daily/purchase', signed=True, data=params)
def get_lending_daily_redemption_quota(self, **params):
"""Get Left Daily Redemption Quota of Flexible Product
https://binance-docs.github.io/apidocs/spot/en/#get-left-daily-redemption-quota-of-flexible-product-user_data
"""
return self._request_margin_api('get', 'lending/daily/userRedemptionQuota', signed=True, data=params)
def redeem_lending_product(self, **params):
"""Redeem Flexible Product
https://binance-docs.github.io/apidocs/spot/en/#redeem-flexible-product-user_data
"""
return self._request_margin_api('post', 'lending/daily/redeem', signed=True, data=params)
def get_lending_position(self, **params):
"""Get Flexible Product Position
https://binance-docs.github.io/apidocs/spot/en/#get-flexible-product-position-user_data
"""
return self._request_margin_api('get', 'lending/daily/token/position', signed=True, data=params)
def get_fixed_activity_project_list(self, **params):
"""Get Fixed and Activity Project List
https://binance-docs.github.io/apidocs/spot/en/#get-fixed-and-activity-project-list-user_data
:param asset: optional
:type asset: str
:param type: required - "ACTIVITY", "CUSTOMIZED_FIXED"
:type type: str
:param status: optional - "ALL", "SUBSCRIBABLE", "UNSUBSCRIBABLE"; default "ALL"
:type status: str
:param sortBy: optional - "START_TIME", "LOT_SIZE", "INTEREST_RATE", "DURATION"; default "START_TIME"
:type sortBy: str
:param current: optional - Currently querying page. Start from 1. Default:1
:type current: int
:param size: optional - Default:10, Max:100
:type size: int
:param recvWindow: the number of milliseconds the request is valid for
:type recvWindow: int
:returns: API response
.. code-block:: python
[
{
"asset": "USDT",
"displayPriority": 1,
"duration": 90,
"interestPerLot": "1.35810000",
"interestRate": "0.05510000",
"lotSize": "100.00000000",
"lotsLowLimit": 1,
"lotsPurchased": 74155,
"lotsUpLimit": 80000,
"maxLotsPerUser": 2000,
"needKyc": False,
"projectId": "CUSDT90DAYSS001",
"projectName": "USDT",
"status": "PURCHASING",
"type": "CUSTOMIZED_FIXED",
"withAreaLimitation": False
}
]
:raises: BinanceRequestException, BinanceAPIException
"""
return self._request_margin_api('get', 'lending/project/list', signed=True, data=params)
def get_lending_account(self, **params):
"""Get Lending Account Details
https://binance-docs.github.io/apidocs/spot/en/#lending-account-user_data
"""
return self._request_margin_api('get', 'lending/union/account', signed=True, data=params)
def get_lending_purchase_history(self, **params):
"""Get Lending Purchase History
https://binance-docs.github.io/apidocs/spot/en/#get-purchase-record-user_data
"""
return self._request_margin_api('get', 'lending/union/purchaseRecord', signed=True, data=params)
def get_lending_redemption_history(self, **params):
"""Get Lending Redemption History
https://binance-docs.github.io/apidocs/spot/en/#get-redemption-record-user_data
"""
return self._request_margin_api('get', 'lending/union/redemptionRecord', signed=True, data=params)
def get_lending_interest_history(self, **params):
"""Get Lending Interest History
https://binance-docs.github.io/apidocs/spot/en/#get-interest-history-user_data-2
"""
return self._request_margin_api('get', 'lending/union/interestHistory', signed=True, data=params)
def change_fixed_activity_to_daily_position(self, **params):
"""Change Fixed/Activity Position to Daily Position
https://binance-docs.github.io/apidocs/spot/en/#change-fixed-activity-position-to-daily-position-user_data
"""
return self._request_margin_api('post', 'lending/positionChanged', signed=True, data=params)
# Sub Accounts
def get_sub_account_list(self, **params):
"""Query Sub-account List.
https://binance-docs.github.io/apidocs/spot/en/#query-sub-account-list-for-master-account
:param email: optional
:type email: str
:param startTime: optional
:type startTime: int
:param endTime: optional
:type endTime: int
:param page: optional
:type page: int
:param limit: optional
:type limit: int
:param recvWindow: optional
:type recvWindow: int
:returns: API response
.. code-block:: python
{
"success":true,
"subAccounts":[
{
"email":"123@test.com",
"status":"enabled",
"activated":true,
"mobile":"91605290",
"gAuth":true,
"createTime":1544433328000
},
{
"email":"321@test.com",
"status":"disabled",
"activated":true,
"mobile":"22501238",
"gAuth":true,
"createTime":1544433328000
}
]
}
:raises: BinanceRequestException, BinanceAPIException
"""
return self._request_withdraw_api('get', 'sub-account/list.html', True, data=params)
def get_sub_account_transfer_history(self, **params):
"""Query Sub-account Transfer History.
https://binance-docs.github.io/apidocs/spot/en/#query-sub-account-spot-asset-transfer-history-for-master-account
:param email: required
:type email: str
:param startTime: optional
:type startTime: int
:param endTime: optional
:type endTime: int
:param page: optional
:type page: int
:param limit: optional
:type limit: int
:param recvWindow: optional
:type recvWindow: int
:returns: API response
.. code-block:: python
{
"success":true,
"transfers":[
{
"from":"aaa@test.com",
"to":"bbb@test.com",
"asset":"BTC",
"qty":"1",
"time":1544433328000
},
{
"from":"bbb@test.com",
"to":"ccc@test.com",
"asset":"ETH",
"qty":"2",
"time":1544433328000
}
]
}
:raises: BinanceRequestException, BinanceAPIException
"""
return self._request_withdraw_api('get', 'sub-account/transfer/history.html', True, data=params)
def create_sub_account_transfer(self, **params):
"""Execute sub-account transfer
https://binance-docs.github.io/apidocs/spot/en/#sub-account-spot-asset-transfer-for-master-account
:param fromEmail: required - Sender email
:type fromEmail: str
:param toEmail: required - Recipient email
:type toEmail: str
:param asset: required
:type asset: str
:param amount: required
:type amount: decimal
:param recvWindow: optional
:type recvWindow: int
:returns: API response
.. code-block:: python
{
"success":true,
"txnId":"2966662589"
}
:raises: BinanceRequestException, BinanceAPIException
"""
return self._request_withdraw_api('post', 'sub-account/transfer.html', True, data=params)
def get_sub_account_futures_transfer_history(self, **params):
"""Query Sub-account Futures Transfer History.
https://binance-docs.github.io/apidocs/spot/en/#query-sub-account-futures-asset-transfer-history-for-master-account
:param email: required
:type email: str
:param futuresType: required
:type futuresType: int
:param startTime: optional
:type startTime: int
:param endTime: optional
:type endTime: int
:param page: optional
:type page: int
:param limit: optional
:type limit: int
:param recvWindow: optional
:type recvWindow: int
:returns: API response
.. code-block:: python
{
"success":true,
"futuresType": 2,
"transfers":[
{
"from":"aaa@test.com",
"to":"bbb@test.com",
"asset":"BTC",
"qty":"1",
"time":1544433328000
},
{
"from":"bbb@test.com",
"to":"ccc@test.com",
"asset":"ETH",
"qty":"2",
"time":1544433328000
}
]
}
:raises: BinanceRequestException, BinanceAPIException
"""
return self._request_margin_api('get', 'sub-account/futures/internalTransfer', True, data=params)
def create_sub_account_futures_transfer(self, **params):
"""Execute sub-account Futures transfer
https://github.com/binance-exchange/binance-official-api-docs/blob/9dbe0e961b80557bb19708a707c7fad08842b28e/wapi-api.md#sub-account-transferfor-master-account
:param fromEmail: required - Sender email
:type fromEmail: str
:param toEmail: required - Recipient email
:type toEmail: str
:param futuresType: required
:type futuresType: int
:param asset: required
:type asset: str
:param amount: required
:type amount: decimal
:param recvWindow: optional
:type recvWindow: int
:returns: API response
.. code-block:: python
{
"success":true,
"txnId":"2934662589"
}
:raises: BinanceRequestException, BinanceAPIException
"""
return self._request_margin_api('post', 'sub-account/futures/internalTransfer', True, data=params)
def get_sub_account_assets(self, **params):
"""Fetch sub-account assets
https://binance-docs.github.io/apidocs/spot/en/#query-sub-account-assets-for-master-account
:param email: required
:type email: str
:param symbol: optional
:type symbol: str
:param recvWindow: optional
:type recvWindow: int
:returns: API response
.. code-block:: python
{
"success":true,
"balances":[
{
"asset":"ADA",
"free":10000,
"locked":0
},
{
"asset":"BNB",
"free":10003,
"locked":0
},
{
"asset":"BTC",
"free":11467.6399,
"locked":0
},
{
"asset":"ETH",
"free":10004.995,
"locked":0
},
{
"asset":"USDT",
"free":11652.14213,
"locked":0
}
]
}
:raises: BinanceRequestException, BinanceAPIException
"""
return self._request_withdraw_api('get', 'sub-account/assets.html', True, data=params)
def query_subaccount_spot_summary(self, **params):
"""Query Sub-account Spot Assets Summary (For Master Account)
https://binance-docs.github.io/apidocs/spot/en/#query-sub-account-spot-assets-summary-for-master-account
:param email: optional - Sub account email
:type email: str
:param page: optional - default 1
:type page: int
:param size: optional - default 10, max 20
:type size: int
:param recvWindow: optional
:type recvWindow: int
:returns: API response
.. code-block:: python
{
"totalCount":2,
"masterAccountTotalAsset": "0.23231201",
"spotSubUserAssetBtcVoList":[
{
"email":"sub123@test.com",
"totalAsset":"9999.00000000"
},
{
"email":"test456@test.com",
"totalAsset":"0.00000000"
}
]
}
:raises: BinanceRequestException, BinanceAPIException
"""
return self._request_margin_api('get', 'sub-account/spotSummary', True, data=params)
def get_subaccount_deposit_address(self, **params):
"""Get Sub-account Deposit Address (For Master Account)
https://binance-docs.github.io/apidocs/spot/en/#get-sub-account-deposit-address-for-master-account
:param email: required - Sub account email
:type email: str
:param coin: required
:type coin: str
:param network: optional
:type network: str
:param recvWindow: optional
:type recvWindow: int
:returns: API response
.. code-block:: python
{
"address":"TDunhSa7jkTNuKrusUTU1MUHtqXoBPKETV",
"coin":"USDT",
"tag":"",
"url":"https://tronscan.org/#/address/TDunhSa7jkTNuKrusUTU1MUHtqXoBPKETV"
}
:raises: BinanceRequestException, BinanceAPIException
"""
return self._request_margin_api('get', 'capital/deposit/subAddress', True, data=params)
def get_subaccount_deposit_history(self, **params):
"""Get Sub-account Deposit History (For Master Account)
https://binance-docs.github.io/apidocs/spot/en/#get-sub-account-deposit-address-for-master-account
:param email: required - Sub account email
:type email: str
:param coin: optional
:type coin: str
:param status: optional - (0:pending,6: credited but cannot withdraw, 1:success)
:type status: int
:param startTime: optional
:type startTime: int
:param endTime: optional
:type endTime: int
:param limit: optional
:type limit: int
:param offset: optional - default:0
:type offset: int
:param recvWindow: optional
:type recvWindow: int
:returns: API response
.. code-block:: python
[
{
"amount":"0.00999800",
"coin":"PAXG",
"network":"ETH",
"status":1,
"address":"0x788cabe9236ce061e5a892e1a59395a81fc8d62c",
"addressTag":"",
"txId":"0xaad4654a3234aa6118af9b4b335f5ae81c360b2394721c019b5d1e75328b09f3",
"insertTime":1599621997000,
"transferType":0,
"confirmTimes":"12/12"
},
{
"amount":"0.50000000",
"coin":"IOTA",
"network":"IOTA",
"status":1,
"address":"SIZ9VLMHWATXKV99LH99CIGFJFUMLEHGWVZVNNZXRJJVWBPHYWPPBOSDORZ9EQSHCZAMPVAPGFYQAUUV9DROOXJLNW",
"addressTag":"",
"txId":"ESBFVQUTPIWQNJSPXFNHNYHSQNTGKRVKPRABQWTAXCDWOAKDKYWPTVG9BGXNVNKTLEJGESAVXIKIZ9999",
"insertTime":1599620082000,
"transferType":0,
"confirmTimes":"1/1"
}
]
:raises: BinanceRequestException, BinanceAPIException
"""
return self._request_margin_api('get', 'capital/deposit/subHisrec', True, data=params)
def get_subaccount_futures_margin_status(self, **params):
"""Get Sub-account's Status on Margin/Futures (For Master Account)
https://binance-docs.github.io/apidocs/spot/en/#get-sub-account-39-s-status-on-margin-futures-for-master-account
:param email: optional - Sub account email
:type email: str
:param recvWindow: optional
:type recvWindow: int
:returns: API response
.. code-block:: python
[
{
"email":"123@test.com", // user email
"isSubUserEnabled": true, // true or false
"isUserActive": true, // true or false
"insertTime": 1570791523523 // sub account create time
"isMarginEnabled": true, // true or false for margin
"isFutureEnabled": true // true or false for futures.
"mobile": 1570791523523 // user mobile number
}
]
:raises: BinanceRequestException, BinanceAPIException
"""
return self._request_margin_api('get', 'sub-account/status', True, data=params)
def enable_subaccount_margin(self, **params):
"""Enable Margin for Sub-account (For Master Account)
https://binance-docs.github.io/apidocs/spot/en/#enable-margin-for-sub-account-for-master-account
:param email: required - Sub account email
:type email: str
:param recvWindow: optional
:type recvWindow: int
:returns: API response
.. code-block:: python
{
"email":"123@test.com",
"isMarginEnabled": true
}
:raises: BinanceRequestException, BinanceAPIException
"""
return self._request_margin_api('post', 'sub-account/margin/enable', True, data=params)
def get_subaccount_margin_details(self, **params):
"""Get Detail on Sub-account's Margin Account (For Master Account)
https://binance-docs.github.io/apidocs/spot/en/#get-detail-on-sub-account-39-s-margin-account-for-master-account
:param email: required - Sub account email
:type email: str
:param recvWindow: optional
:type recvWindow: int
:returns: API response
.. code-block:: python
{
"email":"123@test.com",
"marginLevel": "11.64405625",
"totalAssetOfBtc": "6.82728457",
"totalLiabilityOfBtc": "0.58633215",
"totalNetAssetOfBtc": "6.24095242",
"marginTradeCoeffVo":
{
"forceLiquidationBar": "1.10000000", // Liquidation margin ratio
"marginCallBar": "1.50000000", // Margin call margin ratio
"normalBar": "2.00000000" // Initial margin ratio
},
"marginUserAssetVoList": [
{
"asset": "BTC",
"borrowed": "0.00000000",
"free": "0.00499500",
"interest": "0.00000000",
"locked": "0.00000000",
"netAsset": "0.00499500"
},
{
"asset": "BNB",
"borrowed": "201.66666672",
"free": "2346.50000000",
"interest": "0.00000000",
"locked": "0.00000000",
"netAsset": "2144.83333328"
},
{
"asset": "ETH",
"borrowed": "0.00000000",
"free": "0.00000000",
"interest": "0.00000000",
"locked": "0.00000000",
"netAsset": "0.00000000"
},
{
"asset": "USDT",
"borrowed": "0.00000000",
"free": "0.00000000",
"interest": "0.00000000",
"locked": "0.00000000",
"netAsset": "0.00000000"
}
]
}
:raises: BinanceRequestException, BinanceAPIException
"""
return self._request_margin_api('get', 'sub-account/margin/account', True, data=params)
def get_subaccount_margin_summary(self, **params):
"""Get Summary of Sub-account's Margin Account (For Master Account)
https://binance-docs.github.io/apidocs/spot/en/#get-summary-of-sub-account-39-s-margin-account-for-master-account
:param recvWindow: optional
:type recvWindow: int
:returns: API response
.. code-block:: python
{
"totalAssetOfBtc": "4.33333333",
"totalLiabilityOfBtc": "2.11111112",
"totalNetAssetOfBtc": "2.22222221",
"subAccountList":[
{
"email":"123@test.com",
"totalAssetOfBtc": "2.11111111",
"totalLiabilityOfBtc": "1.11111111",
"totalNetAssetOfBtc": "1.00000000"
},
{
"email":"345@test.com",
"totalAssetOfBtc": "2.22222222",
"totalLiabilityOfBtc": "1.00000001",
"totalNetAssetOfBtc": "1.22222221"
}
]
}
:raises: BinanceRequestException, BinanceAPIException
"""
return self._request_margin_api('get', 'sub-account/margin/accountSummary', True, data=params)
def enable_subaccount_futures(self, **params):
"""Enable Futures for Sub-account (For Master Account)
https://binance-docs.github.io/apidocs/spot/en/#enable-futures-for-sub-account-for-master-account
:param email: required - Sub account email
:type email: str
:param recvWindow: optional
:type recvWindow: int
:returns: API response
.. code-block:: python
{
"email":"123@test.com",
"isFuturesEnabled": true // true or false
}
:raises: BinanceRequestException, BinanceAPIException
"""
return self._request_margin_api('post', 'sub-account/futures/enable', True, data=params)
def get_subaccount_futures_details(self, **params):
"""Get Detail on Sub-account's Futures Account (For Master Account)
https://binance-docs.github.io/apidocs/spot/en/#get-detail-on-sub-account-39-s-futures-account-for-master-account
:param email: required - Sub account email
:type email: str
:param recvWindow: optional
:type recvWindow: int
:returns: API response
.. code-block:: python
{
"email": "abc@test.com",
"asset": "USDT",
"assets":[
{
"asset": "USDT",
"initialMargin": "0.00000000",
"maintenanceMargin": "0.00000000",
"marginBalance": "0.88308000",
"maxWithdrawAmount": "0.88308000",
"openOrderInitialMargin": "0.00000000",
"positionInitialMargin": "0.00000000",
"unrealizedProfit": "0.00000000",
"walletBalance": "0.88308000"
}
],
"canDeposit": true,
"canTrade": true,
"canWithdraw": true,
"feeTier": 2,
"maxWithdrawAmount": "0.88308000",
"totalInitialMargin": "0.00000000",
"totalMaintenanceMargin": "0.00000000",
"totalMarginBalance": "0.88308000",
"totalOpenOrderInitialMargin": "0.00000000",
"totalPositionInitialMargin": "0.00000000",
"totalUnrealizedProfit": "0.00000000",
"totalWalletBalance": "0.88308000",
"updateTime": 1576756674610
}
:raises: BinanceRequestException, BinanceAPIException
"""
return self._request_margin_api('get', 'sub-account/futures/account', True, data=params)
def get_subaccount_futures_summary(self, **params):
"""Get Summary of Sub-account's Futures Account (For Master Account)
https://binance-docs.github.io/apidocs/spot/en/#get-summary-of-sub-account-39-s-futures-account-for-master-account
:param recvWindow: optional
:type recvWindow: int
:returns: API response
.. code-block:: python
{
"totalInitialMargin": "9.83137400",
"totalMaintenanceMargin": "0.41568700",
"totalMarginBalance": "23.03235621",
"totalOpenOrderInitialMargin": "9.00000000",
"totalPositionInitialMargin": "0.83137400",
"totalUnrealizedProfit": "0.03219710",
"totalWalletBalance": "22.15879444",
"asset": "USDT",
"subAccountList":[
{
"email": "123@test.com",
"totalInitialMargin": "9.00000000",
"totalMaintenanceMargin": "0.00000000",
"totalMarginBalance": "22.12659734",
"totalOpenOrderInitialMargin": "9.00000000",
"totalPositionInitialMargin": "0.00000000",
"totalUnrealizedProfit": "0.00000000",
"totalWalletBalance": "22.12659734",
"asset": "USDT"
},
{
"email": "345@test.com",
"totalInitialMargin": "0.83137400",
"totalMaintenanceMargin": "0.41568700",
"totalMarginBalance": "0.90575887",
"totalOpenOrderInitialMargin": "0.00000000",
"totalPositionInitialMargin": "0.83137400",
"totalUnrealizedProfit": "0.03219710",
"totalWalletBalance": "0.87356177",
"asset": "USDT"
}
]
}
:raises: BinanceRequestException, BinanceAPIException
"""
return self._request_margin_api('get', 'sub-account/futures/accountSummary', True, data=params)
def get_subaccount_futures_positionrisk(self, **params):
"""Get Futures Position-Risk of Sub-account (For Master Account)
https://binance-docs.github.io/apidocs/spot/en/#get-futures-position-risk-of-sub-account-for-master-account
:param email: required - Sub account email
:type email: str
:param recvWindow: optional
:type recvWindow: int
:returns: API response
.. code-block:: python
[
{
"entryPrice": "9975.12000",
"leverage": "50", // current initial leverage
"maxNotional": "1000000", // notional value limit of current initial leverage
"liquidationPrice": "7963.54",
"markPrice": "9973.50770517",
"positionAmount": "0.010",
"symbol": "BTCUSDT",
"unrealizedProfit": "-0.01612295"
}
]
:raises: BinanceRequestException, BinanceAPIException
"""
return self._request_margin_api('get', 'sub-account/futures/positionRisk', True, data=params)
def make_subaccount_futures_transfer(self, **params):
"""Futures Transfer for Sub-account (For Master Account)
https://binance-docs.github.io/apidocs/spot/en/#futures-transfer-for-sub-account-for-master-account
:param email: required - Sub account email
:type email: str
:param asset: required - The asset being transferred, e.g., USDT
:type asset: str
:param amount: required - The amount to be transferred
:type amount: float
:param type: required - 1: transfer from subaccount's spot account to its USDT-margined futures account
2: transfer from subaccount's USDT-margined futures account to its spot account
3: transfer from subaccount's spot account to its COIN-margined futures account
4: transfer from subaccount's COIN-margined futures account to its spot account
:type type: int
:returns: API response
.. code-block:: python
{
"txnId":"2966662589"
}
:raises: BinanceRequestException, BinanceAPIException
"""
return self._request_margin_api('post', 'sub-account/futures/transfer', True, data=params)
def make_subaccount_margin_transfer(self, **params):
"""Margin Transfer for Sub-account (For Master Account)
https://binance-docs.github.io/apidocs/spot/en/#margin-transfer-for-sub-account-for-master-account
:param email: required - Sub account email
:type email: str
:param asset: required - The asset being transferred, e.g., USDT
:type asset: str
:param amount: required - The amount to be transferred
:type amount: float
:param type: required - 1: transfer from subaccount's spot account to margin account
2: transfer from subaccount's margin account to its spot account
:type type: int
:returns: API response
.. code-block:: python
{
"txnId":"2966662589"
}
:raises: BinanceRequestException, BinanceAPIException
"""
return self._request_margin_api('post', 'sub-account/margin/transfer', True, data=params)
def make_subaccount_to_subaccount_transfer(self, **params):
"""Transfer to Sub-account of Same Master (For Sub-account)
https://binance-docs.github.io/apidocs/spot/en/#transfer-to-sub-account-of-same-master-for-sub-account
:param toEmail: required - Sub account email
:type toEmail: str
:param asset: required - The asset being transferred, e.g., USDT
:type asset: str
:param amount: required - The amount to be transferred
:type amount: float
:param recvWindow: optional
:type recvWindow: int
:returns: API response
.. code-block:: python
{
"txnId":"2966662589"
}
:raises: BinanceRequestException, BinanceAPIException
"""
return self._request_margin_api('post', 'sub-account/transfer/subToSub', True, data=params)
def make_subaccount_to_master_transfer(self, **params):
"""Transfer to Master (For Sub-account)
https://binance-docs.github.io/apidocs/spot/en/#transfer-to-master-for-sub-account
:param asset: required - The asset being transferred, e.g., USDT
:type asset: str
:param amount: required - The amount to be transferred
:type amount: float
:param recvWindow: optional
:type recvWindow: int
:returns: API response
.. code-block:: python
{
"txnId":"2966662589"
}
:raises: BinanceRequestException, BinanceAPIException
"""
return self._request_margin_api('post', 'sub-account/transfer/subToMaster', True, data=params)
def get_subaccount_transfer_history(self, **params):
    """Sub-account Transfer History (For Sub-account)
    https://binance-docs.github.io/apidocs/spot/en/#sub-account-transfer-history-for-sub-account
    :param asset: required - The asset being transferred, e.g., USDT
    :type asset: str
    :param type: optional - 1: transfer in, 2: transfer out
    :type type: int
    :param startTime: optional
    :type startTime: int
    :param endTime: optional
    :type endTime: int
    :param limit: optional - Default 500
    :type limit: int
    :param recvWindow: optional
    :type recvWindow: int
    :returns: API response
    .. code-block:: python
        [
          {
            "counterParty":"master",
            "email":"master@test.com",
            "type":1,  // 1 for transfer in, 2 for transfer out
            "asset":"BTC",
            "qty":"1",
            "status":"SUCCESS",
            "tranId":11798835829,
            "time":1544433325000
          },
          {
            "counterParty":"subAccount",
            "email":"sub2@test.com",
            "type":2,
            "asset":"ETH",
            "qty":"2",
            "status":"SUCCESS",
            "tranId":11798829519,
            "time":1544433326000
          }
        ]
    :raises: BinanceRequestException, BinanceAPIException
    """
    return self._request_margin_api('get', 'sub-account/transfer/subUserHistory', True, data=params)
def make_universal_transfer(self, **params):
"""Universal Transfer (For Master Account)
https://binance-docs.github.io/apidocs/spot/en/#universal-transfer-for-master-account
:param fromEmail: optional
:type fromEmail: str
:param toEmail: optional
:type toEmail: str
:param fromAccountType: required
:type fromAccountType: str
:param toAccountType: required
:type toAccountType: str
:param asset: required - The asset being transferred, e.g., USDT
:type asset: str
:param amount: required
:type amount: float
:param recvWindow: optional
:type recvWindow: int
:returns: API response
.. code-block:: python
{
"tranId":11945860693
}
:raises: BinanceRequestException, BinanceAPIException
"""
return self._request_margin_api('post', 'sub-account/universalTransfer', True, data=params)
def get_universal_transfer_history(self, **params):
"""Universal Transfer (For Master Account)
https://binance-docs.github.io/apidocs/spot/en/#query-universal-transfer-history
:param fromEmail: optional
:type fromEmail: str
:param toEmail: optional
:type toEmail: str
:param startTime: optional
:type startTime: int
:param endTime: optional
:type endTime: int
:param page: optional
:type page: int
:param limit: optional
:type limit: int
:param recvWindow: optional
:type recvWindow: int
:returns: API response
.. code-block:: python
[
{
"tranId":11945860693,
"fromEmail":"master@test.com",
"toEmail":"subaccount1@test.com",
"asset":"BTC",
"amount":"0.1",
"fromAccountType":"SPOT",
"toAccountType":"COIN_FUTURE",
"status":"SUCCESS",
"createTimeStamp":1544433325000
},
{
"tranId":11945857955,
"fromEmail":"master@test.com",
"toEmail":"subaccount2@test.com",
"asset":"ETH",
"amount":"0.2",
"fromAccountType":"SPOT",
"toAccountType":"USDT_FUTURE",
"status":"SUCCESS",
"createTimeStamp":1544433326000
}
]
:raises: BinanceRequestException, BinanceAPIException
"""
return self._request_margin_api('get', 'sub-account/universalTransfer', True, data=params)
# Futures API
def futures_ping(self):
"""Test connectivity to the Rest API
https://binance-docs.github.io/apidocs/futures/en/#test-connectivity
"""
return self._request_futures_api('get', 'ping')
def futures_time(self):
"""Test connectivity to the Rest API and get the current server time.
https://binance-docs.github.io/apidocs/futures/en/#check-server-time
"""
return self._request_futures_api('get', 'time')
def futures_exchange_info(self):
"""Current exchange trading rules and symbol information
https://binance-docs.github.io/apidocs/futures/en/#exchange-information-market_data
"""
return self._request_futures_api('get', 'exchangeInfo')
def futures_order_book(self, **params):
"""Get the Order Book for the market
https://binance-docs.github.io/apidocs/futures/en/#order-book-market_data
"""
return self._request_futures_api('get', 'depth', data=params)
def futures_recent_trades(self, **params):
"""Get recent trades (up to last 500).
https://binance-docs.github.io/apidocs/futures/en/#recent-trades-list-market_data
"""
return self._request_futures_api('get', 'trades', data=params)
def futures_historical_trades(self, **params):
"""Get older market historical trades.
https://binance-docs.github.io/apidocs/futures/en/#old-trades-lookup-market_data
"""
return self._request_futures_api('get', 'historicalTrades', data=params)
def futures_aggregate_trades(self, **params):
"""Get compressed, aggregate trades. Trades that fill at the time, from the same order, with the same
price will have the quantity aggregated.
https://binance-docs.github.io/apidocs/futures/en/#compressed-aggregate-trades-list-market_data
"""
return self._request_futures_api('get', 'aggTrades', data=params)
def futures_klines(self, **params):
"""Kline/candlestick bars for a symbol. Klines are uniquely identified by their open time.
https://binance-docs.github.io/apidocs/futures/en/#kline-candlestick-data-market_data
"""
return self._request_futures_api('get', 'klines', data=params)
def futures_historical_klines(self, symbol, interval, start_str, end_str=None,
                              limit=500):
    """Get historical futures klines from Binance.

    :param symbol: Name of symbol pair e.g BNBBTC
    :type symbol: str
    :param interval: Binance Kline interval
    :type interval: str
    :param start_str: Start date string in UTC format or timestamp in milliseconds
    :type start_str: str|int
    :param end_str: optional - end date string in UTC format or timestamp in
        milliseconds (default will fetch everything up to now)
    :type end_str: str|int
    :param limit: Default 500; max 1000.
    :type limit: int
    :return: list of OHLCV values
    """
    # Bug fix: forward the caller-supplied end_str and limit. The previous
    # implementation passed the hard-coded values end_str=None, limit=500,
    # which silently ignored both arguments.
    return self._historical_klines(symbol, interval, start_str,
                                   end_str=end_str, limit=limit, spot=False)
def futures_historical_klines_generator(self, symbol, interval, start_str, end_str=None):
"""Get historical futures klines generator from Binance
:param symbol: Name of symbol pair e.g BNBBTC
:type symbol: str
:param interval: Binance Kline interval
:type interval: str
:param start_str: Start date string in UTC format or timestamp in milliseconds
:type start_str: str|int
:param end_str: optional - end date string in UTC format or timestamp in milliseconds (default will fetch everything up to now)
:type end_str: str|int
:return: generator of OHLCV values
"""
return self._historical_klines_generator(symbol, interval, start_str, end_str=end_str, spot=False)
def futures_mark_price(self, **params):
"""Get Mark Price and Funding Rate
https://binance-docs.github.io/apidocs/futures/en/#mark-price-market_data
"""
return self._request_futures_api('get', 'premiumIndex', data=params)
def futures_funding_rate(self, **params):
"""Get funding rate history
https://binance-docs.github.io/apidocs/futures/en/#get-funding-rate-history-market_data
"""
return self._request_futures_api('get', 'fundingRate', data=params)
def futures_ticker(self, **params):
"""24 hour rolling window price change statistics.
https://binance-docs.github.io/apidocs/futures/en/#24hr-ticker-price-change-statistics-market_data
"""
return self._request_futures_api('get', 'ticker/24hr', data=params)
def futures_symbol_ticker(self, **params):
"""Latest price for a symbol or symbols.
https://binance-docs.github.io/apidocs/futures/en/#symbol-price-ticker-market_data
"""
return self._request_futures_api('get', 'ticker/price', data=params)
def futures_orderbook_ticker(self, **params):
"""Best price/qty on the order book for a symbol or symbols.
https://binance-docs.github.io/apidocs/futures/en/#symbol-order-book-ticker-market_data
"""
return self._request_futures_api('get', 'ticker/bookTicker', data=params)
def futures_liquidation_orders(self, **params):
    """Get all liquidation orders.

    https://binance-docs.github.io/apidocs/futures/en/#get-all-liquidation-orders-market_data
    """
    # Endpoint fix: the USDT-margined futures path is 'allForceOrders';
    # it is not nested under 'ticker/' (cf. the coin-margined variant below,
    # which already uses the bare path).
    return self._request_futures_api('get', 'allForceOrders', data=params)
def futures_open_interest(self, **params):
    """Get present open interest of a specific symbol.

    https://binance-docs.github.io/apidocs/futures/en/#open-interest-market_data
    """
    # Endpoint fix: open interest lives at 'openInterest', not
    # 'ticker/openInterest' (the coin-margined variant below already uses
    # the bare path).
    return self._request_futures_api('get', 'openInterest', data=params)
def futures_open_interest_hist(self, **params):
"""Get open interest statistics of a specific symbol.
https://binance-docs.github.io/apidocs/futures/en/#open-interest-statistics
"""
return self._request_futures_data_api('get', 'openInterestHist', data=params)
def futures_leverage_bracket(self, **params):
"""Notional and Leverage Brackets
https://binance-docs.github.io/apidocs/futures/en/#notional-and-leverage-brackets-market_data
"""
return self._request_futures_api('get', 'leverageBracket', True, data=params)
def futures_account_transfer(self, **params):
"""Execute transfer between spot account and futures account.
https://binance-docs.github.io/apidocs/futures/en/#new-future-account-transfer
"""
return self._request_margin_api('post', 'futures/transfer', True, data=params)
def transfer_history(self, **params):
"""Get future account transaction history list
https://binance-docs.github.io/apidocs/futures/en/#get-future-account-transaction-history-list-user_data
"""
return self._request_margin_api('get', 'futures/transfer', True, data=params)
def futures_create_order(self, **params):
"""Send in a new order.
https://binance-docs.github.io/apidocs/futures/en/#new-order-trade
"""
return self._request_futures_api('post', 'order', True, data=params)
def futures_get_order(self, **params):
"""Check an order's status.
https://binance-docs.github.io/apidocs/futures/en/#query-order-user_data
"""
return self._request_futures_api('get', 'order', True, data=params)
def futures_get_open_orders(self, **params):
"""Get all open orders on a symbol.
https://binance-docs.github.io/apidocs/futures/en/#current-open-orders-user_data
"""
return self._request_futures_api('get', 'openOrders', True, data=params)
def futures_get_all_orders(self, **params):
"""Get all futures account orders; active, canceled, or filled.
https://binance-docs.github.io/apidocs/futures/en/#all-orders-user_data
"""
return self._request_futures_api('get', 'allOrders', True, data=params)
def futures_cancel_order(self, **params):
"""Cancel an active futures order.
https://binance-docs.github.io/apidocs/futures/en/#cancel-order-trade
"""
return self._request_futures_api('delete', 'order', True, data=params)
def futures_cancel_all_open_orders(self, **params):
"""Cancel all open futures orders
https://binance-docs.github.io/apidocs/futures/en/#cancel-all-open-orders-trade
"""
return self._request_futures_api('delete', 'allOpenOrders', True, data=params)
def futures_cancel_orders(self, **params):
"""Cancel multiple futures orders
https://binance-docs.github.io/apidocs/futures/en/#cancel-multiple-orders-trade
"""
return self._request_futures_api('delete', 'batchOrders', True, data=params)
def futures_account_balance(self, **params):
"""Get futures account balance
https://binance-docs.github.io/apidocs/futures/en/#future-account-balance-user_data
"""
return self._request_futures_api('get', 'balance', True, data=params)
def futures_account(self, **params):
"""Get current account information.
https://binance-docs.github.io/apidocs/futures/en/#account-information-user_data
"""
return self._request_futures_api('get', 'account', True, data=params)
def futures_change_leverage(self, **params):
"""Change user's initial leverage of specific symbol market
https://binance-docs.github.io/apidocs/futures/en/#change-initial-leverage-trade
"""
return self._request_futures_api('post', 'leverage', True, data=params)
def futures_change_margin_type(self, **params):
"""Change the margin type for a symbol
https://binance-docs.github.io/apidocs/futures/en/#change-margin-type-trade
"""
return self._request_futures_api('post', 'marginType', True, data=params)
def futures_change_position_margin(self, **params):
"""Change the position margin for a symbol
https://binance-docs.github.io/apidocs/futures/en/#modify-isolated-position-margin-trade
"""
return self._request_futures_api('post', 'positionMargin', True, data=params)
def futures_position_margin_history(self, **params):
"""Get position margin change history
https://binance-docs.github.io/apidocs/futures/en/#get-postion-margin-change-history-trade
"""
return self._request_futures_api('get', 'positionMargin/history', True, data=params)
def futures_position_information(self, **params):
"""Get position information
https://binance-docs.github.io/apidocs/futures/en/#position-information-user_data
"""
return self._request_futures_api('get', 'positionRisk', True, data=params)
def futures_account_trades(self, **params):
"""Get trades for the authenticated account and symbol.
https://binance-docs.github.io/apidocs/futures/en/#account-trade-list-user_data
"""
return self._request_futures_api('get', 'userTrades', True, data=params)
def futures_income_history(self, **params):
"""Get income history for authenticated account
https://binance-docs.github.io/apidocs/futures/en/#get-income-history-user_data
"""
return self._request_futures_api('get', 'income', True, data=params)
def futures_change_position_mode(self, **params):
"""Change position mode for authenticated account
https://binance-docs.github.io/apidocs/futures/en/#change-position-mode-trade
"""
return self._request_futures_api('post', 'positionSide/dual', True, data=params)
def futures_get_position_mode(self, **params):
"""Get position mode for authenticated account
https://binance-docs.github.io/apidocs/futures/en/#get-current-position-mode-user_data
"""
return self._request_futures_api('get', 'positionSide/dual', True, data=params)
# COIN Futures API
def futures_coin_ping(self):
"""Test connectivity to the Rest API
https://binance-docs.github.io/apidocs/delivery/en/#test-connectivity
"""
return self._request_futures_coin_api("get", "ping")
def futures_coin_time(self):
"""Test connectivity to the Rest API and get the current server time.
https://binance-docs.github.io/apidocs/delivery/en/#check-server-time
"""
return self._request_futures_coin_api("get", "time")
def futures_coin_exchange_info(self):
"""Current exchange trading rules and symbol information
https://binance-docs.github.io/apidocs/delivery/en/#exchange-information
"""
return self._request_futures_coin_api("get", "exchangeInfo")
def futures_coin_order_book(self, **params):
"""Get the Order Book for the market
https://binance-docs.github.io/apidocs/delivery/en/#order-book
"""
return self._request_futures_coin_api("get", "depth", data=params)
def futures_coin_recent_trades(self, **params):
"""Get recent trades (up to last 500).
https://binance-docs.github.io/apidocs/delivery/en/#recent-trades-list
"""
return self._request_futures_coin_api("get", "trades", data=params)
def futures_coin_historical_trades(self, **params):
"""Get older market historical trades.
https://binance-docs.github.io/apidocs/delivery/en/#old-trades-lookup-market_data
"""
return self._request_futures_coin_api("get", "historicalTrades", data=params)
def futures_coin_aggregate_trades(self, **params):
"""Get compressed, aggregate trades. Trades that fill at the time, from the same order, with the same
price will have the quantity aggregated.
https://binance-docs.github.io/apidocs/delivery/en/#compressed-aggregate-trades-list
"""
return self._request_futures_coin_api("get", "aggTrades", data=params)
def futures_coin_klines(self, **params):
"""Kline/candlestick bars for a symbol. Klines are uniquely identified by their open time.
https://binance-docs.github.io/apidocs/delivery/en/#kline-candlestick-data
"""
return self._request_futures_coin_api("get", "klines", data=params)
def futures_coin_continous_klines(self, **params):
"""Kline/candlestick bars for a specific contract type. Klines are uniquely identified by their open time.
https://binance-docs.github.io/apidocs/delivery/en/#continuous-contract-kline-candlestick-data
"""
return self._request_futures_coin_api("get", "continuousKlines", data=params)
def futures_coin_index_price_klines(self, **params):
"""Kline/candlestick bars for the index price of a pair..
https://binance-docs.github.io/apidocs/delivery/en/#index-price-kline-candlestick-data
"""
return self._request_futures_coin_api("get", "indexPriceKlines", data=params)
def futures_coin_mark_price_klines(self, **params):
"""Kline/candlestick bars for the index price of a pair..
https://binance-docs.github.io/apidocs/delivery/en/#mark-price-kline-candlestick-data
"""
return self._request_futures_coin_api("get", "markPriceKlines", data=params)
def futures_coin_mark_price(self, **params):
"""Get Mark Price and Funding Rate
https://binance-docs.github.io/apidocs/delivery/en/#index-price-and-mark-price
"""
return self._request_futures_coin_api("get", "premiumIndex", data=params)
def futures_coin_funding_rate(self, **params):
"""Get funding rate history
https://binance-docs.github.io/apidocs/delivery/en/#get-funding-rate-history-of-perpetual-futures
"""
return self._request_futures_coin_api("get", "fundingRate", data=params)
def futures_coin_ticker(self, **params):
"""24 hour rolling window price change statistics.
https://binance-docs.github.io/apidocs/delivery/en/#24hr-ticker-price-change-statistics
"""
return self._request_futures_coin_api("get", "ticker/24hr", data=params)
def futures_coin_symbol_ticker(self, **params):
"""Latest price for a symbol or symbols.
https://binance-docs.github.io/apidocs/delivery/en/#symbol-price-ticker
"""
return self._request_futures_coin_api("get", "ticker/price", data=params)
def futures_coin_orderbook_ticker(self, **params):
"""Best price/qty on the order book for a symbol or symbols.
https://binance-docs.github.io/apidocs/delivery/en/#symbol-order-book-ticker
"""
return self._request_futures_coin_api("get", "ticker/bookTicker", data=params)
def futures_coin_liquidation_orders(self, **params):
"""Get all liquidation orders
https://binance-docs.github.io/apidocs/delivery/en/#get-all-liquidation-orders
"""
return self._request_futures_coin_api("get", "allForceOrders", data=params)
def futures_coin_open_interest(self, **params):
"""Get present open interest of a specific symbol.
https://binance-docs.github.io/apidocs/delivery/en/#open-interest
"""
return self._request_futures_coin_api("get", "openInterest", data=params)
def futures_coin_open_interest_hist(self, **params):
"""Get open interest statistics of a specific symbol.
https://binance-docs.github.io/apidocs/delivery/en/#open-interest-statistics-market-data
"""
return self._request_futures_coin_data_api("get", "openInterestHist", data=params)
def futures_coin_leverage_bracket(self, **params):
"""Notional and Leverage Brackets
https://binance-docs.github.io/apidocs/delivery/en/#notional-bracket-for-pair-user_data
"""
return self._request_futures_coin_api(
"get", "leverageBracket", version=2, signed=True, data=params
)
def new_transfer_history(self, **params):
    """Get futures account transaction history list.
    https://binance-docs.github.io/apidocs/delivery/en/#new-future-account-transfer
    """
    # NOTE: the endpoint moved from 'futures/transfer' to 'asset/transfer';
    # the superseded call is kept below for reference.
    return self._request_margin_api("get", "asset/transfer", True, data=params)
    # return self._request_margin_api("get", "futures/transfer", True, data=params)
def universal_transfer(self, **params):
    """Universal transfer API across different Binance account types.
    https://binance-docs.github.io/apidocs/spot/en/#user-universal-transfer
    """
    return self._request_margin_api(
        "post", "asset/transfer", signed=True, data=params
    )
def futures_coin_create_order(self, **params):
"""Send in a new order.
https://binance-docs.github.io/apidocs/delivery/en/#new-order-trade
"""
return self._request_futures_coin_api("post", "order", True, data=params)
def futures_coin_get_order(self, **params):
"""Check an order's status.
https://binance-docs.github.io/apidocs/delivery/en/#query-order-user_data
"""
return self._request_futures_coin_api("get", "order", True, data=params)
def futures_coin_get_open_orders(self, **params):
"""Get all open orders on a symbol.
https://binance-docs.github.io/apidocs/delivery/en/#current-all-open-orders-user_data
"""
return self._request_futures_coin_api("get", "openOrders", True, data=params)
def futures_coin_get_all_orders(self, **params):
"""Get all futures account orders; active, canceled, or filled.
https://binance-docs.github.io/apidocs/delivery/en/#all-orders-user_data
"""
return self._request_futures_coin_api(
"get", "allOrders", signed=True, data=params
)
def futures_coin_cancel_order(self, **params):
"""Cancel an active futures order.
https://binance-docs.github.io/apidocs/delivery/en/#cancel-order-trade
"""
return self._request_futures_coin_api(
"delete", "order", signed=True, data=params
)
def futures_coin_cancel_all_open_orders(self, **params):
"""Cancel all open futures orders
https://binance-docs.github.io/apidocs/delivery/en/#cancel-all-open-orders-trade
"""
return self._request_futures_coin_api(
"delete", "allOpenOrders", signed=True, data=params
)
def futures_coin_cancel_orders(self, **params):
"""Cancel multiple futures orders
https://binance-docs.github.io/apidocs/delivery/en/#cancel-multiple-orders-trade
"""
return self._request_futures_coin_api(
"delete", "batchOrders", True, data=params
)
def futures_coin_account_balance(self, **params):
"""Get futures account balance
https://binance-docs.github.io/apidocs/delivery/en/#futures-account-balance-user_data
"""
return self._request_futures_coin_api(
"get", "balance", signed=True, data=params
)
def futures_coin_account(self, **params):
"""Get current account information.
https://binance-docs.github.io/apidocs/delivery/en/#account-information-user_data
"""
return self._request_futures_coin_api(
"get", "account", signed=True, data=params
)
def futures_coin_change_leverage(self, **params):
"""Change user's initial leverage of specific symbol market
https://binance-docs.github.io/apidocs/delivery/en/#change-initial-leverage-trade
"""
return self._request_futures_coin_api(
"post", "leverage", signed=True, data=params
)
def futures_coin_change_margin_type(self, **params):
"""Change the margin type for a symbol
https://binance-docs.github.io/apidocs/delivery/en/#change-margin-type-trade
"""
return self._request_futures_coin_api(
"post", "marginType", signed=True, data=params
)
def futures_coin_change_position_margin(self, **params):
"""Change the position margin for a symbol
https://binance-docs.github.io/apidocs/delivery/en/#modify-isolated-position-margin-trade
"""
return self._request_futures_coin_api(
"post", "positionMargin", True, data=params
)
def futures_coin_position_margin_history(self, **params):
"""Get position margin change history
https://binance-docs.github.io/apidocs/delivery/en/#get-position-margin-change-history-trade
"""
return self._request_futures_coin_api(
"get", "positionMargin/history", True, data=params
)
def futures_coin_position_information(self, **params):
"""Get position information
https://binance-docs.github.io/apidocs/delivery/en/#position-information-user_data
"""
return self._request_futures_coin_api("get", "positionRisk", True, data=params)
def futures_coin_account_trades(self, **params):
"""Get trades for the authenticated account and symbol.
https://binance-docs.github.io/apidocs/delivery/en/#account-trade-list-user_data
"""
return self._request_futures_coin_api("get", "userTrades", True, data=params)
def futures_coin_income_history(self, **params):
"""Get income history for authenticated account
https://binance-docs.github.io/apidocs/delivery/en/#get-income-history-user_data
"""
return self._request_futures_coin_api("get", "income", True, data=params)
def futures_coin_change_position_mode(self, **params):
"""Change user's position mode (Hedge Mode or One-way Mode ) on EVERY symbol
https://binance-docs.github.io/apidocs/delivery/en/#change-position-mode-trade
"""
return self._request_futures_coin_api("post", "positionSide/dual", True, data=params)
def futures_coin_get_position_mode(self, **params):
"""Get user's position mode (Hedge Mode or One-way Mode ) on EVERY symbol
https://binance-docs.github.io/apidocs/delivery/en/#get-current-position-mode-user_data
"""
return self._request_futures_coin_api("get", "positionSide/dual", True, data=params)
def get_all_coins_info(self, **params):
"""Get information of coins (available for deposit and withdraw) for user.
https://binance-docs.github.io/apidocs/spot/en/#all-coins-39-information-user_data
:param recvWindow: optional
:type recvWindow: int
:returns: API response
.. code-block:: python
{
"coin": "BTC",
"depositAllEnable": true,
"withdrawAllEnable": true,
"name": "Bitcoin",
"free": "0",
"locked": "0",
"freeze": "0",
"withdrawing": "0",
"ipoing": "0",
"ipoable": "0",
"storage": "0",
"isLegalMoney": false,
"trading": true,
"networkList": [
{
"network": "BNB",
"coin": "BTC",
"withdrawIntegerMultiple": "0.00000001",
"isDefault": false,
"depositEnable": true,
"withdrawEnable": true,
"depositDesc": "",
"withdrawDesc": "",
"specialTips": "Both a MEMO and an Address are required to successfully deposit your BEP2-BTCB tokens to Binance.",
"name": "BEP2",
"resetAddressStatus": false,
"addressRegex": "^(bnb1)[0-9a-z]{38}$",
"memoRegex": "^[0-9A-Za-z-_]{1,120}$",
"withdrawFee": "0.0000026",
"withdrawMin": "0.0000052",
"withdrawMax": "0",
"minConfirm": 1,
"unLockConfirm": 0
},
{
"network": "BTC",
"coin": "BTC",
"withdrawIntegerMultiple": "0.00000001",
"isDefault": true,
"depositEnable": true,
"withdrawEnable": true,
"depositDesc": "",
"withdrawDesc": "",
"specialTips": "",
"name": "BTC",
"resetAddressStatus": false,
"addressRegex": "^[13][a-km-zA-HJ-NP-Z1-9]{25,34}$|^(bc1)[0-9A-Za-z]{39,59}$",
"memoRegex": "",
"withdrawFee": "0.0005",
"withdrawMin": "0.001",
"withdrawMax": "0",
"minConfirm": 1,
"unLockConfirm": 2
}
]
}
:raises: BinanceRequestException, BinanceAPIException
"""
return self._request_margin_api('get', 'capital/config/getall', True, data=params)
def get_account_snapshot(self, **params):
"""Get daily account snapshot of specific type.
https://binance-docs.github.io/apidocs/spot/en/#daily-account-snapshot-user_data
:param type: required. Valid values are SPOT/MARGIN/FUTURES.
:type type: string
:param startTime: optional
:type startTime: int
:param endTime: optional
:type endTime: int
:param limit: optional
:type limit: int
:param recvWindow: optional
:type recvWindow: int
:returns: API response
.. code-block:: python
{
"code":200, // 200 for success; others are error codes
"msg":"", // error message
"snapshotVos":[
{
"data":{
"balances":[
{
"asset":"BTC",
"free":"0.09905021",
"locked":"0.00000000"
},
{
"asset":"USDT",
"free":"1.89109409",
"locked":"0.00000000"
}
],
"totalAssetOfBtc":"0.09942700"
},
"type":"spot",
"updateTime":1576281599000
}
]
}
OR
.. code-block:: python
{
"code":200, // 200 for success; others are error codes
"msg":"", // error message
"snapshotVos":[
{
"data":{
"marginLevel":"2748.02909813",
"totalAssetOfBtc":"0.00274803",
"totalLiabilityOfBtc":"0.00000100",
"totalNetAssetOfBtc":"0.00274750",
"userAssets":[
{
"asset":"XRP",
"borrowed":"0.00000000",
"free":"1.00000000",
"interest":"0.00000000",
"locked":"0.00000000",
"netAsset":"1.00000000"
}
]
},
"type":"margin",
"updateTime":1576281599000
}
]
}
OR
.. code-block:: python
{
"code":200, // 200 for success; others are error codes
"msg":"", // error message
"snapshotVos":[
{
"data":{
"assets":[
{
"asset":"USDT",
"marginBalance":"118.99782335",
"walletBalance":"120.23811389"
}
],
"position":[
{
"entryPrice":"7130.41000000",
"markPrice":"7257.66239673",
"positionAmt":"0.01000000",
"symbol":"BTCUSDT",
"unRealizedProfit":"1.24029054"
}
]
},
"type":"futures",
"updateTime":1576281599000
}
]
}
:raises: BinanceRequestException, BinanceAPIException
"""
return self._request_margin_api('get', 'accountSnapshot', True, data=params)
def disable_fast_withdraw_switch(self, **params):
"""Disable Fast Withdraw Switch
https://binance-docs.github.io/apidocs/spot/en/#disable-fast-withdraw-switch-user_data
:param recvWindow: optional
:type recvWindow: int
:returns: API response
:raises: BinanceRequestException, BinanceAPIException
"""
return self._request_margin_api('post', 'disableFastWithdrawSwitch', True, data=params)
def enable_fast_withdraw_switch(self, **params):
"""Enable Fast Withdraw Switch
https://binance-docs.github.io/apidocs/spot/en/#enable-fast-withdraw-switch-user_data
:param recvWindow: optional
:type recvWindow: int
:returns: API response
:raises: BinanceRequestException, BinanceAPIException
"""
return self._request_margin_api('post', 'enableFastWithdrawSwitch', True, data=params)
|
# Copyright 2013 The LUCI Authors. All rights reserved.
# Use of this source code is governed under the Apache License, Version 2.0
# that can be found in the LICENSE file.
"""Top-level presubmit script for swarming-server.
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts for
details on the presubmit API built into gclient.
"""
def CommonChecks(input_api, output_api):
  """Runs pylint and the unit tests for the swarming server and bot code.

  Returns a list of presubmit result objects (empty when all checks pass).
  """
  output = []
  def join(*args):
    # Resolve a path relative to the directory containing this PRESUBMIT file.
    return input_api.os_path.join(input_api.PresubmitLocalPath(), *args)
  # Paths pylint must skip: vendored JS dependencies and generated protobuf
  # modules, on top of the framework's default skip list.
  black_list = list(input_api.DEFAULT_BLACK_LIST) + [
    r'ui2/node_modules/.*',
    r'ui2/nodejs/.*',
    r'.*_pb2\.py$',
    r'.*_pb2_grpc\.py$',
  ]
  disabled_warnings = [
    'relative-import',
  ]
  output.extend(input_api.canned_checks.RunPylint(
      input_api, output_api,
      black_list=black_list,
      disabled_warnings=disabled_warnings))
  # Directories searched for *_test.py files to run.
  test_directories = [
    input_api.PresubmitLocalPath(),
    join('server'),
    join('swarming_bot'),
    join('swarming_bot', 'api'),
    join('swarming_bot', 'api', 'platforms'),
    join('swarming_bot', 'bot_code'),
  ]
  blacklist = [
    # Never run the remote_smoke_test automatically. Should instead be run after
    # uploading a server instance.
    r'^remote_smoke_test\.py$'
  ]
  tests = []
  for directory in test_directories:
    tests.extend(
        input_api.canned_checks.GetUnitTestsInDirectory(
            input_api, output_api,
            directory,
            whitelist=[r'.+_test\.py$'],
            blacklist=blacklist))
  output.extend(input_api.RunTests(tests, parallel=True))
  return output
# pylint: disable=unused-argument
def CheckChangeOnUpload(input_api, output_api):
  # Intentionally a no-op: the full checks only run at commit time
  # (see CheckChangeOnCommit).
  return []
def CheckChangeOnCommit(input_api, output_api):
  # Run the shared pylint + unit-test checks before landing.
  return CommonChecks(input_api, output_api)
[swarming] disable local_smoke_test on presubmit
local_smoke_test has been migrated to another tryjob,
so there is no longer any need to run it during presubmit.
https://crrev.com/c/1847535
Bug: 917479
Change-Id: Ifca430cfcd129a048e733d36a5aeb479c3be178d
Reviewed-on: https://chromium-review.googlesource.com/c/infra/luci/luci-py/+/1851984
Commit-Queue: Junji Watanabe <aaef61ef98ffea505db831f310be2a0eb8492537@google.com>
Auto-Submit: Junji Watanabe <aaef61ef98ffea505db831f310be2a0eb8492537@google.com>
Reviewed-by: Takuto Ikuta <71927f930fca81015e0f496c65cd126ab1a692e3@chromium.org>
# Copyright 2013 The LUCI Authors. All rights reserved.
# Use of this source code is governed under the Apache License, Version 2.0
# that can be found in the LICENSE file.
"""Top-level presubmit script for swarming-server.
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts for
details on the presubmit API built into gclient.
"""
def CommonChecks(input_api, output_api):
    """Run pylint and the unit-test suites shared by upload/commit hooks."""
    results = []

    def _path(*parts):
        # Absolute path relative to the directory holding this PRESUBMIT.
        return input_api.os_path.join(input_api.PresubmitLocalPath(), *parts)

    # Pylint: skip generated protobuf modules and vendored node packages.
    pylint_black_list = list(input_api.DEFAULT_BLACK_LIST) + [
        r'ui2/node_modules/.*',
        r'ui2/nodejs/.*',
        r'.*_pb2\.py$',
        r'.*_pb2_grpc\.py$',
    ]
    results.extend(input_api.canned_checks.RunPylint(
        input_api, output_api,
        black_list=pylint_black_list,
        disabled_warnings=['relative-import']))

    # Directories whose *_test.py files are collected and run.
    test_dirs = [input_api.PresubmitLocalPath()] + [
        _path(*parts) for parts in (
            ('server',),
            ('swarming_bot',),
            ('swarming_bot', 'api'),
            ('swarming_bot', 'api', 'platforms'),
            ('swarming_bot', 'bot_code'),
        )
    ]
    skipped_tests = [
        # local_smoke_test runs on other tryjobs
        r'^local_smoke_test\.py$',
        # Never run the remote_smoke_test automatically. Should instead be run
        # after uploading a server instance.
        r'^remote_smoke_test\.py$'
    ]
    tests = []
    for directory in test_dirs:
        tests.extend(input_api.canned_checks.GetUnitTestsInDirectory(
            input_api, output_api,
            directory,
            whitelist=[r'.+_test\.py$'],
            blacklist=skipped_tests))
    results.extend(input_api.RunTests(tests, parallel=True))
    return results
# pylint: disable=unused-argument
def CheckChangeOnUpload(input_api, output_api):
    # Upload-time checks are intentionally disabled; the full suite runs
    # on commit (see CheckChangeOnCommit).
    return []
def CheckChangeOnCommit(input_api, output_api):
    # Commit-time hook: run the shared pylint + unit-test checks.
    return CommonChecks(input_api, output_api)
|
import json
import re
from django.utils.translation import ugettext as _
from ide.models.files import ResourceFile, ResourceIdentifier
__author__ = 'katharine'
def generate_wscript_file_sdk2(project, for_export=False):
    """Return the waf `wscript` contents for an SDK 2 project.

    The `{{jshint}}` placeholder in the template is replaced with a Python
    boolean literal; linting is disabled for exported projects so that
    builds outside the CloudPebble sandbox don't require jshint.
    """
    jshint = project.app_jshint
    # NOTE(review): indentation inside this template was reconstructed from
    # the standard waf wscript layout — confirm against the shipped template.
    wscript = """
#
# This file is the default set of rules to compile a Pebble project.
#
# Feel free to customize this to your needs.
#
import os.path
try:
    from sh import CommandNotFound, jshint, cat, ErrorReturnCode_2
    hint = jshint
except (ImportError, CommandNotFound):
    hint = None
top = '.'
out = 'build'
def options(ctx):
    ctx.load('pebble_sdk')
def configure(ctx):
    ctx.load('pebble_sdk')
    global hint
    if hint is not None:
        hint = hint.bake(['--config', 'pebble-jshintrc'])
def build(ctx):
    if {{jshint}} and hint is not None:
        try:
            hint([node.abspath() for node in ctx.path.ant_glob("src/**/*.js")], _tty_out=False) # no tty because there are none in the cloudpebble sandbox.
        except ErrorReturnCode_2 as e:
            ctx.fatal("\\nJavaScript linting failed (you can disable this in Project Settings):\\n" + e.stdout)
    # Concatenate all our JS files (but not recursively), and only if any JS exists in the first place.
    ctx.path.make_node('src/js/').mkdir()
    js_paths = ctx.path.ant_glob(['src/*.js', 'src/**/*.js'])
    if js_paths:
        ctx(rule='cat ${SRC} > ${TGT}', source=js_paths, target='pebble-js-app.js')
        has_js = True
    else:
        has_js = False
    ctx.load('pebble_sdk')
    ctx.pbl_program(source=ctx.path.ant_glob('src/**/*.c'),
                    target='pebble-app.elf')
    if os.path.exists('worker_src'):
        ctx.pbl_worker(source=ctx.path.ant_glob('worker_src/**/*.c'),
                       target='pebble-worker.elf')
        ctx.pbl_bundle(elf='pebble-app.elf',
                       worker_elf='pebble-worker.elf',
                       js='pebble-js-app.js' if has_js else [])
    else:
        ctx.pbl_bundle(elf='pebble-app.elf',
                       js='pebble-js-app.js' if has_js else [])
"""
    return wscript.replace('{{jshint}}', 'True' if jshint and not for_export else 'False')
def generate_wscript_file_sdk3(project, for_export):
    """Return the waf `wscript` contents for an SDK 3 project.

    Unlike SDK 2, SDK 3 builds one ELF per target platform and bundles
    them all via `pbl_bundle(binaries=...)`.
    """
    jshint = project.app_jshint
    # NOTE(review): indentation inside this template was reconstructed from
    # the standard waf wscript layout — confirm against the shipped template.
    wscript = """
#
# This file is the default set of rules to compile a Pebble project.
#
# Feel free to customize this to your needs.
#
import os.path
try:
    from sh import CommandNotFound, jshint, cat, ErrorReturnCode_2
    hint = jshint
except (ImportError, CommandNotFound):
    hint = None
top = '.'
out = 'build'
def options(ctx):
    ctx.load('pebble_sdk')
def configure(ctx):
    ctx.load('pebble_sdk')
def build(ctx):
    if {{jshint}} and hint is not None:
        try:
            hint([node.abspath() for node in ctx.path.ant_glob("src/**/*.js")], _tty_out=False) # no tty because there are none in the cloudpebble sandbox.
        except ErrorReturnCode_2 as e:
            ctx.fatal("\\nJavaScript linting failed (you can disable this in Project Settings):\\n" + e.stdout)
    # Concatenate all our JS files (but not recursively), and only if any JS exists in the first place.
    ctx.path.make_node('src/js/').mkdir()
    js_paths = ctx.path.ant_glob(['src/*.js', 'src/**/*.js'])
    if js_paths:
        ctx(rule='cat ${SRC} > ${TGT}', source=js_paths, target='pebble-js-app.js')
        has_js = True
    else:
        has_js = False
    ctx.load('pebble_sdk')
    build_worker = os.path.exists('worker_src')
    binaries = []
    for p in ctx.env.target_platforms:
        app_elf='{}/pebble-app.elf'.format(p)
        ctx.pbl_program(source=ctx.path.ant_glob('src/**/*.c'),
                        target=app_elf,
                        platform=p)
        if build_worker:
            worker_elf='{}/pebble-worker.elf'.format(p)
            binaries.append({'platform': p, 'app_elf': app_elf, 'worker_elf': worker_elf})
            ctx.pbl_worker(source=ctx.path.ant_glob('worker_src/**/*.c'),
                           target=worker_elf,
                           platform=p)
        else:
            binaries.append({'platform': p, 'app_elf': app_elf})
    ctx.pbl_bundle(binaries=binaries, js='pebble-js-app.js' if has_js else [])
"""
    return wscript.replace('{{jshint}}', 'True' if jshint and not for_export else 'False')
def generate_wscript_file(project, for_export=False):
    """Return the waf wscript content appropriate for the project's SDK.

    Previously an unrecognised `sdk_version` silently returned None (which
    would then be written out as the literal text "None"); now it raises,
    matching the error style of the other dispatchers in this module.
    """
    if project.sdk_version == '2':
        return generate_wscript_file_sdk2(project, for_export)
    elif project.sdk_version == '3':
        return generate_wscript_file_sdk3(project, for_export)
    else:
        raise Exception(_("Unknown SDK version %s") % project.sdk_version)
def generate_jshint_file(project):
    """Return the default `pebble-jshintrc` contents.

    The `project` argument is currently unused; the configuration is the
    same for every project.
    """
    # NOTE(review): indentation inside this literal was reconstructed —
    # the content (a jshint config with comments) is what matters to jshint.
    return """
/*
 * Example jshint configuration file for Pebble development.
 *
 * Check out the full documentation at http://www.jshint.com/docs/options/
 */
{
  // Declares the existence of the globals available in PebbleKit JS.
  "globals": {
    "Pebble": true,
    "console": true,
    "XMLHttpRequest": true,
    "navigator": true, // For navigator.geolocation
    "localStorage": true,
    "setTimeout": true,
    "setInterval": true
  },
  // Do not mess with standard JavaScript objects (Array, Date, etc)
  "freeze": true,
  // Do not use eval! Keep this warning turned on (ie: false)
  "evil": false,
  /*
   * The options below are more style/developer dependent.
   * Customize to your liking.
   */
  // All variables should be in camelcase - too specific for CloudPebble builds to fail
  // "camelcase": true,
  // Do not allow blocks without { } - too specific for CloudPebble builds to fail.
  // "curly": true,
  // Prohibits the use of immediate function invocations without wrapping them in parentheses
  "immed": true,
  // Don't enforce indentation, because it's not worth failing builds over
  // (especially given our somewhat lacklustre support for it)
  "indent": false,
  // Do not use a variable before it's defined
  "latedef": "nofunc",
  // Spot undefined variables
  "undef": "true",
  // Spot unused variables
  "unused": "true"
}
"""
def generate_manifest(project, resources):
    """Serialise the manifest (appinfo.json) appropriate for the project type."""
    kind = project.project_type
    if kind == 'native':
        return generate_v2_manifest(project, resources)
    if kind == 'pebblejs':
        return generate_pebblejs_manifest(project, resources)
    if kind == 'simplyjs':
        return generate_simplyjs_manifest(project)
    raise Exception(_("Unknown project type %s") % kind)
def generate_v2_manifest(project, resources):
    """Serialise the SDK2 appinfo.json for a native project as pretty JSON."""
    return dict_to_pretty_json(generate_v2_manifest_dict(project, resources))
def generate_v2_manifest_dict(project, resources):
    """Build the appinfo.json dict for an SDK2 native project.

    `app_keys` is stored on the project as a JSON string and decoded here;
    `app_capabilities` is a comma-separated string split into a list.
    """
    manifest = {
        'uuid': str(project.app_uuid),
        'shortName': project.app_short_name,
        'longName': project.app_long_name,
        'companyName': project.app_company_name,
        'versionCode': project.app_version_code,
        'versionLabel': project.app_version_label,
        'watchapp': {
            'watchface': project.app_is_watchface
        },
        'appKeys': json.loads(project.app_keys),
        'resources': generate_resource_dict(project, resources),
        'capabilities': project.app_capabilities.split(','),
        'projectType': 'native',
        'sdkVersion': "2",
    }
    return manifest
def generate_v3_manifest_dict(project, resources):
    """Build the SDK3 manifest dict by extending the v2 manifest.

    Bug fix: only emit 'targetPlatforms' when the project actually has a
    platform list — previously a null/empty `app_platform_list` was written
    into the manifest unconditionally.
    """
    # Just extend the v2 one.
    manifest = generate_v2_manifest_dict(project, resources)
    if project.app_platform_list:
        manifest['targetPlatforms'] = project.app_platform_list
    manifest['sdkVersion'] = "3"
    return manifest
def generate_manifest_dict(project, resources):
    """Return the manifest dict for any project type.

    Native projects dispatch on sdk_version (v2 vs v3); other types have a
    single manifest shape each.
    """
    if project.project_type == 'native':
        if project.sdk_version == '2':
            return generate_v2_manifest_dict(project, resources)
        else:
            return generate_v3_manifest_dict(project, resources)
    elif project.project_type == 'simplyjs':
        return generate_simplyjs_manifest_dict(project)
    elif project.project_type == 'pebblejs':
        return generate_pebblejs_manifest_dict(project, resources)
    else:
        raise Exception(_("Unknown project type %s") % project.project_type)
def generate_resource_map(project, resources):
    """Serialise the project's resource map as pretty JSON."""
    return dict_to_pretty_json(generate_resource_dict(project, resources))
def dict_to_pretty_json(d):
    """Serialise *d* as human-readable JSON: sorted keys, four-space indent,
    and a trailing newline."""
    pretty = json.dumps(d, sort_keys=True, indent=4, separators=(',', ': '))
    return pretty + "\n"
def generate_resource_dict(project, resources):
    """Return the 'media' resource dict appropriate for the project type."""
    if project.project_type == 'native':
        return generate_v2_resource_dict(resources)
    elif project.project_type == 'simplyjs':
        # simply.js projects have a fixed resource set; `resources` is unused.
        return generate_simplyjs_resource_dict()
    elif project.project_type == 'pebblejs':
        return generate_pebblejs_resource_dict(resources)
    else:
        raise Exception(_("Unknown project type %s") % project.project_type)
def generate_v2_resource_dict(resources):
    """Build the 'resources' manifest section for a native project.

    Each resource may expose several identifiers; one media entry is
    emitted per identifier, carrying only the optional keys that are set.
    """
    media = []
    for resource in resources:
        for identifier in resource.get_identifiers():
            entry = {
                'type': resource.kind,
                'file': resource.root_path,
                'name': identifier.resource_id,
            }
            # Optional per-identifier font tweaks.
            if identifier.character_regex:
                entry['characterRegex'] = identifier.character_regex
            if identifier.tracking:
                entry['trackingAdjust'] = identifier.tracking
            if resource.is_menu_icon:
                entry['menuIcon'] = True
            if identifier.compatibility is not None:
                entry['compatibility'] = identifier.compatibility
            media.append(entry)
    return {'media': media}
def generate_simplyjs_resource_dict():
    """Return the fixed resource set bundled with every simply.js project."""
    menu_icon = {
        "menuIcon": True,
        "type": "png",
        "name": "IMAGE_MENU_ICON",
        "file": "images/menu_icon.png",
    }
    splash = {
        "type": "png",
        "name": "IMAGE_LOGO_SPLASH",
        "file": "images/logo_splash.png",
    }
    mono_font = {
        "type": "font",
        "name": "MONO_FONT_14",
        "file": "fonts/UbuntuMono-Regular.ttf",
    }
    return {"media": [menu_icon, splash, mono_font]}
def generate_pebblejs_resource_dict(resources):
    """Build the resource map for a pebble.js project.

    Starts from the fixed pebble.js asset set and appends every PNG the
    user added; a user-supplied menu icon replaces the default one.
    """
    media = [
        {
            "menuIcon": True,  # This must be the first entry; we adjust it later.
            "type": "png",
            "name": "IMAGE_MENU_ICON",
            "file": "images/menu_icon.png",
        },
        {
            "type": "png",
            "name": "IMAGE_LOGO_SPLASH",
            "file": "images/logo_splash.png",
        },
        {
            "type": "png",
            "name": "IMAGE_TILE_SPLASH",
            "file": "images/tile_splash.png",
        },
        {
            "type": "font",
            "name": "MONO_FONT_14",
            "file": "fonts/UbuntuMono-Regular.ttf",
        },
    ]
    for resource in resources:
        # Only PNG resources are supported in pebble.js projects.
        if resource.kind != 'png':
            continue
        entry = {
            'type': resource.kind,
            'file': resource.path,
            # Resource names are the upper-cased path, non [A-Z0-9_] -> '_'.
            'name': re.sub(r'[^A-Z0-9_]', '_', resource.path.upper()),
        }
        if resource.is_menu_icon:
            entry['menuIcon'] = True
            # The user's icon supersedes the built-in default.
            del media[0]['menuIcon']
        media.append(entry)
    return {'media': media}
def generate_simplyjs_manifest(project):
    """Serialise the simply.js manifest as pretty JSON."""
    return dict_to_pretty_json(generate_simplyjs_manifest_dict(project))
def generate_simplyjs_manifest_dict(project):
    """Build the manifest dict for a simply.js project.

    simply.js apps have no app keys and a fixed resource set.
    """
    manifest = {
        "uuid": project.app_uuid,
        "shortName": project.app_short_name,
        "longName": project.app_long_name,
        "companyName": project.app_company_name,
        "versionCode": project.app_version_code,
        "versionLabel": project.app_version_label,
        "capabilities": project.app_capabilities.split(','),
        "watchapp": {
            "watchface": project.app_is_watchface
        },
        "appKeys": {},
        "resources": generate_simplyjs_resource_dict(),
        "projectType": "simplyjs"
    }
    return manifest
def generate_pebblejs_manifest(project, resources):
    """Serialise the pebble.js manifest as pretty JSON."""
    return dict_to_pretty_json(generate_pebblejs_manifest_dict(project, resources))
def generate_pebblejs_manifest_dict(project, resources):
    """Build the manifest dict for a pebble.js project.

    Like simply.js but the resource set includes the user's PNG resources.
    """
    manifest = {
        "uuid": project.app_uuid,
        "shortName": project.app_short_name,
        "longName": project.app_long_name,
        "companyName": project.app_company_name,
        "versionCode": project.app_version_code,
        "versionLabel": project.app_version_label,
        "capabilities": project.app_capabilities.split(','),
        "watchapp": {
            "watchface": project.app_is_watchface
        },
        "appKeys": {},
        "resources": generate_pebblejs_resource_dict(resources),
        "projectType": "pebblejs"
    }
    return manifest
Don't explode on a null platform list (follow-up).
import json
import re
from django.utils.translation import ugettext as _
from ide.models.files import ResourceFile, ResourceIdentifier
__author__ = 'katharine'
def generate_wscript_file_sdk2(project, for_export=False):
    """Return the waf `wscript` contents for an SDK 2 project.

    The `{{jshint}}` placeholder in the template is replaced with a Python
    boolean literal; linting is disabled for exported projects so that
    builds outside the CloudPebble sandbox don't require jshint.
    """
    jshint = project.app_jshint
    # NOTE(review): indentation inside this template was reconstructed from
    # the standard waf wscript layout — confirm against the shipped template.
    wscript = """
#
# This file is the default set of rules to compile a Pebble project.
#
# Feel free to customize this to your needs.
#
import os.path
try:
    from sh import CommandNotFound, jshint, cat, ErrorReturnCode_2
    hint = jshint
except (ImportError, CommandNotFound):
    hint = None
top = '.'
out = 'build'
def options(ctx):
    ctx.load('pebble_sdk')
def configure(ctx):
    ctx.load('pebble_sdk')
    global hint
    if hint is not None:
        hint = hint.bake(['--config', 'pebble-jshintrc'])
def build(ctx):
    if {{jshint}} and hint is not None:
        try:
            hint([node.abspath() for node in ctx.path.ant_glob("src/**/*.js")], _tty_out=False) # no tty because there are none in the cloudpebble sandbox.
        except ErrorReturnCode_2 as e:
            ctx.fatal("\\nJavaScript linting failed (you can disable this in Project Settings):\\n" + e.stdout)
    # Concatenate all our JS files (but not recursively), and only if any JS exists in the first place.
    ctx.path.make_node('src/js/').mkdir()
    js_paths = ctx.path.ant_glob(['src/*.js', 'src/**/*.js'])
    if js_paths:
        ctx(rule='cat ${SRC} > ${TGT}', source=js_paths, target='pebble-js-app.js')
        has_js = True
    else:
        has_js = False
    ctx.load('pebble_sdk')
    ctx.pbl_program(source=ctx.path.ant_glob('src/**/*.c'),
                    target='pebble-app.elf')
    if os.path.exists('worker_src'):
        ctx.pbl_worker(source=ctx.path.ant_glob('worker_src/**/*.c'),
                       target='pebble-worker.elf')
        ctx.pbl_bundle(elf='pebble-app.elf',
                       worker_elf='pebble-worker.elf',
                       js='pebble-js-app.js' if has_js else [])
    else:
        ctx.pbl_bundle(elf='pebble-app.elf',
                       js='pebble-js-app.js' if has_js else [])
"""
    return wscript.replace('{{jshint}}', 'True' if jshint and not for_export else 'False')
def generate_wscript_file_sdk3(project, for_export):
    """Return the waf `wscript` contents for an SDK 3 project.

    Unlike SDK 2, SDK 3 builds one ELF per target platform and bundles
    them all via `pbl_bundle(binaries=...)`.
    """
    jshint = project.app_jshint
    # NOTE(review): indentation inside this template was reconstructed from
    # the standard waf wscript layout — confirm against the shipped template.
    wscript = """
#
# This file is the default set of rules to compile a Pebble project.
#
# Feel free to customize this to your needs.
#
import os.path
try:
    from sh import CommandNotFound, jshint, cat, ErrorReturnCode_2
    hint = jshint
except (ImportError, CommandNotFound):
    hint = None
top = '.'
out = 'build'
def options(ctx):
    ctx.load('pebble_sdk')
def configure(ctx):
    ctx.load('pebble_sdk')
def build(ctx):
    if {{jshint}} and hint is not None:
        try:
            hint([node.abspath() for node in ctx.path.ant_glob("src/**/*.js")], _tty_out=False) # no tty because there are none in the cloudpebble sandbox.
        except ErrorReturnCode_2 as e:
            ctx.fatal("\\nJavaScript linting failed (you can disable this in Project Settings):\\n" + e.stdout)
    # Concatenate all our JS files (but not recursively), and only if any JS exists in the first place.
    ctx.path.make_node('src/js/').mkdir()
    js_paths = ctx.path.ant_glob(['src/*.js', 'src/**/*.js'])
    if js_paths:
        ctx(rule='cat ${SRC} > ${TGT}', source=js_paths, target='pebble-js-app.js')
        has_js = True
    else:
        has_js = False
    ctx.load('pebble_sdk')
    build_worker = os.path.exists('worker_src')
    binaries = []
    for p in ctx.env.target_platforms:
        app_elf='{}/pebble-app.elf'.format(p)
        ctx.pbl_program(source=ctx.path.ant_glob('src/**/*.c'),
                        target=app_elf,
                        platform=p)
        if build_worker:
            worker_elf='{}/pebble-worker.elf'.format(p)
            binaries.append({'platform': p, 'app_elf': app_elf, 'worker_elf': worker_elf})
            ctx.pbl_worker(source=ctx.path.ant_glob('worker_src/**/*.c'),
                           target=worker_elf,
                           platform=p)
        else:
            binaries.append({'platform': p, 'app_elf': app_elf})
    ctx.pbl_bundle(binaries=binaries, js='pebble-js-app.js' if has_js else [])
"""
    return wscript.replace('{{jshint}}', 'True' if jshint and not for_export else 'False')
def generate_wscript_file(project, for_export=False):
    """Return the waf wscript content appropriate for the project's SDK.

    Previously an unrecognised `sdk_version` silently returned None (which
    would then be written out as the literal text "None"); now it raises,
    matching the error style of the other dispatchers in this module.
    """
    if project.sdk_version == '2':
        return generate_wscript_file_sdk2(project, for_export)
    elif project.sdk_version == '3':
        return generate_wscript_file_sdk3(project, for_export)
    else:
        raise Exception(_("Unknown SDK version %s") % project.sdk_version)
def generate_jshint_file(project):
    """Return the default `pebble-jshintrc` contents.

    The `project` argument is currently unused; the configuration is the
    same for every project.
    """
    # NOTE(review): indentation inside this literal was reconstructed —
    # the content (a jshint config with comments) is what matters to jshint.
    return """
/*
 * Example jshint configuration file for Pebble development.
 *
 * Check out the full documentation at http://www.jshint.com/docs/options/
 */
{
  // Declares the existence of the globals available in PebbleKit JS.
  "globals": {
    "Pebble": true,
    "console": true,
    "XMLHttpRequest": true,
    "navigator": true, // For navigator.geolocation
    "localStorage": true,
    "setTimeout": true,
    "setInterval": true
  },
  // Do not mess with standard JavaScript objects (Array, Date, etc)
  "freeze": true,
  // Do not use eval! Keep this warning turned on (ie: false)
  "evil": false,
  /*
   * The options below are more style/developer dependent.
   * Customize to your liking.
   */
  // All variables should be in camelcase - too specific for CloudPebble builds to fail
  // "camelcase": true,
  // Do not allow blocks without { } - too specific for CloudPebble builds to fail.
  // "curly": true,
  // Prohibits the use of immediate function invocations without wrapping them in parentheses
  "immed": true,
  // Don't enforce indentation, because it's not worth failing builds over
  // (especially given our somewhat lacklustre support for it)
  "indent": false,
  // Do not use a variable before it's defined
  "latedef": "nofunc",
  // Spot undefined variables
  "undef": "true",
  // Spot unused variables
  "unused": "true"
}
"""
def generate_manifest(project, resources):
    """Serialise the manifest (appinfo.json) appropriate for the project type.

    Bug fix: the 'native' branch called generate_manifest itself, which
    recursed until RecursionError; it must delegate to generate_v2_manifest.
    """
    if project.project_type == 'native':
        return generate_v2_manifest(project, resources)
    elif project.project_type == 'pebblejs':
        return generate_pebblejs_manifest(project, resources)
    elif project.project_type == 'simplyjs':
        return generate_simplyjs_manifest(project)
    else:
        raise Exception(_("Unknown project type %s") % project.project_type)
def generate_v2_manifest(project, resources):
    """Serialise the SDK2 appinfo.json for a native project as pretty JSON."""
    return dict_to_pretty_json(generate_v2_manifest_dict(project, resources))
def generate_v2_manifest_dict(project, resources):
    """Build the appinfo.json dict for an SDK2 native project.

    `app_keys` is stored on the project as a JSON string and decoded here;
    `app_capabilities` is a comma-separated string split into a list.
    """
    manifest = {
        'uuid': str(project.app_uuid),
        'shortName': project.app_short_name,
        'longName': project.app_long_name,
        'companyName': project.app_company_name,
        'versionCode': project.app_version_code,
        'versionLabel': project.app_version_label,
        'watchapp': {
            'watchface': project.app_is_watchface
        },
        'appKeys': json.loads(project.app_keys),
        'resources': generate_resource_dict(project, resources),
        'capabilities': project.app_capabilities.split(','),
        'projectType': 'native',
        'sdkVersion': "2",
    }
    return manifest
def generate_v3_manifest_dict(project, resources):
    """Build the SDK3 manifest dict by extending the v2 manifest.

    Bug fix: the `len(...) > 0` guard still raised TypeError when
    `app_platform_list` was None — the very case the guard was added for.
    A plain truthiness check handles both None and the empty list.
    """
    # Just extend the v2 one.
    manifest = generate_v2_manifest_dict(project, resources)
    if project.app_platform_list:
        manifest['targetPlatforms'] = project.app_platform_list
    manifest['sdkVersion'] = "3"
    return manifest
def generate_manifest_dict(project, resources):
    """Return the manifest dict for any project type.

    Native projects dispatch on sdk_version (v2 vs v3); other types have a
    single manifest shape each.
    """
    if project.project_type == 'native':
        if project.sdk_version == '2':
            return generate_v2_manifest_dict(project, resources)
        else:
            return generate_v3_manifest_dict(project, resources)
    elif project.project_type == 'simplyjs':
        return generate_simplyjs_manifest_dict(project)
    elif project.project_type == 'pebblejs':
        return generate_pebblejs_manifest_dict(project, resources)
    else:
        raise Exception(_("Unknown project type %s") % project.project_type)
def generate_resource_map(project, resources):
    """Serialise the project's resource map as pretty JSON."""
    return dict_to_pretty_json(generate_resource_dict(project, resources))
def dict_to_pretty_json(d):
    """Serialise *d* as human-readable JSON: sorted keys, four-space indent,
    and a trailing newline."""
    return json.dumps(d, indent=4, separators=(',', ': '), sort_keys=True) + "\n"
def generate_resource_dict(project, resources):
    """Return the 'media' resource dict appropriate for the project type."""
    kind = project.project_type
    if kind == 'native':
        return generate_v2_resource_dict(resources)
    if kind == 'simplyjs':
        # simply.js projects have a fixed resource set; `resources` is unused.
        return generate_simplyjs_resource_dict()
    if kind == 'pebblejs':
        return generate_pebblejs_resource_dict(resources)
    raise Exception(_("Unknown project type %s") % kind)
def generate_v2_resource_dict(resources):
    """Build the 'resources' manifest section for a native project.

    One media entry is emitted per resource identifier; optional keys
    (characterRegex, trackingAdjust, menuIcon, compatibility) are only
    included when set.
    """
    resource_map = {'media': []}
    for resource in resources:
        for resource_id in resource.get_identifiers():
            d = {
                'type': resource.kind,
                'file': resource.root_path,
                'name': resource_id.resource_id,
            }
            if resource_id.character_regex:
                d['characterRegex'] = resource_id.character_regex
            if resource_id.tracking:
                d['trackingAdjust'] = resource_id.tracking
            if resource.is_menu_icon:
                d['menuIcon'] = True
            if resource_id.compatibility is not None:
                d['compatibility'] = resource_id.compatibility
            resource_map['media'].append(d)
    return resource_map
def generate_simplyjs_resource_dict():
    """Return the fixed resource set bundled with every simply.js project."""
    return {
        "media": [
            {
                "menuIcon": True,
                "type": "png",
                "name": "IMAGE_MENU_ICON",
                "file": "images/menu_icon.png"
            }, {
                "type": "png",
                "name": "IMAGE_LOGO_SPLASH",
                "file": "images/logo_splash.png"
            }, {
                "type": "font",
                "name": "MONO_FONT_14",
                "file": "fonts/UbuntuMono-Regular.ttf"
            }
        ]
    }
def generate_pebblejs_resource_dict(resources):
    """Build the resource map for a pebble.js project.

    Starts from the fixed pebble.js asset set, then appends every PNG the
    user added; a user-supplied menu icon replaces the built-in default.
    """
    media = [
        {
            "menuIcon": True,  # This must be the first entry; we adjust it later.
            "type": "png",
            "name": "IMAGE_MENU_ICON",
            "file": "images/menu_icon.png"
        }, {
            "type": "png",
            "name": "IMAGE_LOGO_SPLASH",
            "file": "images/logo_splash.png"
        }, {
            "type": "png",
            "name": "IMAGE_TILE_SPLASH",
            "file": "images/tile_splash.png"
        }, {
            "type": "font",
            "name": "MONO_FONT_14",
            "file": "fonts/UbuntuMono-Regular.ttf"
        }
    ]
    for resource in resources:
        # Only PNG resources are supported in pebble.js projects.
        if resource.kind != 'png':
            continue
        d = {
            'type': resource.kind,
            'file': resource.path,
            # Resource names are the upper-cased path, non [A-Z0-9_] -> '_'.
            'name': re.sub(r'[^A-Z0-9_]', '_', resource.path.upper()),
        }
        if resource.is_menu_icon:
            d['menuIcon'] = True
            # The user's icon supersedes the built-in default.
            del media[0]['menuIcon']
        media.append(d)
    return {
        'media': media
    }
def generate_simplyjs_manifest(project):
    """Serialise the simply.js manifest as pretty JSON."""
    return dict_to_pretty_json(generate_simplyjs_manifest_dict(project))
def generate_simplyjs_manifest_dict(project):
    """Build the manifest dict for a simply.js project.

    simply.js apps have no app keys and a fixed resource set.
    """
    manifest = {
        "uuid": project.app_uuid,
        "shortName": project.app_short_name,
        "longName": project.app_long_name,
        "companyName": project.app_company_name,
        "versionCode": project.app_version_code,
        "versionLabel": project.app_version_label,
        "capabilities": project.app_capabilities.split(','),
        "watchapp": {
            "watchface": project.app_is_watchface
        },
        "appKeys": {},
        "resources": generate_simplyjs_resource_dict(),
        "projectType": "simplyjs"
    }
    return manifest
def generate_pebblejs_manifest(project, resources):
    """Serialise the pebble.js manifest as pretty JSON."""
    return dict_to_pretty_json(generate_pebblejs_manifest_dict(project, resources))
def generate_pebblejs_manifest_dict(project, resources):
    """Build the manifest dict for a pebble.js project.

    Like simply.js but the resource set includes the user's PNG resources.
    """
    manifest = {
        "uuid": project.app_uuid,
        "shortName": project.app_short_name,
        "longName": project.app_long_name,
        "companyName": project.app_company_name,
        "versionCode": project.app_version_code,
        "versionLabel": project.app_version_label,
        "capabilities": project.app_capabilities.split(','),
        "watchapp": {
            "watchface": project.app_is_watchface
        },
        "appKeys": {},
        "resources": generate_pebblejs_resource_dict(resources),
        "projectType": "pebblejs"
    }
    return manifest
|
########
# Copyright (c) 2014 GigaSpaces Technologies Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
############
import os
import shutil
import tarfile
from contextlib import closing
from cloudify_rest_client.exceptions import CloudifyClientError, \
UserUnauthorizedError
from . import init
from .. import env
from ..table import print_data
from .. import utils
from ..cli import cfy
from .. import constants
from ..cli import helptexts
from ..exceptions import CloudifyCliError
# Directory (under the profiles dir) into which ssh keys are gathered when
# profiles are exported.
EXPORTED_KEYS_DIRNAME = '.exported-ssh-keys'
EXPORTED_SSH_KEYS_DIR = os.path.join(env.PROFILES_DIR, EXPORTED_KEYS_DIRNAME)
# Column order used by the `cfy profiles list` / `show-current` tables.
PROFILE_COLUMNS = ['name', 'manager_ip', 'manager_username', 'manager_tenant',
                   'ssh_user', 'ssh_key_path', 'ssh_port',
                   'rest_port', 'rest_protocol', 'rest_certificate']
@cfy.group(name='profiles')
@cfy.options.verbose()
def profiles():
    """Handle Cloudify CLI profiles

    Each profile can manage a single Cloudify manager.

    A profile is automatically created when using the `cfy profiles use`
    command.

    Profiles are named according to the IP of the manager they manage.
    """
    # Group callback: guarantee a local profile exists before any subcommand.
    if not env.is_initialized():
        init.init_local_profile()
@profiles.command(name='show-current',
                  short_help='Retrieve current profile information')
@cfy.options.verbose()
@cfy.pass_logger
def show(logger):
    """Shows your current active profile and it's properties
    """
    active_profile_name = env.get_active_profile()
    # Local mode has no manager profile to display.
    if active_profile_name == 'local':
        logger.info("You're currently working in local mode. "
                    "To use a manager run `cfy profiles use MANAGER_IP`")
        return
    # NOTE(review): active_profile_name is already in hand; the second
    # env.get_active_profile() call is redundant but harmless.
    active_profile = _get_profile(env.get_active_profile())
    print_data(PROFILE_COLUMNS, active_profile, 'Active profile:')
@profiles.command(name='list',
                  short_help='List profiles')
@cfy.options.verbose()
@cfy.pass_logger
# NOTE(review): the callback shadows the builtin `list`; renaming would be
# cleaner but the function name is part of this module's public surface.
def list(logger):
    """List all profiles
    """
    current_profile = env.get_active_profile()
    profiles = []
    profile_names = env.get_profile_names()
    for profile in profile_names:
        profile_data = _get_profile(profile)
        if profile == current_profile:
            # Show the currently active profile by appending *
            profile_data['name'] = '*' + profile_data['name']
        profiles.append(profile_data)
    if profiles:
        logger.info('Listing all profiles...')
        print_data(PROFILE_COLUMNS, profiles, 'Profiles:')
    if not profile_names:
        logger.info(
            'No profiles found. You can create a new profile '
            'by using an existing manager via the `cfy profiles use` command')
@profiles.command(name='use',
                  short_help='Control a specific manager')
@cfy.argument('manager-ip')
@cfy.options.profile_name
@cfy.options.ssh_user
@cfy.options.ssh_key
@cfy.options.ssh_port
@cfy.options.manager_username
@cfy.options.manager_password
@cfy.options.manager_tenant
@cfy.options.rest_port
@cfy.options.ssl_rest
@cfy.options.rest_certificate
@cfy.options.skip_credentials_validation
@cfy.options.verbose()
@cfy.pass_logger
def use(manager_ip,
        profile_name,
        skip_credentials_validation,
        logger,
        **kwargs):
    """Control a specific manager

    `PROFILE_NAME` can be either a manager IP or `local`.

    Additional CLI commands will be added after a manager is used.

    To stop using a manager, you can run `cfy init -r`.
    """
    # A profile defaults to being named after the manager's IP.
    if not profile_name:
        profile_name = manager_ip
    # 'local' is a reserved profile name meaning "no manager".
    if profile_name == 'local':
        logger.info('Using local environment...')
        if not env.is_profile_exists(profile_name):
            init.init_local_profile()
        env.set_active_profile('local')
        return
    if env.is_profile_exists(profile_name):
        # Existing profile: just activate it (extra options are ignored).
        _switch_profile(
            manager_ip=manager_ip,
            profile_name=profile_name,
            logger=logger,
            **kwargs)
    else:
        # New profile: verify connectivity/credentials, then persist it.
        _create_profile(
            manager_ip=manager_ip,
            profile_name=profile_name,
            skip_credentials_validation=skip_credentials_validation,
            logger=logger,
            **kwargs)
def _switch_profile(manager_ip, profile_name, logger, **kwargs):
    """Activate an existing profile.

    Any extra CLI options are ignored (with a warning) — the supported way
    to modify an existing profile is `cfy profiles set`.
    """
    ignored = [name for name, value in kwargs.items() if value]
    if ignored:
        logger.warning('Profile {0} already exists. '
                       'The passed in options are ignored: {1}. '
                       'To update the profile, use `cfy profiles set`'
                       .format(profile_name, ', '.join(ignored)))
    env.set_active_profile(profile_name)
    logger.info('Using manager {0}'.format(profile_name))
def _create_profile(
        manager_ip,
        profile_name,
        ssh_user,
        ssh_key,
        ssh_port,
        manager_username,
        manager_password,
        manager_tenant,
        rest_port,
        ssl,
        rest_certificate,
        skip_credentials_validation,
        logger):
    """Create and activate a new manager profile.

    The manager is contacted first (unless validation is skipped) so a
    profile directory is only created for a reachable manager.
    """
    # If REST certificate is provided, then automatically
    # assume SSL.
    if rest_certificate:
        ssl = True
    rest_protocol = constants.SECURED_REST_PROTOCOL if ssl else \
        constants.DEFAULT_REST_PROTOCOL
    # Default the port to match the chosen protocol unless given explicitly.
    if not rest_port:
        rest_port = constants.SECURED_REST_PORT if ssl else \
            constants.DEFAULT_REST_PORT
    logger.info('Attempting to connect to {0} through port {1}, using {2} '
                '(SSL mode: {3})...'.format(manager_ip, rest_port,
                                            rest_protocol, ssl))
    # First, attempt to get the provider from the manager - should it fail,
    # the manager's profile directory won't be created
    provider_context = _get_provider_context(
        profile_name,
        manager_ip,
        rest_port,
        rest_protocol,
        rest_certificate,
        manager_username,
        manager_password,
        manager_tenant,
        skip_credentials_validation
    )
    init.init_manager_profile(profile_name=profile_name)
    env.set_active_profile(profile_name)
    logger.info('Using manager {0} with port {1}'.format(
        manager_ip, rest_port))
    # Persist all connection details into the new profile's context.
    _set_profile_context(
        profile_name,
        provider_context,
        manager_ip,
        ssh_key,
        ssh_user,
        ssh_port,
        manager_username,
        manager_password,
        manager_tenant,
        rest_port,
        rest_protocol,
        rest_certificate
    )
@profiles.command(name='delete',
                  short_help='Delete a profile')
@cfy.argument('profile-name')
@cfy.options.verbose()
@cfy.pass_logger
def delete(profile_name, logger):
    """Delete a profile

    `PROFILE_NAME` is the IP of the manager the profile manages.
    """
    logger.info('Deleting profile {0}...'.format(profile_name))
    try:
        env.delete_profile(profile_name)
        logger.info('Profile deleted')
    except CloudifyCliError as ex:
        # Deletion failures (e.g. unknown profile) are reported, not raised.
        logger.info(str(ex))
def set_profile(profile_name,
                manager_username,
                manager_password,
                manager_tenant,
                ssh_user,
                ssh_key,
                ssh_port,
                ssl,
                rest_certificate,
                skip_credentials_validation,
                logger):
    """Set the profile name, manager username and/or password and/or tenant
    and/or ssl state (on/off) in the *current* profile
    """
    # Refuse a no-op invocation: at least one option must be supplied.
    if not any([profile_name, ssh_user, ssh_key, ssh_port, manager_username,
                manager_password, manager_tenant, ssl, rest_certificate]):
        raise CloudifyCliError(
            "You must supply at least one of the following: "
            "profile name, username, password, tenant, "
            "ssl, rest certificate, ssh user, ssh key, ssh port")
    # Validate the effective (new-or-existing) credentials before saving.
    username = manager_username or env.get_username()
    password = manager_password or env.get_password()
    tenant = manager_tenant or env.get_tenant_name()
    if not skip_credentials_validation:
        _validate_credentials(username, password, tenant, rest_certificate)
    # Renaming: 'local' is reserved and clobbering an existing profile is
    # refused; the old profile is deleted only after a successful save.
    old_name = None
    if profile_name:
        if profile_name == 'local':
            raise CloudifyCliError('Cannot use the reserved name "local"')
        if env.is_profile_exists(profile_name):
            raise CloudifyCliError('Profile {0} already exists'
                                   .format(profile_name))
        old_name = env.profile.profile_name
        env.profile.profile_name = profile_name
    if manager_username:
        logger.info('Setting username to `{0}`'.format(manager_username))
        env.profile.manager_username = manager_username
    if manager_password:
        logger.info('Setting password to `{0}`'.format(manager_password))
        env.profile.manager_password = manager_password
    if manager_tenant:
        logger.info('Setting tenant to `{0}`'.format(manager_tenant))
        env.profile.manager_tenant = manager_tenant
    # `ssl` is a tri-state CLI value; only 'on'/'off' are accepted.
    if ssl is not None:
        ssl = str(ssl).lower()
        if ssl == 'on':
            logger.info('Enabling SSL in the local profile')
            env.profile.rest_port = constants.SECURED_REST_PORT
            env.profile.rest_protocol = constants.SECURED_REST_PROTOCOL
        elif ssl == 'off':
            logger.info('Disabling SSL in the local profile')
            env.profile.rest_port = constants.DEFAULT_REST_PORT
            env.profile.rest_protocol = constants.DEFAULT_REST_PROTOCOL
        else:
            raise CloudifyCliError('SSL must be either `on` or `off`')
    if rest_certificate:
        logger.info(
            'Setting rest certificate to `{0}`'.format(rest_certificate))
        env.profile.rest_certificate = rest_certificate
    if ssh_user:
        logger.info('Setting ssh user to `{0}`'.format(ssh_user))
        env.profile.ssh_user = ssh_user
    if ssh_key:
        logger.info('Setting ssh key to `{0}`'.format(ssh_key))
        env.profile.ssh_key = ssh_key
    if ssh_port:
        logger.info('Setting ssh port to `{0}`'.format(ssh_port))
        env.profile.ssh_port = ssh_port
    env.profile.save()
    # Complete a rename by switching to the new name and dropping the old.
    if old_name is not None:
        env.set_active_profile(profile_name)
        env.delete_profile(old_name)
    logger.info('Settings saved successfully')
@profiles.command(
    name='set',
    short_help='Set name/manager username/password/tenant in current profile')
@cfy.options.profile_name
@cfy.options.manager_username
@cfy.options.manager_password
@cfy.options.manager_tenant
@cfy.options.ssh_user
@cfy.options.ssh_key
@cfy.options.ssh_port
@cfy.options.ssl_state
@cfy.options.rest_certificate
@cfy.options.skip_credentials_validation
@cfy.options.verbose()
@cfy.pass_logger
# NOTE(review): thin CLI wrapper around set_profile; presumably named
# set_cmd to avoid shadowing the builtin `set`.
def set_cmd(profile_name,
            manager_username,
            manager_password,
            manager_tenant,
            ssh_user,
            ssh_key,
            ssh_port,
            ssl,
            rest_certificate,
            skip_credentials_validation,
            logger):
    return set_profile(profile_name,
                       manager_username,
                       manager_password,
                       manager_tenant,
                       ssh_user,
                       ssh_key,
                       ssh_port,
                       ssl,
                       rest_certificate,
                       skip_credentials_validation,
                       logger)
@profiles.command(
    name='set-cluster',
    short_help='Set connection options for a cluster node')
@cfy.argument('cluster-node-name')
@cfy.options.ssh_user
@cfy.options.ssh_key
@cfy.options.ssh_port
@cfy.options.rest_certificate
@cfy.pass_logger
def set_cluster(cluster_node_name,
                ssh_user,
                ssh_key,
                ssh_port,
                rest_certificate,
                logger):
    """Set connection options for a cluster node.

    `CLUSTER_NODE_NAME` is the name of the cluster node to set options for.
    """
    if not env.profile.cluster:
        err = CloudifyCliError('The current profile is not a cluster profile!')
        err.possible_solutions = [
            "Select a different profile using `cfy profiles use`",
            "Run `cfy cluster update-profile`"
        ]
        raise err
    changed_node = None
    for node in env.profile.cluster:
        if node['name'] == cluster_node_name:
            changed_node = node
            break
    else:
        # for/else: only reached when the loop finished without break,
        # i.e. no node with the given name exists
        raise CloudifyCliError(
            'Node {0} not found in the cluster'.format(cluster_node_name))
    # (CLI value, node-dict key, human-readable label) triples; only
    # options the user actually supplied are written to the node entry
    for source, target, label in [
        (ssh_user, 'ssh_user', 'ssh user'),
        (ssh_key, 'ssh_key', 'ssh key'),
        (ssh_port, 'ssh_port', 'ssh port'),
        (rest_certificate, 'cert', 'rest certificate'),
    ]:
        if source:
            changed_node[target] = source
            logger.info('Node {0}: setting {1} to `{2}`'
                        .format(cluster_node_name, label, source))
    env.profile.save()
    logger.info('Settings saved successfully')
@profiles.command(
    name='unset',
    short_help='Clear manager username/password/tenant from current profile')
@cfy.options.manager_username_flag
@cfy.options.manager_password_flag
@cfy.options.manager_tenant_flag
@cfy.options.ssh_user_flag
@cfy.options.ssh_key_flag
@cfy.options.rest_certificate_flag
@cfy.options.skip_credentials_validation
@cfy.options.verbose()
@cfy.pass_logger
def unset(manager_username,
          manager_password,
          manager_tenant,
          ssh_user,
          ssh_key,
          rest_certificate,
          skip_credentials_validation,
          logger):
    """Clear the manager username and/or password and/or tenant
    from the *current* profile
    """
    if not any([manager_username, manager_password, manager_tenant,
                rest_certificate, ssh_user, ssh_key]):
        raise CloudifyCliError("You must choose at least one of the following:"
                               " username, password, tenant, "
                               "rest certificate, ssh user, ssh key")
    # For each credential about to be cleared, fall back to the environment
    # variable (or default cert path) so the remaining credential set can
    # still be validated below before the profile is modified.
    if manager_username:
        username = os.environ.get(constants.CLOUDIFY_USERNAME_ENV)
    else:
        username = env.profile.manager_username
    if manager_password:
        password = os.environ.get(constants.CLOUDIFY_PASSWORD_ENV)
    else:
        password = env.profile.manager_password
    if manager_tenant:
        tenant = os.environ.get(constants.CLOUDIFY_TENANT_ENV)
    else:
        tenant = env.profile.manager_tenant
    if rest_certificate:
        cert = os.environ.get(constants.LOCAL_REST_CERT_FILE) \
            or env.get_default_rest_cert_local_path()
    else:
        cert = None
    if not skip_credentials_validation:
        _validate_credentials(username, password, tenant, cert)
    # Validation passed (or was skipped) - clear the requested fields
    if manager_username:
        logger.info('Clearing manager username')
        env.profile.manager_username = None
    if manager_password:
        logger.info('Clearing manager password')
        env.profile.manager_password = None
    if manager_tenant:
        logger.info('Clearing manager tenant')
        env.profile.manager_tenant = None
    if rest_certificate:
        logger.info('Clearing rest certificate')
        env.profile.rest_certificate = None
    if ssh_user:
        logger.info('Clearing ssh user')
        env.profile.ssh_user = None
    if ssh_key:
        logger.info('Clearing ssh key')
        env.profile.ssh_key = None
    env.profile.save()
    logger.info('Settings saved successfully')
@profiles.command(name='export',
                  short_help='Export all profiles to an archive')
@cfy.options.include_keys(helptexts.EXPORT_SSH_KEYS)
@cfy.options.optional_output_path
@cfy.options.verbose()
@cfy.pass_logger
def export_profiles(include_keys, output_path, logger):
    """Export all profiles to a file

    WARNING: Including the ssh keys of your profiles in the archive means
    that once the profiles are imported, the ssh keys will be put back
    in their original locations!

    If `-o / --output-path` is omitted, the archive's name will be
    `cfy-profiles.tar.gz`.
    """
    _assert_profiles_exist()
    destination = output_path or \
        os.path.join(os.getcwd(), 'cfy-profiles.tar.gz')
    # TODO: Copy exported ssh keys to each profile's directory
    logger.info('Exporting profiles to {0}...'.format(destination))
    if include_keys:
        # Stage a copy of each profile's ssh key under PROFILES_DIR so the
        # keys are picked up by the tar below
        for profile in env.get_profile_names():
            _backup_ssh_key(profile)
    utils.tar(env.PROFILES_DIR, destination)
    if include_keys:
        # The staging directory was only needed while building the archive
        shutil.rmtree(EXPORTED_SSH_KEYS_DIR)
    logger.info('Export complete!')
    logger.info(
        'You can import the profiles by running '
        '`cfy profiles import PROFILES_ARCHIVE`')
@profiles.command(name='import',
                  short_help='Import profiles from an archive')
@cfy.argument('archive-path')
@cfy.options.include_keys(helptexts.IMPORT_SSH_KEYS)
@cfy.options.verbose()
@cfy.pass_logger
def import_profiles(archive_path, include_keys, logger):
    """Import profiles from a profiles archive

    WARNING: If a profile exists both in the archive and locally
    it will be overwritten (any other profiles will be left intact).

    `ARCHIVE_PATH` is the path to the profiles archive to import.
    """
    _assert_is_tarfile(archive_path)
    _assert_profiles_archive(archive_path)
    logger.info('Importing profiles from {0}...'.format(archive_path))
    # Extract next to PROFILES_DIR so the archive's top-level `profiles`
    # directory lands on top of the existing one
    utils.untar(archive_path, os.path.dirname(env.PROFILES_DIR))
    if include_keys:
        for profile in env.get_profile_names():
            _restore_ssh_key(profile)
    else:
        if EXPORTED_KEYS_DIRNAME in os.listdir(env.PROFILES_DIR):
            # Bug fix: the original message had an unbalanced backtick
            # around `--include-keys`, garbling the rendered hint
            logger.info("The profiles archive you provided contains ssh keys "
                        "for one or more profiles. To restore those keys to "
                        "their original locations, you can use the "
                        "`--include-keys` flag or copy them manually from {0} "
                        .format(EXPORTED_SSH_KEYS_DIR))
    logger.info('Import complete!')
    logger.info('You can list profiles using `cfy profiles list`')
def _assert_profiles_exist():
    """Raise CloudifyCliError when there are no stored profiles to export."""
    stored_profiles = env.get_profile_names()
    if stored_profiles:
        return
    raise CloudifyCliError('No profiles to export')
def _assert_profiles_archive(archive_path):
    """Validate that the tar archive looks like a Cloudify profiles export.

    A valid export has `profiles` as its first (top-level) member.
    Raises CloudifyCliError for empty or foreign archives.
    """
    with closing(tarfile.open(name=archive_path)) as tar:
        members = tar.getmembers()
        # Guard against an empty archive: the original indexed [0]
        # unconditionally, crashing with IndexError instead of a clean error
        if not members or members[0].name != 'profiles':
            raise CloudifyCliError(
                'The archive provided does not seem to be a valid '
                'Cloudify profiles archive')
def _assert_is_tarfile(archive_path):
    """Raise CloudifyCliError unless *archive_path* is a readable tar archive."""
    if tarfile.is_tarfile(archive_path):
        return
    raise CloudifyCliError('The archive provided must be a tar.gz archive')
def _backup_ssh_key(profile):
    # Copy the profile's ssh key into the shared export staging directory
    return _move_ssh_key(profile, is_backup=True)
def _restore_ssh_key(profile):
    # Move the profile's ssh key from the staging directory back into place
    return _move_ssh_key(profile, is_backup=False)
@cfy.pass_logger
def _move_ssh_key(profile, logger, is_backup):
    """Back up or restore a single profile's ssh key.

    Keys are staged under EXPORTED_SSH_KEYS_DIR with a
    `<basename>.<profile>.profile` suffix so keys belonging to different
    profiles cannot collide.
    """
    context = env.get_profile_context(profile)
    key_filepath = context.ssh_key
    if key_filepath:
        backup_path = os.path.join(
            EXPORTED_SSH_KEYS_DIR, os.path.basename(key_filepath)) + \
            '.{0}.profile'.format(profile)
        if is_backup:
            if not os.path.isdir(EXPORTED_SSH_KEYS_DIR):
                os.makedirs(EXPORTED_SSH_KEYS_DIR)
            logger.info('Copying ssh key {0} to {1}...'.format(
                key_filepath, backup_path))
            # copy2 preserves the key file's permissions/metadata
            shutil.copy2(key_filepath, backup_path)
        else:
            # Restore only if this key was actually present in the archive
            if os.path.isfile(backup_path):
                logger.info(
                    'Restoring ssh key for profile {0} to {1}...'.format(
                        profile, key_filepath))
                shutil.move(backup_path, key_filepath)
def _get_profile(profile_name):
    """Return the stored context of *profile_name* as a dict.

    The requested profile is activated while its context is read, and the
    previously active profile is restored afterwards.
    """
    previously_active = env.get_active_profile()
    env.set_active_profile(profile_name)
    profile_context = env.get_profile_context(profile_name)
    env.set_active_profile(previously_active)
    return profile_context.to_dict()
def _assert_manager_available(client, profile_name):
    """Verify the manager behind *client* is reachable and usable.

    Returns the manager status on success; raises CloudifyCliError with a
    human-readable message on authorization failure or any other error.
    """
    try:
        return client.manager.get_status()
    # Bug fix: use `except X as e` (valid on Python 2.6+ and required on
    # Python 3) instead of the removed `except X, e` comma form
    except UserUnauthorizedError as e:
        raise CloudifyCliError(
            "Can't use manager {0}\n{1}.".format(
                profile_name,
                str(e)
            )
        )
    # The problem here is that, for instance,
    # any problem raised by the rest client will trigger this.
    # Triggering a CloudifyClientError only doesn't actually deal
    # with situations like No route to host and the likes.
    except Exception as ex:
        # str(ex) instead of ex.message: `.message` is deprecated on
        # Python 2.6+ and does not exist on Python 3
        raise CloudifyCliError(
            "Can't use manager {0}. {1}".format(profile_name, str(ex)))
def _get_provider_context(profile_name,
                          manager_ip,
                          rest_port,
                          rest_protocol,
                          rest_certificate,
                          manager_username,
                          manager_password,
                          manager_tenant,
                          skip_credentials_validation):
    """Fetch the provider context from the manager.

    Returns None instead of raising when the manager can't be reached and
    `skip_credentials_validation` is set, or when the manager reports no
    context; re-raises connection/credential errors otherwise.
    """
    try:
        client = _get_client_and_assert_manager(
            profile_name,
            manager_ip,
            rest_port,
            rest_protocol,
            rest_certificate,
            manager_username,
            manager_password,
            manager_tenant
        )
    except CloudifyCliError:
        if skip_credentials_validation:
            return None
        raise
    try:
        response = client.manager.get_context()
        return response['context']
    except CloudifyClientError:
        return None
def _get_client_and_assert_manager(profile_name,
                                   manager_ip=None,
                                   rest_port=None,
                                   rest_protocol=None,
                                   rest_certificate=None,
                                   manager_username=None,
                                   manager_password=None,
                                   manager_tenant=None):
    """Build a rest client for *profile_name* and verify the manager is up.

    Returns the client; raises CloudifyCliError (via
    _assert_manager_available) when the manager is unreachable or the
    credentials are rejected.
    """
    # Attempt to update the profile with an existing profile context, if one
    # is available. This is relevant in case the user didn't pass a username
    # or a password, and was expecting them to be taken from the old profile
    env.profile = env.get_profile_context(profile_name, suppress_error=True)
    client = env.get_rest_client(
        rest_host=manager_ip,
        rest_port=rest_port,
        rest_protocol=rest_protocol,
        rest_cert=rest_certificate,
        skip_version_check=True,
        username=manager_username,
        password=manager_password,
        tenant_name=manager_tenant
    )
    _assert_manager_available(client, profile_name)
    return client
def _set_profile_context(profile_name,
                         provider_context,
                         manager_ip,
                         ssh_key,
                         ssh_user,
                         ssh_port,
                         manager_username,
                         manager_password,
                         manager_tenant,
                         rest_port,
                         rest_protocol,
                         rest_certificate):
    """Persist a freshly-created profile's settings to disk.

    Optional values are only written when provided; ssh_port falls back to
    the default remote execution port. rest_protocol/rest_certificate are
    written unconditionally.
    """
    profile = env.get_profile_context(profile_name)
    profile.provider_context = provider_context
    if profile_name:
        profile.profile_name = profile_name
    if manager_ip:
        profile.manager_ip = manager_ip
    if ssh_key:
        profile.ssh_key = ssh_key
    if ssh_user:
        profile.ssh_user = ssh_user
    if rest_port:
        profile.rest_port = rest_port
    if manager_username:
        profile.manager_username = manager_username
    if manager_password:
        profile.manager_password = manager_password
    if manager_tenant:
        profile.manager_tenant = manager_tenant
    profile.ssh_port = ssh_port or constants.REMOTE_EXECUTION_PORT
    profile.rest_protocol = rest_protocol
    profile.rest_certificate = rest_certificate
    profile.save()
def _is_manager_secured(response_history):
    """Return whether the manager forced SSL.

    The manager is considered secured when the very first response in the
    request history redirected the client to an https location.
    """
    if not response_history:
        return False
    initial_response = response_history[0]
    return initial_response.is_redirect \
        and initial_response.headers['location'].startswith('https')
@cfy.pass_logger
def _validate_credentials(username, password, tenant, certificate, logger):
    """Fail fast (CloudifyCliError) if the given credentials are rejected
    by the manager of the currently active profile.
    """
    logger.info('Validating credentials...')
    _get_client_and_assert_manager(
        profile_name=env.profile.profile_name,
        manager_username=username,
        manager_password=password,
        manager_tenant=tenant,
        rest_certificate=certificate
    )
    logger.info('Credentials validated')
CFY-7722 Mark cluster profiles https on `ssl enable` (#758)
########
# Copyright (c) 2014 GigaSpaces Technologies Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
############
import os
import shutil
import tarfile
from contextlib import closing
from cloudify_rest_client.exceptions import CloudifyClientError, \
UserUnauthorizedError
from . import init
from .. import env
from ..table import print_data
from .. import utils
from ..cli import cfy
from .. import constants
from ..cli import helptexts
from ..exceptions import CloudifyCliError
# Directory (inside PROFILES_DIR) where ssh keys are staged during
# `cfy profiles export/import`
EXPORTED_KEYS_DIRNAME = '.exported-ssh-keys'
EXPORTED_SSH_KEYS_DIR = os.path.join(env.PROFILES_DIR, EXPORTED_KEYS_DIRNAME)
# Columns rendered by `cfy profiles list` / `cfy profiles show-current`
PROFILE_COLUMNS = ['name', 'manager_ip', 'manager_username', 'manager_tenant',
                   'ssh_user', 'ssh_key_path', 'ssh_port',
                   'rest_port', 'rest_protocol', 'rest_certificate']
@cfy.group(name='profiles')
@cfy.options.verbose()
def profiles():
    """Handle Cloudify CLI profiles

    Each profile can manage a single Cloudify manager.

    A profile is automatically created when using the `cfy profiles use`
    command.

    Profiles are named according to the IP of the manager they manage.
    """
    # Make sure a local profile exists before any subcommand runs
    if not env.is_initialized():
        init.init_local_profile()
@profiles.command(name='show-current',
                  short_help='Retrieve current profile information')
@cfy.options.verbose()
@cfy.pass_logger
def show(logger):
    """Shows your current active profile and its properties
    """
    active_profile_name = env.get_active_profile()
    if active_profile_name == 'local':
        # The local profile has no manager, so there is nothing to display
        logger.info("You're currently working in local mode. "
                    "To use a manager run `cfy profiles use MANAGER_IP`")
        return
    active_profile = _get_profile(env.get_active_profile())
    print_data(PROFILE_COLUMNS, active_profile, 'Active profile:')
@profiles.command(name='list',
                  short_help='List profiles')
@cfy.options.verbose()
@cfy.pass_logger
# NOTE(review): this callback shadows the `list` builtin; kept as-is since
# the name is part of the module's public surface
def list(logger):
    """List all profiles
    """
    current_profile = env.get_active_profile()
    profiles = []
    profile_names = env.get_profile_names()
    for profile in profile_names:
        profile_data = _get_profile(profile)
        if profile == current_profile:
            # Show the currently active profile by appending *
            profile_data['name'] = '*' + profile_data['name']
        profiles.append(profile_data)
    if profiles:
        logger.info('Listing all profiles...')
        print_data(PROFILE_COLUMNS, profiles, 'Profiles:')
    if not profile_names:
        logger.info(
            'No profiles found. You can create a new profile '
            'by using an existing manager via the `cfy profiles use` command')
@profiles.command(name='use',
                  short_help='Control a specific manager')
@cfy.argument('manager-ip')
@cfy.options.profile_name
@cfy.options.ssh_user
@cfy.options.ssh_key
@cfy.options.ssh_port
@cfy.options.manager_username
@cfy.options.manager_password
@cfy.options.manager_tenant
@cfy.options.rest_port
@cfy.options.ssl_rest
@cfy.options.rest_certificate
@cfy.options.skip_credentials_validation
@cfy.options.verbose()
@cfy.pass_logger
def use(manager_ip,
        profile_name,
        skip_credentials_validation,
        logger,
        **kwargs):
    """Control a specific manager

    `PROFILE_NAME` can be either a manager IP or `local`.

    Additional CLI commands will be added after a manager is used.

    To stop using a manager, you can run `cfy init -r`.
    """
    # Profiles default to being named after the manager's IP
    if not profile_name:
        profile_name = manager_ip
    if profile_name == 'local':
        logger.info('Using local environment...')
        if not env.is_profile_exists(profile_name):
            init.init_local_profile()
        env.set_active_profile('local')
        return
    # Existing profiles are only switched to (extra options are ignored);
    # unknown names trigger full profile creation and validation
    if env.is_profile_exists(profile_name):
        _switch_profile(
            manager_ip=manager_ip,
            profile_name=profile_name,
            logger=logger,
            **kwargs)
    else:
        _create_profile(
            manager_ip=manager_ip,
            profile_name=profile_name,
            skip_credentials_validation=skip_credentials_validation,
            logger=logger,
            **kwargs)
def _switch_profile(manager_ip, profile_name, logger, **kwargs):
    """Activate an already-existing profile.

    Extra --options given on the command line are ignored with a warning,
    because existing profiles are updated via `cfy profiles set`, not
    `cfy profiles use`.
    """
    ignored_options = [option for option, value in kwargs.items() if value]
    if ignored_options:
        logger.warning('Profile {0} already exists. '
                       'The passed in options are ignored: {1}. '
                       'To update the profile, use `cfy profiles set`'
                       .format(profile_name, ', '.join(ignored_options)))
    env.set_active_profile(profile_name)
    logger.info('Using manager {0}'.format(profile_name))
def _create_profile(
        manager_ip,
        profile_name,
        ssh_user,
        ssh_key,
        ssh_port,
        manager_username,
        manager_password,
        manager_tenant,
        rest_port,
        ssl,
        rest_certificate,
        skip_credentials_validation,
        logger):
    """Create, validate and persist a brand-new manager profile."""
    # If REST certificate is provided, then automatically
    # assume SSL.
    if rest_certificate:
        ssl = True
    rest_protocol = constants.SECURED_REST_PROTOCOL if ssl else \
        constants.DEFAULT_REST_PROTOCOL
    if not rest_port:
        rest_port = constants.SECURED_REST_PORT if ssl else \
            constants.DEFAULT_REST_PORT
    logger.info('Attempting to connect to {0} through port {1}, using {2} '
                '(SSL mode: {3})...'.format(manager_ip, rest_port,
                                            rest_protocol, ssl))
    # First, attempt to get the provider from the manager - should it fail,
    # the manager's profile directory won't be created
    provider_context = _get_provider_context(
        profile_name,
        manager_ip,
        rest_port,
        rest_protocol,
        rest_certificate,
        manager_username,
        manager_password,
        manager_tenant,
        skip_credentials_validation
    )
    init.init_manager_profile(profile_name=profile_name)
    env.set_active_profile(profile_name)
    logger.info('Using manager {0} with port {1}'.format(
        manager_ip, rest_port))
    # Persist everything only after the manager proved reachable (or
    # validation was explicitly skipped)
    _set_profile_context(
        profile_name,
        provider_context,
        manager_ip,
        ssh_key,
        ssh_user,
        ssh_port,
        manager_username,
        manager_password,
        manager_tenant,
        rest_port,
        rest_protocol,
        rest_certificate
    )
@profiles.command(name='delete',
                  short_help='Delete a profile')
@cfy.argument('profile-name')
@cfy.options.verbose()
@cfy.pass_logger
def delete(profile_name, logger):
    """Delete a profile

    `PROFILE_NAME` is the IP of the manager the profile manages.
    """
    logger.info('Deleting profile {0}...'.format(profile_name))
    try:
        env.delete_profile(profile_name)
        logger.info('Profile deleted')
    except CloudifyCliError as ex:
        # A missing/undeletable profile is reported, not fatal
        logger.info(str(ex))
def set_profile(profile_name,
                manager_username,
                manager_password,
                manager_tenant,
                ssh_user,
                ssh_key,
                ssh_port,
                ssl,
                rest_certificate,
                skip_credentials_validation,
                logger):
    """Set the profile name, manager username and/or password and/or tenant
    and/or ssl state (on/off) in the *current* profile
    """
    if not any([profile_name, ssh_user, ssh_key, ssh_port, manager_username,
                manager_password, manager_tenant, ssl, rest_certificate]):
        raise CloudifyCliError(
            "You must supply at least one of the following: "
            "profile name, username, password, tenant, "
            "ssl, rest certificate, ssh user, ssh key, ssh port")
    # Credentials not being changed fall back to their current values so
    # the combined set can be validated before anything is written
    username = manager_username or env.get_username()
    password = manager_password or env.get_password()
    tenant = manager_tenant or env.get_tenant_name()
    if not skip_credentials_validation:
        _validate_credentials(username, password, tenant, rest_certificate)
    old_name = None
    if profile_name:
        # `local` is reserved for the no-manager profile
        if profile_name == 'local':
            raise CloudifyCliError('Cannot use the reserved name "local"')
        if env.is_profile_exists(profile_name):
            raise CloudifyCliError('Profile {0} already exists'
                                   .format(profile_name))
        old_name = env.profile.profile_name
        env.profile.profile_name = profile_name
    if manager_username:
        logger.info('Setting username to `{0}`'.format(manager_username))
        env.profile.manager_username = manager_username
    if manager_password:
        logger.info('Setting password to `{0}`'.format(manager_password))
        env.profile.manager_password = manager_password
    if manager_tenant:
        logger.info('Setting tenant to `{0}`'.format(manager_tenant))
        env.profile.manager_tenant = manager_tenant
    if ssl is not None:
        _set_profile_ssl(ssl, logger)
    if rest_certificate:
        logger.info(
            'Setting rest certificate to `{0}`'.format(rest_certificate))
        env.profile.rest_certificate = rest_certificate
    if ssh_user:
        logger.info('Setting ssh user to `{0}`'.format(ssh_user))
        env.profile.ssh_user = ssh_user
    if ssh_key:
        logger.info('Setting ssh key to `{0}`'.format(ssh_key))
        env.profile.ssh_key = ssh_key
    if ssh_port:
        logger.info('Setting ssh port to `{0}`'.format(ssh_port))
        env.profile.ssh_port = ssh_port
    env.profile.save()
    # Renaming: activate the new name, then drop the old profile directory
    if old_name is not None:
        env.set_active_profile(profile_name)
        env.delete_profile(old_name)
    logger.info('Settings saved successfully')
def _set_profile_ssl(ssl, logger):
    """Toggle SSL on the active profile and on all of its cluster nodes.

    :param ssl: 'on' or 'off' (case-insensitive); anything else raises
        CloudifyCliError.
    """
    ssl = str(ssl).lower()
    if ssl == 'on':
        logger.info('Enabling SSL in the local profile')
        port = constants.SECURED_REST_PORT
        protocol = constants.SECURED_REST_PROTOCOL
    elif ssl == 'off':
        logger.info('Disabling SSL in the local profile')
        port = constants.DEFAULT_REST_PORT
        protocol = constants.DEFAULT_REST_PROTOCOL
    else:
        raise CloudifyCliError('SSL must be either `on` or `off`')
    env.profile.rest_port = port
    env.profile.rest_protocol = protocol
    if env.profile.cluster:
        # Cluster profiles store per-node connection settings, so every
        # node entry must be updated as well
        for node in env.profile.cluster:
            node['rest_port'] = port
            node['rest_protocol'] = protocol
            node['trust_all'] = True
            # Bug fix: the original always logged "Enabling SSL ..." for
            # each node, even when SSL was being disabled
            if ssl == 'on':
                logger.info(
                    'Enabling SSL for {0} without certificate validation'
                    .format(node['manager_ip']))
            else:
                logger.info('Disabling SSL for {0}'
                            .format(node['manager_ip']))
@profiles.command(
    name='set',
    short_help='Set name/manager username/password/tenant in current profile')
@cfy.options.profile_name
@cfy.options.manager_username
@cfy.options.manager_password
@cfy.options.manager_tenant
@cfy.options.ssh_user
@cfy.options.ssh_key
@cfy.options.ssh_port
@cfy.options.ssl_state
@cfy.options.rest_certificate
@cfy.options.skip_credentials_validation
@cfy.options.verbose()
@cfy.pass_logger
# Thin CLI wrapper: all work is delegated to set_profile() so the update
# logic can also be invoked programmatically. Named `set_cmd` (exposed to
# users as `set` via name=) to avoid shadowing the builtin.
def set_cmd(profile_name,
            manager_username,
            manager_password,
            manager_tenant,
            ssh_user,
            ssh_key,
            ssh_port,
            ssl,
            rest_certificate,
            skip_credentials_validation,
            logger):
    return set_profile(profile_name,
                       manager_username,
                       manager_password,
                       manager_tenant,
                       ssh_user,
                       ssh_key,
                       ssh_port,
                       ssl,
                       rest_certificate,
                       skip_credentials_validation,
                       logger)
@profiles.command(
    name='set-cluster',
    short_help='Set connection options for a cluster node')
@cfy.argument('cluster-node-name')
@cfy.options.ssh_user
@cfy.options.ssh_key
@cfy.options.ssh_port
@cfy.options.rest_certificate
@cfy.pass_logger
def set_cluster(cluster_node_name,
                ssh_user,
                ssh_key,
                ssh_port,
                rest_certificate,
                logger):
    """Set connection options for a cluster node.

    `CLUSTER_NODE_NAME` is the name of the cluster node to set options for.
    """
    if not env.profile.cluster:
        err = CloudifyCliError('The current profile is not a cluster profile!')
        err.possible_solutions = [
            "Select a different profile using `cfy profiles use`",
            "Run `cfy cluster update-profile`"
        ]
        raise err
    changed_node = None
    for node in env.profile.cluster:
        if node['name'] == cluster_node_name:
            changed_node = node
            break
    else:
        # for/else: only reached when the loop finished without break,
        # i.e. no node with the given name exists
        raise CloudifyCliError(
            'Node {0} not found in the cluster'.format(cluster_node_name))
    # (CLI value, node-dict key, human-readable label) triples; only
    # options the user actually supplied are written to the node entry
    for source, target, label in [
        (ssh_user, 'ssh_user', 'ssh user'),
        (ssh_key, 'ssh_key', 'ssh key'),
        (ssh_port, 'ssh_port', 'ssh port'),
        (rest_certificate, 'cert', 'rest certificate'),
    ]:
        if source:
            changed_node[target] = source
            logger.info('Node {0}: setting {1} to `{2}`'
                        .format(cluster_node_name, label, source))
    env.profile.save()
    logger.info('Settings saved successfully')
@profiles.command(
    name='unset',
    short_help='Clear manager username/password/tenant from current profile')
@cfy.options.manager_username_flag
@cfy.options.manager_password_flag
@cfy.options.manager_tenant_flag
@cfy.options.ssh_user_flag
@cfy.options.ssh_key_flag
@cfy.options.rest_certificate_flag
@cfy.options.skip_credentials_validation
@cfy.options.verbose()
@cfy.pass_logger
def unset(manager_username,
          manager_password,
          manager_tenant,
          ssh_user,
          ssh_key,
          rest_certificate,
          skip_credentials_validation,
          logger):
    """Clear the manager username and/or password and/or tenant
    from the *current* profile
    """
    if not any([manager_username, manager_password, manager_tenant,
                rest_certificate, ssh_user, ssh_key]):
        raise CloudifyCliError("You must choose at least one of the following:"
                               " username, password, tenant, "
                               "rest certificate, ssh user, ssh key")
    # For each credential about to be cleared, fall back to the environment
    # variable (or default cert path) so the remaining credential set can
    # still be validated below before the profile is modified.
    if manager_username:
        username = os.environ.get(constants.CLOUDIFY_USERNAME_ENV)
    else:
        username = env.profile.manager_username
    if manager_password:
        password = os.environ.get(constants.CLOUDIFY_PASSWORD_ENV)
    else:
        password = env.profile.manager_password
    if manager_tenant:
        tenant = os.environ.get(constants.CLOUDIFY_TENANT_ENV)
    else:
        tenant = env.profile.manager_tenant
    if rest_certificate:
        cert = os.environ.get(constants.LOCAL_REST_CERT_FILE) \
            or env.get_default_rest_cert_local_path()
    else:
        cert = None
    if not skip_credentials_validation:
        _validate_credentials(username, password, tenant, cert)
    # Validation passed (or was skipped) - clear the requested fields
    if manager_username:
        logger.info('Clearing manager username')
        env.profile.manager_username = None
    if manager_password:
        logger.info('Clearing manager password')
        env.profile.manager_password = None
    if manager_tenant:
        logger.info('Clearing manager tenant')
        env.profile.manager_tenant = None
    if rest_certificate:
        logger.info('Clearing rest certificate')
        env.profile.rest_certificate = None
    if ssh_user:
        logger.info('Clearing ssh user')
        env.profile.ssh_user = None
    if ssh_key:
        logger.info('Clearing ssh key')
        env.profile.ssh_key = None
    env.profile.save()
    logger.info('Settings saved successfully')
@profiles.command(name='export',
                  short_help='Export all profiles to an archive')
@cfy.options.include_keys(helptexts.EXPORT_SSH_KEYS)
@cfy.options.optional_output_path
@cfy.options.verbose()
@cfy.pass_logger
def export_profiles(include_keys, output_path, logger):
    """Export all profiles to a file

    WARNING: Including the ssh keys of your profiles in the archive means
    that once the profiles are imported, the ssh keys will be put back
    in their original locations!

    If `-o / --output-path` is omitted, the archive's name will be
    `cfy-profiles.tar.gz`.
    """
    _assert_profiles_exist()
    destination = output_path or \
        os.path.join(os.getcwd(), 'cfy-profiles.tar.gz')
    # TODO: Copy exported ssh keys to each profile's directory
    logger.info('Exporting profiles to {0}...'.format(destination))
    if include_keys:
        # Stage a copy of each profile's ssh key under PROFILES_DIR so the
        # keys are picked up by the tar below
        for profile in env.get_profile_names():
            _backup_ssh_key(profile)
    utils.tar(env.PROFILES_DIR, destination)
    if include_keys:
        # The staging directory was only needed while building the archive
        shutil.rmtree(EXPORTED_SSH_KEYS_DIR)
    logger.info('Export complete!')
    logger.info(
        'You can import the profiles by running '
        '`cfy profiles import PROFILES_ARCHIVE`')
@profiles.command(name='import',
                  short_help='Import profiles from an archive')
@cfy.argument('archive-path')
@cfy.options.include_keys(helptexts.IMPORT_SSH_KEYS)
@cfy.options.verbose()
@cfy.pass_logger
def import_profiles(archive_path, include_keys, logger):
    """Import profiles from a profiles archive

    WARNING: If a profile exists both in the archive and locally
    it will be overwritten (any other profiles will be left intact).

    `ARCHIVE_PATH` is the path to the profiles archive to import.
    """
    _assert_is_tarfile(archive_path)
    _assert_profiles_archive(archive_path)
    logger.info('Importing profiles from {0}...'.format(archive_path))
    # Extract next to PROFILES_DIR so the archive's top-level `profiles`
    # directory lands on top of the existing one
    utils.untar(archive_path, os.path.dirname(env.PROFILES_DIR))
    if include_keys:
        for profile in env.get_profile_names():
            _restore_ssh_key(profile)
    else:
        if EXPORTED_KEYS_DIRNAME in os.listdir(env.PROFILES_DIR):
            # Bug fix: the original message had an unbalanced backtick
            # around `--include-keys`, garbling the rendered hint
            logger.info("The profiles archive you provided contains ssh keys "
                        "for one or more profiles. To restore those keys to "
                        "their original locations, you can use the "
                        "`--include-keys` flag or copy them manually from {0} "
                        .format(EXPORTED_SSH_KEYS_DIR))
    logger.info('Import complete!')
    logger.info('You can list profiles using `cfy profiles list`')
def _assert_profiles_exist():
    """Raise CloudifyCliError when there are no stored profiles to export."""
    stored_profiles = env.get_profile_names()
    if stored_profiles:
        return
    raise CloudifyCliError('No profiles to export')
def _assert_profiles_archive(archive_path):
    """Validate that the tar archive looks like a Cloudify profiles export.

    A valid export has `profiles` as its first (top-level) member.
    Raises CloudifyCliError for empty or foreign archives.
    """
    with closing(tarfile.open(name=archive_path)) as tar:
        members = tar.getmembers()
        # Guard against an empty archive: the original indexed [0]
        # unconditionally, crashing with IndexError instead of a clean error
        if not members or members[0].name != 'profiles':
            raise CloudifyCliError(
                'The archive provided does not seem to be a valid '
                'Cloudify profiles archive')
def _assert_is_tarfile(archive_path):
    """Raise CloudifyCliError unless *archive_path* is a readable tar archive."""
    if tarfile.is_tarfile(archive_path):
        return
    raise CloudifyCliError('The archive provided must be a tar.gz archive')
def _backup_ssh_key(profile):
    # Copy the profile's ssh key into the shared export staging directory
    return _move_ssh_key(profile, is_backup=True)
def _restore_ssh_key(profile):
    # Move the profile's ssh key from the staging directory back into place
    return _move_ssh_key(profile, is_backup=False)
@cfy.pass_logger
def _move_ssh_key(profile, logger, is_backup):
    """Back up or restore a single profile's ssh key.

    Keys are staged under EXPORTED_SSH_KEYS_DIR with a
    `<basename>.<profile>.profile` suffix so keys belonging to different
    profiles cannot collide.
    """
    context = env.get_profile_context(profile)
    key_filepath = context.ssh_key
    if key_filepath:
        backup_path = os.path.join(
            EXPORTED_SSH_KEYS_DIR, os.path.basename(key_filepath)) + \
            '.{0}.profile'.format(profile)
        if is_backup:
            if not os.path.isdir(EXPORTED_SSH_KEYS_DIR):
                os.makedirs(EXPORTED_SSH_KEYS_DIR)
            logger.info('Copying ssh key {0} to {1}...'.format(
                key_filepath, backup_path))
            # copy2 preserves the key file's permissions/metadata
            shutil.copy2(key_filepath, backup_path)
        else:
            # Restore only if this key was actually present in the archive
            if os.path.isfile(backup_path):
                logger.info(
                    'Restoring ssh key for profile {0} to {1}...'.format(
                        profile, key_filepath))
                shutil.move(backup_path, key_filepath)
def _get_profile(profile_name):
    """Return the stored context of *profile_name* as a dict.

    The requested profile is activated while its context is read, and the
    previously active profile is restored afterwards.
    """
    previously_active = env.get_active_profile()
    env.set_active_profile(profile_name)
    profile_context = env.get_profile_context(profile_name)
    env.set_active_profile(previously_active)
    return profile_context.to_dict()
def _assert_manager_available(client, profile_name):
    """Verify the manager behind *client* is reachable and usable.

    Returns the manager status on success; raises CloudifyCliError with a
    human-readable message on authorization failure or any other error.
    """
    try:
        return client.manager.get_status()
    # Bug fix: use `except X as e` (valid on Python 2.6+ and required on
    # Python 3) instead of the removed `except X, e` comma form
    except UserUnauthorizedError as e:
        raise CloudifyCliError(
            "Can't use manager {0}\n{1}.".format(
                profile_name,
                str(e)
            )
        )
    # The problem here is that, for instance,
    # any problem raised by the rest client will trigger this.
    # Triggering a CloudifyClientError only doesn't actually deal
    # with situations like No route to host and the likes.
    except Exception as ex:
        # str(ex) instead of ex.message: `.message` is deprecated on
        # Python 2.6+ and does not exist on Python 3
        raise CloudifyCliError(
            "Can't use manager {0}. {1}".format(profile_name, str(ex)))
def _get_provider_context(profile_name,
                          manager_ip,
                          rest_port,
                          rest_protocol,
                          rest_certificate,
                          manager_username,
                          manager_password,
                          manager_tenant,
                          skip_credentials_validation):
    """Fetch the provider context from the manager.

    Returns None instead of raising when the manager can't be reached and
    `skip_credentials_validation` is set, or when the manager reports no
    context; re-raises connection/credential errors otherwise.
    """
    try:
        client = _get_client_and_assert_manager(
            profile_name,
            manager_ip,
            rest_port,
            rest_protocol,
            rest_certificate,
            manager_username,
            manager_password,
            manager_tenant
        )
    except CloudifyCliError:
        if skip_credentials_validation:
            return None
        raise
    try:
        response = client.manager.get_context()
        return response['context']
    except CloudifyClientError:
        return None
def _get_client_and_assert_manager(profile_name,
                                   manager_ip=None,
                                   rest_port=None,
                                   rest_protocol=None,
                                   rest_certificate=None,
                                   manager_username=None,
                                   manager_password=None,
                                   manager_tenant=None):
    """Build a rest client for *profile_name* and verify the manager is up.

    Returns the client; raises CloudifyCliError (via
    _assert_manager_available) when the manager is unreachable or the
    credentials are rejected.
    """
    # Attempt to update the profile with an existing profile context, if one
    # is available. This is relevant in case the user didn't pass a username
    # or a password, and was expecting them to be taken from the old profile
    env.profile = env.get_profile_context(profile_name, suppress_error=True)
    client = env.get_rest_client(
        rest_host=manager_ip,
        rest_port=rest_port,
        rest_protocol=rest_protocol,
        rest_cert=rest_certificate,
        skip_version_check=True,
        username=manager_username,
        password=manager_password,
        tenant_name=manager_tenant
    )
    _assert_manager_available(client, profile_name)
    return client
def _set_profile_context(profile_name,
                         provider_context,
                         manager_ip,
                         ssh_key,
                         ssh_user,
                         ssh_port,
                         manager_username,
                         manager_password,
                         manager_tenant,
                         rest_port,
                         rest_protocol,
                         rest_certificate):
    """Persist a freshly-created profile's settings to disk.

    Optional values are only written when provided; ssh_port falls back to
    the default remote execution port. rest_protocol/rest_certificate are
    written unconditionally.
    """
    profile = env.get_profile_context(profile_name)
    profile.provider_context = provider_context
    if profile_name:
        profile.profile_name = profile_name
    if manager_ip:
        profile.manager_ip = manager_ip
    if ssh_key:
        profile.ssh_key = ssh_key
    if ssh_user:
        profile.ssh_user = ssh_user
    if rest_port:
        profile.rest_port = rest_port
    if manager_username:
        profile.manager_username = manager_username
    if manager_password:
        profile.manager_password = manager_password
    if manager_tenant:
        profile.manager_tenant = manager_tenant
    profile.ssh_port = ssh_port or constants.REMOTE_EXECUTION_PORT
    profile.rest_protocol = rest_protocol
    profile.rest_certificate = rest_certificate
    profile.save()
def _is_manager_secured(response_history):
    """Return whether the manager forced SSL.

    The manager is considered secured when the very first response in the
    request history redirected the client to an https location.
    """
    if not response_history:
        return False
    initial_response = response_history[0]
    return initial_response.is_redirect \
        and initial_response.headers['location'].startswith('https')
@cfy.pass_logger
def _validate_credentials(username, password, tenant, certificate, logger):
    """Fail fast (CloudifyCliError) if the given credentials are rejected
    by the manager of the currently active profile.
    """
    logger.info('Validating credentials...')
    _get_client_and_assert_manager(
        profile_name=env.profile.profile_name,
        manager_username=username,
        manager_password=password,
        manager_tenant=tenant,
        rest_certificate=certificate
    )
    logger.info('Credentials validated')
|
# coding=utf-8
import platform
from helpers import log
from requests.auth import HTTPBasicAuth
from globalvars import GlobalVars
import requests
import time
import json
if 'windows' in str(platform.platform()).lower():
# noinspection PyPep8Naming
from classes import Git as git
else:
from sh import git
# noinspection PyRedundantParentheses,PyClassHasNoInit,PyBroadException
class GitManager:
    """Automates blacklist maintenance through git.

    A new entry is committed on a throwaway branch; users with code
    privileges get it merged and pushed to master directly, everyone
    else gets a GitHub pull request opened on their behalf.
    """

    @staticmethod
    def add_to_blacklist(**kwargs):
        """Append an item to one of the blacklist files and publish the change.

        Keyword arguments:
          blacklist         -- "website", "keyword" or "username"; selects the file
          item_to_blacklist -- the (regex) entry to append
          username          -- chat name of the requesting user
          chat_profile_link -- URL of the requester's chat profile
          code_permissions  -- True: merge and push to master directly;
                               False: push a branch and open a pull request

        Returns a (success, message) tuple.
        """
        blacklist = kwargs.get("blacklist", "")
        item_to_blacklist = kwargs.get("item_to_blacklist", "")
        username = kwargs.get("username", "")
        chat_profile_link = kwargs.get("chat_profile_link", "http://chat.stackexchange.com/users")
        code_permissions = kwargs.get("code_permissions", False)
        # Make sure git credentials are set up
        if git.config("--get", "user.name", _ok_code=[0, 1]) == "":
            return (False, "Tell someone to run `git config user.name \"SmokeDetector\"`")
        if git.config("--get", "user.email", _ok_code=[0, 1]) == "":
            return (False, "Tell someone to run `git config user.email \"smokey@erwaysoftware.com\"`")
        if blacklist == "":
            # If we broke the code, and this isn't assigned, error out before doing anything, but do
            # so gracefully with a nice error message.
            return (False, "Programming Error - Critical information missing for GitManager: blacklist")
        if item_to_blacklist == "":
            # If we broke the code, and this isn't assigned, error out before doing anything, but do
            # so gracefully with a nice error message.
            return (False, "Programming Error - Critical information missing for GitManager: item_to_blacklist")
        # Undo the literal "\s" escaping applied upstream.
        item_to_blacklist = item_to_blacklist.replace("\s", " ")
        if blacklist == "website":
            blacklist_file_name = "blacklisted_websites.txt"
            ms_search_option = "&body_is_regex=1&body="
        elif blacklist == "keyword":
            blacklist_file_name = "bad_keywords.txt"
            ms_search_option = "&body_is_regex=1&body="
        elif blacklist == "username":
            blacklist_file_name = "blacklisted_usernames.txt"
            ms_search_option = "&username_is_regex=1&username="
        else:
            # Just checking all bases, but blacklist_file_name *might* have empty value
            # if we don't address it here.
            return (False, "Invalid blacklist type specified, something has broken badly!")
        git.checkout("master")
        try:
            git.pull()
        except:
            # Best effort only; divergence is caught by the check below.
            pass
        # Check that we're up-to-date with origin (GitHub)
        git.remote.update()
        if 'windows' in platform.platform().lower():
            if git.rev_parse("refs/remotes/origin/master").strip() != git.rev_parse("master").strip():
                return (False, "HEAD isn't at tip of origin's master branch")
        else:
            if git("rev-parse", "refs/remotes/origin/master").strip() != git("rev-parse", "master").strip():
                return (False, "HEAD isn't at tip of origin's master branch")
        # Check that blacklisted_websites.txt isn't modified locally. That could get ugly fast
        if blacklist_file_name in git.status():  # Also ugly
            return (False, "{0} is modified locally. This is probably bad.".format(blacklist_file_name))
        # Bug fix: refuse to add an entry that is already present, instead of
        # silently committing a duplicate line.
        with open(blacklist_file_name, "r") as blacklist_file:
            for lineno, line in enumerate(blacklist_file, 1):
                if line.rstrip('\n') == item_to_blacklist:
                    return (False, '{0} already blacklisted on {1} line {2}'.format(
                        item_to_blacklist, blacklist_file_name, lineno))
        # Add item to file
        with open(blacklist_file_name, "a+") as blacklist_file:
            last_character = blacklist_file.read()[-1:]
            if last_character not in ["", "\n"]:
                blacklist_file.write("\n")
            blacklist_file.write(item_to_blacklist + "\n")
        # Checkout a new branch (for PRs for non-code-privileged people)
        branch = "auto-blacklist-{0}".format(str(time.time()))
        git.checkout("-b", branch)
        # Clear HEAD just in case
        git.reset("HEAD")
        git.add(blacklist_file_name)
        git.commit("--author='SmokeDetector <smokey@erwaysoftware.com>'",
                   "-m", u"Auto blacklist of {0} by {1} --autopull".format(item_to_blacklist, username))
        if code_permissions:
            # Privileged requester: merge straight into master and push.
            git.checkout("master")
            git.merge(branch)
            git.push("origin", "master")
            git.branch('-D', branch)  # Delete the branch in the local git tree since we're done with it.
        else:
            # Unprivileged requester: push the branch and open a pull request.
            git.push("origin", branch)
            git.checkout("master")
            if GlobalVars.github_username is None or GlobalVars.github_password is None:
                return (False, "Tell someone to set a GH password")
            payload = {"title": u"{0}: Blacklist {1}".format(username, item_to_blacklist),
                       "body": u"[{0}]({1}) requests the blacklist of the {2} {3}. See the Metasmoke search [here]"
                               "(https://metasmoke.erwaysoftware.com/search?utf8=%E2%9C%93{4}{5})\n"
                               u"<!-- METASMOKE-BLACKLIST-{6} {3} -->".format(username, chat_profile_link, blacklist,
                                                                             item_to_blacklist, ms_search_option,
                                                                             item_to_blacklist.replace(" ", "+"),
                                                                             blacklist.upper()),
                       "head": branch,
                       "base": "master"}
            response = requests.post("https://api.github.com/repos/Charcoal-SE/SmokeDetector/pulls",
                                     auth=HTTPBasicAuth(GlobalVars.github_username, GlobalVars.github_password),
                                     data=json.dumps(payload))
            log('debug', response.json())
            try:
                git.checkout("deploy")  # Return to deploy, pending the accept of the PR in Master.
                git.branch('-D', branch)  # Delete the branch in the local git tree since we're done with it.
                return (True, "You don't have code privileges, but I've [created a pull request for you]({0}).".format(
                    response.json()["html_url"]))
            except KeyError:
                git.checkout("deploy")  # Return to deploy
                # Delete the branch in the local git tree, we'll create it again if the
                # command is run again. This way, we keep things a little more clean in
                # the local git tree
                git.branch('-D', branch)
                # Error capture/checking for any "invalid" GH reply without an 'html_url' item,
                # which will throw a KeyError.
                if "bad credentials" in str(response.json()['message']).lower():
                    # Capture the case when GH credentials are bad or invalid
                    return (False, "Something is wrong with the GH credentials, tell someone to check them.")
                else:
                    # Capture any other invalid response cases.
                    return (False, "A bad or invalid reply was received from GH, the message was: %s" %
                            response.json()['message'])
        git.checkout("deploy")  # Return to deploy to await CI.
        return (True, "Blacklisted {0}".format(item_to_blacklist))

    @staticmethod
    def current_git_status():
        """Return `git status` output (colour disabled on non-Windows)."""
        if 'windows' in platform.platform().lower():
            return git.status_stripped()
        else:
            return git("-c", "color.status=false", "status")
gitmanager.py: Add duplicate check
Don't blacklist if something is already blacklisted
# coding=utf-8
import platform
from helpers import log
from requests.auth import HTTPBasicAuth
from globalvars import GlobalVars
import requests
import time
import json
if 'windows' in str(platform.platform()).lower():
# noinspection PyPep8Naming
from classes import Git as git
else:
from sh import git
# noinspection PyRedundantParentheses,PyClassHasNoInit,PyBroadException
class GitManager:
    """Automates blacklist maintenance through git.

    A new entry is committed on a throwaway branch; users with code
    privileges get it merged and pushed to master directly, everyone
    else gets a GitHub pull request opened on their behalf.
    """
    @staticmethod
    def add_to_blacklist(**kwargs):
        """Append an item to one of the blacklist files and publish the change.

        Keyword arguments:
          blacklist         -- "website", "keyword" or "username"; selects the file
          item_to_blacklist -- the (regex) entry to append
          username          -- chat name of the requesting user
          chat_profile_link -- URL of the requester's chat profile
          code_permissions  -- True: merge and push to master directly;
                               False: push a branch and open a pull request

        Returns a (success, message) tuple.
        """
        blacklist = kwargs.get("blacklist", "")
        item_to_blacklist = kwargs.get("item_to_blacklist", "")
        username = kwargs.get("username", "")
        chat_profile_link = kwargs.get("chat_profile_link", "http://chat.stackexchange.com/users")
        code_permissions = kwargs.get("code_permissions", False)
        # Make sure git credentials are set up
        if git.config("--get", "user.name", _ok_code=[0, 1]) == "":
            return (False, "Tell someone to run `git config user.name \"SmokeDetector\"`")
        if git.config("--get", "user.email", _ok_code=[0, 1]) == "":
            return (False, "Tell someone to run `git config user.email \"smokey@erwaysoftware.com\"`")
        if blacklist == "":
            # If we broke the code, and this isn't assigned, error out before doing anything, but do
            # so gracefully with a nice error message.
            return (False, "Programming Error - Critical information missing for GitManager: blacklist")
        if item_to_blacklist == "":
            # If we broke the code, and this isn't assigned, error out before doing anything, but do
            # so gracefully with a nice error message.
            return (False, "Programming Error - Critical information missing for GitManager: item_to_blacklist")
        # Undo the literal "\s" escaping applied upstream.
        item_to_blacklist = item_to_blacklist.replace("\s", " ")
        if blacklist == "website":
            blacklist_file_name = "blacklisted_websites.txt"
            ms_search_option = "&body_is_regex=1&body="
        elif blacklist == "keyword":
            blacklist_file_name = "bad_keywords.txt"
            ms_search_option = "&body_is_regex=1&body="
        elif blacklist == "username":
            blacklist_file_name = "blacklisted_usernames.txt"
            ms_search_option = "&username_is_regex=1&username="
        else:
            # Just checking all bases, but blacklist_file_name *might* have empty value
            # if we don't address it here.
            return (False, "Invalid blacklist type specified, something has broken badly!")
        git.checkout("master")
        try:
            git.pull()
        except:
            # Best effort only; divergence is caught by the check below.
            pass
        # Check that we're up-to-date with origin (GitHub)
        git.remote.update()
        if 'windows' in platform.platform().lower():
            if git.rev_parse("refs/remotes/origin/master").strip() != git.rev_parse("master").strip():
                return (False, "HEAD isn't at tip of origin's master branch")
        else:
            if git("rev-parse", "refs/remotes/origin/master").strip() != git("rev-parse", "master").strip():
                return (False, "HEAD isn't at tip of origin's master branch")
        # Check that blacklisted_websites.txt isn't modified locally. That could get ugly fast
        if blacklist_file_name in git.status(): # Also ugly
            return (False, "{0} is modified locally. This is probably bad.".format(blacklist_file_name))
        # Prevent duplicates
        with open(blacklist_file_name, "r") as blacklist_file:
            for lineno, line in enumerate(blacklist_file, 1):
                if line.rstrip('\n') == item_to_blacklist:
                    return (False, '{0} already blacklisted on {1} line {2}'.format(
                        item_to_blacklist, blacklist_file_name, lineno))
        # Add item to file
        with open(blacklist_file_name, "a+") as blacklist_file:
            last_character = blacklist_file.read()[-1:]
            if last_character not in ["", "\n"]:
                blacklist_file.write("\n")
            blacklist_file.write(item_to_blacklist + "\n")
        # Checkout a new branch (for PRs for non-code-privileged people)
        branch = "auto-blacklist-{0}".format(str(time.time()))
        git.checkout("-b", branch)
        # Clear HEAD just in case
        git.reset("HEAD")
        git.add(blacklist_file_name)
        git.commit("--author='SmokeDetector <smokey@erwaysoftware.com>'",
                   "-m", u"Auto blacklist of {0} by {1} --autopull".format(item_to_blacklist, username))
        if code_permissions:
            # Privileged requester: merge straight into master and push.
            git.checkout("master")
            git.merge(branch)
            git.push("origin", "master")
            git.branch('-D', branch)  # Delete the branch in the local git tree since we're done with it.
        else:
            # Unprivileged requester: push the branch and open a pull request.
            git.push("origin", branch)
            git.checkout("master")
            if GlobalVars.github_username is None or GlobalVars.github_password is None:
                return (False, "Tell someone to set a GH password")
            payload = {"title": u"{0}: Blacklist {1}".format(username, item_to_blacklist),
                       "body": u"[{0}]({1}) requests the blacklist of the {2} {3}. See the Metasmoke search [here]"
                               "(https://metasmoke.erwaysoftware.com/search?utf8=%E2%9C%93{4}{5})\n"
                               u"<!-- METASMOKE-BLACKLIST-{6} {3} -->".format(username, chat_profile_link, blacklist,
                                                                             item_to_blacklist, ms_search_option,
                                                                             item_to_blacklist.replace(" ", "+"),
                                                                             blacklist.upper()),
                       "head": branch,
                       "base": "master"}
            response = requests.post("https://api.github.com/repos/Charcoal-SE/SmokeDetector/pulls",
                                     auth=HTTPBasicAuth(GlobalVars.github_username, GlobalVars.github_password),
                                     data=json.dumps(payload))
            log('debug', response.json())
            try:
                git.checkout("deploy")  # Return to deploy, pending the accept of the PR in Master.
                git.branch('-D', branch)  # Delete the branch in the local git tree since we're done with it.
                return (True, "You don't have code privileges, but I've [created a pull request for you]({0}).".format(
                    response.json()["html_url"]))
            except KeyError:
                git.checkout("deploy")  # Return to deploy
                # Delete the branch in the local git tree, we'll create it again if the
                # command is run again. This way, we keep things a little more clean in
                # the local git tree
                git.branch('-D', branch)
                # Error capture/checking for any "invalid" GH reply without an 'html_url' item,
                # which will throw a KeyError.
                if "bad credentials" in str(response.json()['message']).lower():
                    # Capture the case when GH credentials are bad or invalid
                    return (False, "Something is wrong with the GH credentials, tell someone to check them.")
                else:
                    # Capture any other invalid response cases.
                    return (False, "A bad or invalid reply was received from GH, the message was: %s" %
                            response.json()['message'])
        git.checkout("deploy")  # Return to deploy to await CI.
        return (True, "Blacklisted {0}".format(item_to_blacklist))
    @staticmethod
    def current_git_status():
        """Return `git status` output (colour disabled on non-Windows)."""
        if 'windows' in platform.platform().lower():
            return git.status_stripped()
        else:
            return git("-c", "color.status=false", "status")
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
from __future__ import with_statement
import logging
logging.basicConfig(level=logging.INFO) # dev_appserver.py --log_level debug .
log = logging.getLogger(__name__)
import os
import re
import webapp2
import urllib2
import mimetypes
import jinja2
import logging
import StringIO
import json
import rdflib
#from rdflib.namespace import RDFS, RDF, OWL
#from rdflib.term import URIRef
from markupsafe import Markup, escape # https://pypi.python.org/pypi/MarkupSafe
import threading
import itertools
import datetime, time
from time import gmtime, strftime
from google.appengine.ext import ndb
from google.appengine.ext import blobstore
from google.appengine.api import users
from google.appengine.ext.webapp import blobstore_handlers
from google.appengine.api import modules
from google.appengine.api import runtime
from google.appengine.api import app_identity
from google.appengine.api.modules import modules
GAE_APP_ID = "appId"
GAE_VERSION_ID = "versionId"
#Testharness Used to indicate we are being called from tests - use setInTestHarness() & getInTestHarness() to manage value - defauluts to False (we are not in tests)
from testharness import *
from sdoutil import *
from api import *
from apirdfterm import *
from apirdflib import load_graph, getNss, getRevNss, buildSingleTermGraph, serializeSingleTermGrapth
from apirdflib import countTypes, countProperties, countEnums, graphFromFiles, getPathForPrefix, getPrefixForPath, rdfgettops
from apimarkdown import Markdown
from sdordf2csv import sdordf2csv
# Load the site configuration; the app cannot run without a valid config.
CONFIGFILE = os.environ.get("CONFIGFILE","sdoconfig.json")
SdoConfig.load(CONFIGFILE)
if not SdoConfig.valid:
    log.error("Invalid config from '%s' or its includes !!" % CONFIGFILE)
    # Bug fix: the os module has no exit(); the original os.exit() raised
    # AttributeError.  Abort startup explicitly instead.
    import sys
    sys.exit(1)
SCHEMA_VERSION="3.5"
if not getInTestHarness():
GAE_APP_ID = app_identity.get_application_id()
GAE_VERSION_ID = modules.get_current_version_name()
FEEDBACK_FORM_BASE_URL='https://docs.google.com/a/google.com/forms/d/1krxHlWJAO3JgvHRZV9Rugkr9VYnMdrI10xbGsWt733c/viewform?entry.1174568178&entry.41124795={0}&entry.882602760={1}'
# {0}: term URL, {1} category of term.
sitemode = "mainsite" # whitespaced list for CSS tags,
# e.g. "mainsite testsite" when off expected domains
# "extensionsite" when in an extension (e.g. blue?)
releaselog = { "2.0": "2015-05-13", "2.1": "2015-08-06", "2.2": "2015-11-05", "3.0": "2016-05-04", "3.1": "2016-08-09", "3.2": "2017-03-23", "3.3": "2017-08-14", "3.4": "2018-06-15", "3.5": "2019-04-02" }
silent_skip_list = [ "favicon.ico" ] # Do nothing for now
all_layers = {}
ext_re = re.compile(r'([^\w,])+')
validNode_re = re.compile(r'^[\w\/.-]+$')
#TODO: Modes:
# mainsite
# webschemadev
# known extension (not skiplist'd, eg. demo1 on schema.org)
TEMPLATESDIR = SdoConfig.templateDir()
FileBasedTemplates = True
def urlTemplateLoader(name):
    """Jinja2 FunctionLoader callback: fetch template *name* over HTTP.

    The template is read from TEMPLATESDIR + "/" + name; returns the
    response body, or None when the fetch fails with a URLError.
    """
    log.info("TEMPLATE LOADER LOOKING FOR: %s" % name)
    url = "/".join([TEMPLATESDIR, name])
    log.info("URL: %s" % url)
    try:
        response = urllib2.urlopen(url)
        body = response.read()
    except urllib2.URLError as e:
        log.info("URLError %s" % e)
        return None
    return body
if TEMPLATESDIR:
if TEMPLATESDIR.startswith("file://"):
TEMPLATESDIR = TEMPLATESDIR[7:]
if "://" in TEMPLATESDIR:
FileBasedTemplates = False
else:
TEMPLATESDIR = os.path.join(os.path.dirname(__file__), 'templates')
log.info("No Templates directory defined - defaulting to %s" % TEMPLATESDIR)
if FileBasedTemplates:
JINJA_ENVIRONMENT = jinja2.Environment(loader=jinja2.FileSystemLoader(TEMPLATESDIR),
extensions=['jinja2.ext.autoescape'], autoescape=True, cache_size=0)
else:
JINJA_ENVIRONMENT = jinja2.Environment(loader=jinja2.FunctionLoader(urlTemplateLoader),
extensions=['jinja2.ext.autoescape'], autoescape=True, cache_size=0)
CANONICALSCHEME = "http"
ENABLE_JSONLD_CONTEXT = True
ENABLE_CORS = True
ENABLE_HOSTED_EXTENSIONS = True
DISABLE_NDB_FOR_LOCALHOST = True
ENABLEMOREINFO = True
WORKINGHOSTS = ["schema.org","schemaorg.appspot.com",
"webschemas.org","webschemas-g.appspot.com",
"sdo-test.appspot.com",
"localhost"]
EXTENSION_SUFFIX = "" # e.g. "*"
CORE = 'core'
ATTIC = 'attic'
ENABLED_EXTENSIONS = [ATTIC, 'auto', 'bib', 'health-lifesci', 'pending', 'meta', 'iot' ]
#### Following 2 lines look odd - leave them as is - just go with it!
ALL_LAYERS = [CORE,'']
ALL_LAYERS += ENABLED_EXTENSIONS
####
ALL_LAYERS_NO_ATTIC = list(ALL_LAYERS)
ALL_LAYERS_NO_ATTIC.remove(ATTIC)
setAllLayersList(ALL_LAYERS)
OUTPUTDATATYPES = [".csv",".jsonld",".ttl",".rdf",".xml",".nt"]
FORCEDEBUGGING = False
# FORCEDEBUGGING = True
SHAREDSITEDEBUG = True
if getInTestHarness():
SHAREDSITEDEBUG = False
LOADEDSOURCES = False
noindexpages = True
SUBDOMAINS = True
subs = os.environ.get("SUBDOMAINS",None)
if subs:
if subs.lower() == "true":
SUBDOMAINS = True
elif subs.lower() == "false":
SUBDOMAINS = False
else:
log.info("SUBDOMAINS set to invalid value %s - defaulting to %s" %(subs,SUBDOMAINS))
log.info("SUBDOMAINS set to %s" % SUBDOMAINS)
############# Warmup Control ########
WarmedUp = False
WarmupState = "Auto"
if "WARMUPSTATE" in os.environ:
WarmupState = os.environ["WARMUPSTATE"]
log.info("[%s] WarmupState: %s" % (getInstanceId(short=True),WarmupState))
if WarmupState.lower() == "off":
WarmedUp = True
elif "SERVER_NAME" in os.environ and ("localhost" in os.environ['SERVER_NAME'] and WarmupState.lower() == "auto"):
WarmedUp = True
############# Shared values and times ############
#### Memcache functions dissabled in test mode ###
appver = "TestHarness Version"
if "CURRENT_VERSION_ID" in os.environ:
appver = os.environ["CURRENT_VERSION_ID"]
def getAppEngineVersion():
    """Return the current App Engine version name, or "TestVersion"
    when running inside the test harness."""
    if getInTestHarness():
        return "TestVersion"
    # Imported lazily: the GAE modules API is unavailable under tests.
    from google.appengine.api.modules.modules import get_current_version_name
    return get_current_version_name()
instance_first = True
instance_num = 0
callCount = 0
global_vars = threading.local()
starttime = datetime.datetime.utcnow()
systarttime = starttime
modtime = starttime
etagSlug = ""
if not getInTestHarness():
from google.appengine.api import memcache
class SlugEntity(ndb.Model):
    """Datastore record holding the current ETag slug and its modification
    time, shared across app instances (see setmodiftime/getmodiftime)."""
    slug = ndb.StringProperty()
    modtime = ndb.DateTimeProperty()
def setmodiftime(sttime):
    """Set the global modification time and derive the ETag slug from it,
    persisting both to the shared SlugEntity datastore record.

    No-op inside the test harness.
    """
    global modtime, etagSlug
    if not getInTestHarness():
        # Drop sub-second precision so the slug is stable per second.
        modtime = sttime.replace(microsecond=0)
        etagSlug = "24751%s" % modtime.strftime("%y%m%d%H%M%Sa")
        log.debug("set slug: %s" % etagSlug)
        slug = SlugEntity(id="ETagSlug",slug=etagSlug, modtime=modtime)
        slug.put()
def getmodiftime():
    """Refresh the global modtime/etagSlug from the shared SlugEntity
    record and return the modification time.

    Outside the test harness the datastore copy wins; if the record has
    gone missing it is rebuilt from the current time.
    """
    global modtime, etagSlug
    if not getInTestHarness():
        slug = SlugEntity.get_by_id("ETagSlug")
        if not slug:  # Occasionally the stored value is lost; rebuild it.
            systarttime = datetime.datetime.utcnow()
            tick()
            setmodiftime(systarttime)  # Will store it again
            slug = SlugEntity.get_by_id("ETagSlug")
        modtime = slug.modtime
        etagSlug = str(slug.slug)
    return modtime
def getslug():
    """Return the current ETag slug, refreshing it from the datastore
    first via getmodiftime()."""
    global etagSlug
    getmodiftime()
    return etagSlug
def tick():  # Keep memcache values fresh so they don't expire
    """Re-store the system start time and app version in memcache.

    No-op inside the test harness (memcache is unavailable there).
    """
    if not getInTestHarness():
        memcache.set(key="SysStart", value=systarttime)
        memcache.set(key="static-version", value=appver)
def check4NewVersion():
    """Determine whether this instance is running a newly deployed version.

    Reads admin/deploy_timestamp.txt (written at deploy time) and compares
    it against the timestamp stored from the previous run.  Local and
    test instances are always treated as new so caches get flushed.

    Returns (is_new, deploy_timestamp); deploy_timestamp is None when the
    timestamp file could not be read.
    """
    ret = False
    dep = None
    try:
        fpath = os.path.join(os.path.split(__file__)[0], 'admin/deploy_timestamp.txt')
        with open(fpath, 'r') as f:
            # `with` closes the file; the original's explicit f.close()
            # inside the block was redundant and has been dropped.
            dep = f.read().replace("\n", "")
    except Exception as e:
        # Best effort: an unreadable file just leaves dep as None.
        log.info("ERROR reading: %s" % e)
    # Force new version logic for local versions and tests.
    # .get() avoids a KeyError when SERVER_NAME is not set in the environment.
    if getInTestHarness() or "localhost" in os.environ.get('SERVER_NAME', ''):
        ret = True
        log.info("Assuming new version for local/test instance")
    else:
        stored, info = getTimestampedInfo("deployed-timestamp")
        if stored != dep:
            ret = True
    return ret, dep
def storeNewTimestamp(stamp=None):
    """Persist *stamp* under the "deployed-timestamp" key."""
    storeTimestampedInfo("deployed-timestamp",stamp)
def storeInitialisedTimestamp(stamp=None):
    """Persist *stamp* under the "initialised-timestamp" key."""
    storeTimestampedInfo("initialised-timestamp",stamp)
if getInTestHarness():
load_examples_data(ENABLED_EXTENSIONS)
else: #Ensure clean start for any memcached or ndb store values...
changed, dep = check4NewVersion()
if changed: #We are a new instance of the app
msg = "New app instance [%s:%s] detected - FLUSHING CACHES. (deploy_timestamp='%s')\nLoaded Config file from: %s" % (GAE_VERSION_ID,GAE_APP_ID,dep,CONFIGFILE)
memcache.flush_all()
storeNewTimestamp(dep)
sdo_send_mail(to="rjw@dataliberate.com",subject="[SCHEMAINFO] from 'sdoapp'", msg=msg)
log.info("%s" % msg)
load_start = datetime.datetime.now()
systarttime = datetime.datetime.utcnow()
memcache.set(key="app_initialising", value=True, time=300) #Give the system 5 mins - auto remove flag in case of crash
memcache.set(key="static-version", value=appver)
memcache.add(key="SysStart", value=systarttime)
instance_first = True
cleanmsg = CacheControl.clean()
log.info("Clean count(s): %s" % cleanmsg)
log.info(("[%s] Cache clean took %s " % (getInstanceId(short=True),(datetime.datetime.now() - load_start))))
load_start = datetime.datetime.now()
tick()
memcache.set(key="app_initialising", value=False)
log.debug("[%s] Awake >>>>>>>>>>>." % (getInstanceId(short=True)))
storeInitialisedTimestamp()
else:
time.sleep(0.5) #Give time for the initialisation flag (possibly being set in another thread/instance) to be set
WAITCOUNT = 180
waittime = WAITCOUNT
while waittime > 0:
waittime -= 1
flag = memcache.get("app_initialising")
if not flag or flag == False: #Initialised or value missing
break
log.debug("[%s] Waited %s seconds for intialisation to end memcahce value = %s" % (getInstanceId(short=True),
(WAITCOUNT - waittime),memcache.get("app_initialising")))
time.sleep(1)
if waittime <= 0:
log.info("[%s] Waited %s seconds for intialisation to end - proceeding anyway!" % (getInstanceId(short=True),WAITCOUNT))
log.debug("[%s] End of waiting !!!!!!!!!!." % (getInstanceId(short=True)))
tick()
systarttime = memcache.get("SysStart")
if(not systarttime): #Occationally memcache will loose the value and result in systarttime becomming Null value
systarttime = datetime.datetime.utcnow()
tick()
setmodiftime(systarttime)
#################################################
def cleanPath(node):
    """Return *node* as a string with every character outside the URL-path
    whitelist (letters, digits, '-', '/', ',', '.') removed."""
    allowed = re.compile(r'[a-zA-Z0-9\-/,\.]')
    return "".join(ch for ch in str(node) if allowed.match(ch))
class HTMLOutput:
    """Collects HTML fragments in place of an HTTP response object, for
    later hand-off to the template engine."""

    def __init__(self):
        # Fragments are buffered here and only joined in toHTML().
        self.outputStrings = []

    def write(self, str):
        """Buffer one HTML fragment (mirrors the response.write API)."""
        self.outputStrings.append(str)

    def toHTML(self):
        """Join all buffered fragments into one Markup-wrapped string."""
        joined = "".join(self.outputStrings)
        return Markup(joined)

    def __str__(self):
        return self.toHTML()
# Core API: we have a single schema graph built from triples and units.
# now in api.py
class TypeHierarchyTree:
    """Accumulates a rendering of the type hierarchy, either as a nested
    HTML <ul> tree (traverseForHTML) or as JSON-LD (traverseForJSONLD).

    `visited` tracks term ids already emitted so shared subtypes are
    linked rather than duplicated.
    """
    def __init__(self, prefix=""):
        # prefix: markup emitted before the <ul> wrapper in toHTML().
        self.txt = ""
        self.visited = []
        self.prefix = prefix
    def emit(self, s):
        """Append one line to the accumulated output text."""
        self.txt += s + "\n"
    def emit2buff(self, buff, s):
        """Append one line to an external buffer instead of self.txt."""
        buff.write(s + "\n")
    def toHTML(self):
        """Return the accumulated markup wrapped in a <ul>."""
        return '%s<ul>%s</ul>' % (self.prefix, self.txt)
    def toJSON(self):
        """Return the accumulated (JSON-LD) text as-is."""
        return self.txt
    def traverseForHTML(self, term, depth = 1, hashorslash="/", layers='core', idprefix="", urlprefix="", traverseAllLayers=False, buff=None):
        """Generate a hierarchical tree view of the types. hashorslash is used for relative link prefixing.

        Returns True when something was emitted for this subtree, False
        for skipped terms (None, superseded, or in the attic layer).
        Output goes into *buff* when given; otherwise a local buffer is
        used and flushed into self.txt at the end.
        """
        #log.info("traverseForHTML: node=%s hashorslash=%s" % ( term, hashorslash ))
        if not term:
            return False
        if term.superseded() or term.getLayer() == ATTIC:
            return False
        localBuff = False
        if buff == None:
            localBuff = True
            buff = StringIO.StringIO()
        home = term.getLayer()
        gotOutput = True
        if home in ENABLED_EXTENSIONS and home != getHostExt():
            urlprefix = makeUrl(home)
        extclass = ""
        extflag = ""
        tooltip=""
        if home != "core" and home != "":
            extclass = "class=\"ext ext-%s\"" % home
            extflag = EXTENSION_SUFFIX
            tooltip = "title=\"Extended schema: %s.schema.org\" " % home
        # we are a supertype of some kind
        subTypes = term.getSubs()
        idstring = idprefix + term.getId()
        if len(subTypes) > 0:
            # and we haven't been here before
            if term.getId() not in self.visited:
                self.emit2buff(buff, ' %s<li class="tbranch" id="%s"><a %s %s href="%s%s%s">%s</a>%s' % (" " * 4 * depth, idstring, tooltip, extclass, urlprefix, hashorslash, term.getId(), term.getId(), extflag) )
                self.emit2buff(buff, ' %s<ul>' % (" " * 4 * depth))
                # handle our subtypes
                for item in subTypes:
                    subBuff = StringIO.StringIO()
                    got = self.traverseForHTML(item, depth + 1, hashorslash=hashorslash, layers=layers, idprefix=idprefix, urlprefix=urlprefix, traverseAllLayers=traverseAllLayers,buff=subBuff)
                    if got:
                        self.emit2buff(buff,subBuff.getvalue())
                    subBuff.close()
                self.emit2buff(buff, ' %s</ul>' % (" " * 4 * depth))
            else:
                # we are a supertype but we visited this type before, e.g. saw Restaurant via Place then via Organization
                seencount = self.visited.count(term.getId())
                idstring = "%s%s" % (idstring, "+" * seencount)
                seen = ' <a href="#%s">+</a> ' % term.getId()
                self.emit2buff(buff, ' %s<li class="tbranch" id="%s"><a %s %s href="%s%s%s">%s</a>%s%s' % (" " * 4 * depth, idstring, tooltip, extclass, urlprefix, hashorslash, term.getId(), term.getId(), extflag, seen) )
        # leaf nodes
        if len(subTypes) == 0:
            gotOutput = True
            seen = ""
            if term.getId() in self.visited:
                seencount = self.visited.count(term.getId())
                idstring = "%s%s" % (idstring, "+" * seencount)
                seen = ' <a href="#%s">+</a> ' % term.getId()
            self.emit2buff(buff, '%s<li class="tleaf" id="%s"><a %s %s href="%s%s%s">%s</a>%s%s' % (" " * depth, idstring, tooltip, extclass, urlprefix, hashorslash, term.getId(), term.getId(), extflag, seen ))
        self.visited.append(term.getId()) # remember our visit
        self.emit2buff(buff, ' %s</li>' % (" " * 4 * depth) )
        if localBuff:
            self.emit(buff.getvalue())
            buff.close()
        return gotOutput
    # based on http://danbri.org/2013/SchemaD3/examples/4063550/hackathon-schema.js - thanks @gregg, @sandro
    def traverseForJSONLD(self, term, depth = 0, last_at_this_level = True, supertype="None", layers='core'):
        """Emit a JSON-LD tree node for *term* and recurse into its
        not-yet-visited subtypes.

        `supertype` is either the parent term or the sentinel string
        "None" at the root; `last_at_this_level` controls trailing commas
        and the top-level @context emission.
        """
        # NOTE(review): the emit_debug branches below reference undefined
        # names node.id / t.id (should presumably be term.getId() /
        # t.getId()); dead code unless emit_debug is flipped to True.
        emit_debug = False
        if not term or not term.getId():
            log.error("Error None value passed to traverseForJSONLD()")
            return
        if term.getId() in self.visited:
            # self.emit("skipping %s - already visited" % node.id)
            return
        self.visited.append(term.getId())
        p1 = " " * 4 * depth
        if emit_debug:
            self.emit("%s# @id: %s last_at_this_level: %s" % (p1, term.getId(), last_at_this_level))
        global namespaces;
        ctx = "{}".format(""""@context": {
        "rdfs": "http://www.w3.org/2000/01/rdf-schema#",
        "schema": "http://schema.org/",
        "rdfs:subClassOf": { "@type": "@id" },
        "name": "rdfs:label",
        "description": "rdfs:comment",
        "children": { "@reverse": "rdfs:subClassOf" }
        },\n""" if last_at_this_level and depth==0 else '' )
        unseen_subtypes = []
        for st in term.getSubs():
            if not st.getId() in self.visited:
                unseen_subtypes.append(st)
        unvisited_subtype_count = len(unseen_subtypes)
        subtype_count = len( term.getSubs() )
        supertx = "{}".format( '"rdfs:subClassOf": "schema:%s", ' % supertype.getId() if supertype != "None" else '' )
        maybe_comma = "{}".format("," if unvisited_subtype_count > 0 else "")
        comment = term.getComment().strip()
        comment = ShortenOnSentence(StripHtmlTags(comment),60)
        def encode4json(s):
            # JSON-escape the (already HTML-stripped) comment text.
            return json.dumps(s)
        self.emit('\n%s{\n%s\n%s"@type": "rdfs:Class", %s "description": %s,\n%s"name": "%s",\n%s"@id": "schema:%s",\n%s"layer": "%s"%s'
                  % (p1, ctx, p1, supertx, encode4json(comment), p1, term.getId(), p1, term.getId(), p1, term.getLayer(), maybe_comma))
        i = 1
        if unvisited_subtype_count > 0:
            self.emit('%s"children": ' % p1 )
            self.emit("  %s[" % p1 )
            inner_lastness = False
            for t in unseen_subtypes:
                if emit_debug:
                    self.emit("%s # In %s > %s i: %s unvisited_subtype_count: %s" %(p1, node.id, t.id, i, unvisited_subtype_count))
                if i == unvisited_subtype_count:
                    inner_lastness = True
                i = i + 1
                self.traverseForJSONLD(t, depth + 1, inner_lastness, supertype=term, layers=layers)
            self.emit("%s  ]%s" % (p1, "{}".format( "" if not last_at_this_level else '' ) ) )
        maybe_comma = "{}".format( ',' if not last_at_this_level else '' )
        self.emit('\n%s}%s\n' % (p1, maybe_comma))
def GetExamples(term, layers='core'):
    """Returns the examples (if any) for some Unit node.

    `layers` is accepted for API symmetry but is not referenced here; the
    lookup is delegated entirely to LoadTermExamples().
    """
    return LoadTermExamples(term)
def GetExtMappingsRDFa(term):
    """Self-contained chunk of RDFa HTML markup with mappings for this term.

    Classes get owl:equivalentClass links (href for absolute http URIs,
    resource otherwise); properties get owl:equivalentProperty links.
    A placeholder comment is returned when there is nothing to map.
    """
    equivs = term.getEquivalents()
    if term.isClass() and len(equivs) > 0:
        links = []
        for c in equivs:
            attr = "href" if c.startswith('http') else "resource"
            links.append("<link property=\"owl:equivalentClass\" %s=\"%s\"/>\n" % (attr, c))
        return "".join(links)
    if term.isProperty() and len(equivs) > 0:
        links = ["<link property=\"owl:equivalentProperty\" href=\"%s\"/>\n" % c
                 for c in equivs]
        return "".join(links)
    return "<!-- no external mappings noted for this term. -->"
class ShowUnit (webapp2.RequestHandler):
"""ShowUnit exposes schema.org terms via Web RequestHandler
(HTML/HTTP etc.).
"""
    def emitCacheHeaders(self):
        """Send cache-related headers via HTTP.

        Cache-Control can be overridden via the CACHE_CONTROL environment
        variable (set in the .yaml config); otherwise a 10-minute public
        cache is used.  Vary is always set for content negotiation.
        """
        if "CACHE_CONTROL" in os.environ:
            log.info("Setting http cache control to '%s' from .yaml" % os.environ["CACHE_CONTROL"])
            self.response.headers['Cache-Control'] = os.environ["CACHE_CONTROL"]
        else:
            self.response.headers['Cache-Control'] = "public, max-age=600" # 10m
        self.response.headers['Vary'] = "Accept, Accept-Encoding"
    def write(self, str):
        """Write some text to Web server's output stream.

        Output is buffered in self.outputStrings rather than sent
        immediately.  (The parameter name shadows the builtin `str`.)
        """
        self.outputStrings.append(str)
def moreInfoBlock(self, term, layer='core'):
# if we think we have more info on this term, show a bulleted list of extra items.
moreblock = os.environ.get("MOREBLOCK")
if not moreblock or (moreblock.lower() == "false"):
return ""
# defaults
bugs = ["No known open issues."]
mappings = ["No recorded schema mappings."]
items = bugs + mappings
feedback_url = FEEDBACK_FORM_BASE_URL.format(term.getUri, term.getType())
items = [
self.emitCanonicalURL(term),
self.emitEquivalents(term),
"<a href='{0}'>Leave public feedback on this term 💬</a>".format(feedback_url),
"<a href='https://github.com/schemaorg/schemaorg/issues?q=is%3Aissue+is%3Aopen+{0}'>Check for open issues.</a>".format(term.getId())
]
if term.getLayer() != "core":
items.append("'{0}' is mentioned in the <a href='{1}'>{2}</a> extention.".format( term.getId(), makeUrl(term.getLayer(),"",full=True), term.getLayer() ))
moreinfo = """<div>
<div id='infobox' style='text-align: right;' role="checkbox" aria-checked="false"><label for="morecheck"><b><span style="cursor: pointer;">[more...]</span></b></label></div>
<input type='checkbox' checked="checked" style='display: none' id=morecheck><div id='infomsg' style='background-color: #EEEEEE; text-align: left; padding: 0.5em;'>
<ul>"""
for i in items:
if i and len(i):
moreinfo += "<li>%s</li>" % i
# <li>mappings to other terms.</li>
# <li>or links to open issues.</li>
moreinfo += "</ul>\n</div>\n</div>\n"
return moreinfo
    def ml(self, term, label='', title='', prop='', hashorslash='/'):
        """ml ('make link')
        Returns an HTML-formatted link to the class or property URL
        * label = optional anchor text label for the link
        * title = optional title attribute on the link
        * prop = an optional property value to apply to the A element

        Terms whose id contains ':' are treated as external vocabulary
        references and delegated to external_ml().
        """
        if not term:
            return ""
        if ":" in term.getId():
            return self.external_ml(term,title=title, prop=prop)
        if label=='':
            label = term.getLabel()
        if title != '':
            title = " title=\"%s\"" % (title)
        if prop:
            prop = " property=\"%s\"" % (prop)
        rdfalink = ''
        if prop:
            # Companion <link> element carrying the RDFa property.
            rdfalink = '<link %s href="%s%s" />' % (prop,api.SdoConfig.vocabUri(),label)
        # NOTE(review): uses term.id here, while everywhere else in this
        # method term.getId() is used - confirm the term class exposes a
        # plain .id attribute.
        if(term.id == "DataType"): #Special case
            return "%s<a href=\"%s\">%s</a>" % (rdfalink,term.getId(), term.getId())
        urlprefix = "."
        home = term.getLayer()
        if home in ENABLED_EXTENSIONS and home != getHostExt():
            # NOTE(review): `port` is computed but never used below.
            port = ""
            if getHostPort() != "80":
                port = ":%s" % getHostPort()
            urlprefix = makeUrl(home,full=True)
        extclass = ""
        extflag = ""
        tooltip = ""
        if home != "core" and home != "":
            if home != "meta":
                extclass = "class=\"ext ext-%s\" " % home
                extflag = EXTENSION_SUFFIX
                tooltip = "title=\"Defined in extension: %s.schema.org\" " % home
        return "%s<a %s %s href=\"%s%s%s\"%s>%s</a>%s" % (rdfalink,tooltip, extclass, urlprefix, hashorslash, term.getId(), title, label, extflag)
        #return "<a %s %s href=\"%s%s%s\"%s%s>%s</a>%s" % (tooltip, extclass, urlprefix, hashorslash, node.id, prop, title, label, extflag)
def external_ml(self, term, title='', prop=''):
    """Make an HTML link for a term defined in an external vocabulary.

    The term id is decomposed into a prefix ('voc'), a base path and a
    local value so the anchor reads "voc:val" and targets the external
    definition. Returns the bare name unchanged if it has no prefix.
    * title = optional title attribute text
    * prop = optional RDFa property to emit as a <link> element
    """
    name = term.getId()
    if not ":" in name:
        return name
    if name.startswith("http") and '#' in name:
        # Fragment-style URI, e.g. http://example.org/vocab#Thing
        x = name.split("#")
        path = x[0] + "#"
        val = x[1]
        voc = getPrefixForPath(path)
    elif name.startswith("http"):
        # Slash-style URI: last path segment is the local name.
        val = os.path.basename(name)
        path = name[:len(name) - len(val)]
        voc = getPrefixForPath(path)
    else:
        # Prefixed form "voc:val": resolve the base path for the prefix.
        x = name.split(":")
        voc = x[0]
        val = x[1]
        path = getPathForPrefix(voc)
    if path:
        if not path.endswith("#") and not path.endswith("/"):
            path += "/"
    if title != '':
        title = " title=\"%s\"" % str(title)
    if prop:
        prop = " property=\"%s\"" % (prop)
    rdfalink = ''
    if prop:
        # Bugfix: this previously interpolated an undefined 'label' variable
        # (NameError whenever prop was supplied). Point the RDFa <link> at
        # the external term's own URI instead.
        rdfalink = '<link %s href="%s%s" />' % (prop, path, val)
    return "%s<a %s href=\"%s%s\" class=\"externlink\" target=\"_blank\">%s:%s</a>" % (rdfalink,title,path,val,voc,val)
def makeLinksFromArray(self, nodearray, tooltip=''):
    """Render nodearray as a comma-separated list of links via ml().
    * tooltip - optional text to use as title of all links
    """
    return ", ".join(self.ml(node, node.id, tooltip) for node in nodearray)
def emitUnitHeaders(self, term, layers='core'):
    """Write out the HTML page headers for this node.

    Emits the <h1> title, an extension banner when the term lives outside
    core, the canonical URL (unless the 'more info' block is enabled),
    equivalents, breadcrumbs and the rdfs:comment block.
    """
    self.write("<h1 property=\"rdfs:label\" class=\"page-title\">")
    self.write(term.getLabel())
    self.write("</h1>\n")
    home = term.getLayer()
    if home != "core" and home != "":
        # Term is defined in an extension: show the "Defined in ..." banner.
        exthome = "%s.schema.org" % home
        exthomeurl = uri = makeUrl(home,"/",full=True)
        linktext = "Defined in the %s section."
        lt = SdoConfig.getDescriptor(home,"linktext")
        if lt:
            if lt.count("%s") != 1:
                # Bugfix: lt.count() with no argument raises TypeError;
                # report the actual number of '%s' placeholders found.
                log.error("ERROR Linktext '%s' includes %s '%%s' - only 1 permitted" % (lt,lt.count("%s")))
            else:
                linktext = lt
        t = SdoConfig.getDescriptor(home,"disambiguatingDescription")
        linkinsert = "<a title=\"%s\" href=\"%s\">%s</a>" % (t,exthomeurl,home)
        self.write("<span class=\"extlink\">")
        self.write(linktext % linkinsert)
        self.write("<br/></span>")
    if not ENABLEMOREINFO:
        self.write(self.emitCanonicalURL(term))
    eq = self.emitEquivalents(term)
    if eq and len(eq):
        # Bugfix: previously called self.write() with no argument, which
        # silently dropped the equivalents markup (or raised TypeError).
        self.write(eq)
    self.BreadCrumbs(term)
    comment = term.getComment()
    self.write(" <div property=\"rdfs:comment\">%s</div>\n\n" % (comment) + "\n")
    usage = GetUsage(term.getId())
    #if len(usage):
    #    self.write(" <br/><div>Usage: %s</div>\n\n" % (usage) + "\n")
    if ENABLEMOREINFO:
        self.write(self.moreInfoBlock(term))
def emitCanonicalURL(self,term):
    """Return the 'Canonical URL' span for a term page.

    For schema.org proper, also writes (as a side effect) a sameAs <link>
    pointing at the term under the alternate http/https scheme.
    """
    out = ""  # NOTE(review): unused; retained as-is
    site = SdoConfig.vocabUri()
    if site != "http://schema.org":
        # Hosted/other vocabulary: plain (unlinked) canonical URL.
        cURL = "%s%s" % (site,term.getId())
        output = " <span class=\"canonicalUrl\">Canonical URL: %s</span> " % (cURL)
    else:
        cURL = "%s://schema.org/%s" % (CANONICALSCHEME,term.getId())
        # Advertise the same term under the other URI scheme.
        if CANONICALSCHEME == "http":
            other = "https"
        else:
            other = "http"
        sa = '\n<link property="sameAs" href="%s://schema.org/%s" />' % (other,term.getId())
        self.write(sa)
        output = " <span class=\"canonicalUrl\">Canonical URL: <a href=\"%s\">%s</a></span> " % (cURL, cURL)
    return output
def emitEquivalents(self,term):
    """Return HTML noting the equivalent classes/properties of term.

    Empty string when the term has no declared equivalents.
    """
    buff = StringIO.StringIO()
    equivs = term.getEquivalents()
    if len(equivs) > 0:
        if (term.isClass() or term.isDataType()):
            label = "Equivalent Class:"
        else:
            label = "Equivalent Property:"
        br = ""
        for e in equivs:
            # createReference tolerates equivalents from external vocabularies.
            eq = VTerm.getTerm(e,createReference=True)
            log.info("EQUIVALENT %s %s" % (e,eq))
            title = eq.getUri()
            buff.write("%s<span class=\"equivalents\">%s %s</span> " % (br,label,self.ml(eq,title=title)))
            br = "<br/>"  # line break before every entry after the first
    return buff.getvalue()
# Stacks to support multiple inheritance
crumbStacks = []
def BreadCrumbs(self, term):
    """Write the breadcrumb trail(s) for a term page as an <h4> block.

    One breadcrumb row is rendered per parent path. Properties get the
    implicit Property/Thing roots appended; data types (other than
    DataType itself) get the DataType root. Popping each stack walks
    from root down to the term; the penultimate crumb carries the
    rdfs:subClassOf / rdfs:subPropertyOf RDFa relation.
    """
    self.crumbStacks = term.getParentPaths()
    for cstack in self.crumbStacks:
        if term.isProperty():
            cstack.append(VTerm.getTerm("http://schema.org/Property"))
            cstack.append(VTerm.getTerm("http://schema.org/Thing"))
        elif term.isDataType() and not term.id == "DataType":
            cstack.append(VTerm.getTerm("http://schema.org/DataType"))
    enuma = term.isEnumerationValue()
    crumbsout = []
    for row in range(len(self.crumbStacks)):
        thisrow = ""
        targ = self.crumbStacks[row][len(self.crumbStacks[row])-1]
        if not targ:
            continue
        count = 0
        while(len(self.crumbStacks[row]) > 0):
            propertyval = None
            n = self.crumbStacks[row].pop()
            if((len(self.crumbStacks[row]) == 1) and n and
               not ":" in n.id) : #penultimate crumb that is not a non-schema reference
                if term.isProperty():
                    if n.isProperty(): #Can only be a subproperty of a property
                        propertyval = "rdfs:subPropertyOf"
                else:
                    propertyval = "rdfs:subClassOf"
            if(count > 0):
                # " :: " marks enumeration membership for the final crumb.
                if((len(self.crumbStacks[row]) == 0) and enuma): #final crumb
                    thisrow += " :: "
                else:
                    thisrow += " > "
            count += 1
            thisrow += "%s" % (self.ml(n,prop=propertyval))
        crumbsout.append(thisrow)
    self.write("<h4>")
    rowcount = 0
    for crumb in sorted(crumbsout):
        if rowcount > 0:
            self.write("<br/>")
        self.write("<span class='breadcrumbs'>%s</span>\n" % crumb)
        rowcount += 1
    self.write("</h4>\n")
#Walk up the stack, appending crumbs & create new (duplicating crumbs already identified) if more than one parent found
def WalkCrumbs(self, term, cstack):
    """Recursively climb term's supertypes, appending each to cstack.

    When a term has multiple parents, the current path is copied so each
    extra parent continues on its own stack (registered in
    self.crumbStacks). External references (':' in id) are suppressed.
    """
    if ":" in term.getId(): #Suppress external class references
        return
    cstack.append(term)
    tmpStacks = []
    tmpStacks.append(cstack)
    supers = term.getSupers()
    for i in range(len(supers)):
        if(i > 0):
            # Fork the path so each additional parent gets its own copy.
            t = cstack[:]
            tmpStacks.append(t)
            self.crumbStacks.append(t)
    x = 0
    for p in supers:
        self.WalkCrumbs(p,tmpStacks[x])
        x += 1
def emitSimplePropertiesPerType(self, cl, layers="core", out=None, hashorslash="/"):
    """Emits a simple list of properties applicable to the specified type."""
    target = out if out else self
    target.write("<ul class='props4type'>")
    for prop in VTerm.getTerm(cl).getProperties():
        if not prop.superseded():
            target.write("<li><a href='%s%s'>%s</a></li>" % (hashorslash, prop.getId(), prop.getId()))
    target.write("</ul>\n\n")
def emitSimplePropertiesIntoType(self, cl, layers="core", out=None, hashorslash="/"):
    """Emits a simple list of properties whose values are the specified type."""
    target = out if out else self
    target.write("<ul class='props2type'>")
    for prop in VTerm.getTerm(cl).getTargetOf():
        if not prop.superseded():
            target.write("<li><a href='%s%s'>%s</a></li>" % (hashorslash, prop.getId(), prop.getId()))
    target.write("</ul>\n\n")
def hideAtticTerm(self,term):
    """True when term should be hidden because it lives in the attic
    layer and we are not serving the attic site itself."""
    if getHostExt() == ATTIC:
        # On the attic site everything is visible.
        return False
    return bool(term.inLayers([ATTIC]))
def ClassProperties (self, cl, subclass=False, term=None, out=None, hashorslash="/"):
    """Write out a table of properties for a per-type page.

    * cl = the class whose direct properties are listed
    * subclass = True when cl is rendered as part of a supertype section
    * term = the term whose page is being rendered (controls header emission)
    Returns the number of property rows written.
    """
    if not out:
        out = self
    propcount = 0
    headerPrinted = False
    props = cl.getProperties()
    for prop in props:
        if prop.superseded() or self.hideAtticTerm(prop):
            continue
        olderprops = prop.getSupersedes()
        inverseprop = prop.getInverseOf()
        ranges = prop.getRanges()
        doms = prop.getDomains()
        comment = prop.getComment()
        if ":" in prop.id and comment == "-":
            comment = "Term from external vocabulary"
        # The outer table header is emitted once per page; tracked via an
        # app-level flag since this method runs once per stacked class.
        if not getAppVar("tableHdr"):
            setAppVar("tableHdr",True)
            if ((term.isClass() or term.isEnumeration()) and not term.isDataType() and term.id != "DataType"):
                self.write("<table class=\"definition-table\">\n <thead>\n <tr><th>Property</th><th>Expected Type</th><th>Description</th> \n </tr>\n </thead>\n\n")
                self.tablehdr = True
        if (not headerPrinted):
            # One "Properties from X" banner per contributing class.
            class_head = self.ml(cl)
            out.write("<tr class=\"supertype\">\n <th class=\"supertype-name\" colspan=\"3\">Properties from %s</th>\n \n</tr>\n\n<tbody class=\"supertype\">\n " % (class_head))
            headerPrinted = True
        out.write("<tr typeof=\"rdfs:Property\" resource=\"%s\">\n \n <th class=\"prop-nam\" scope=\"row\">\n\n<code property=\"rdfs:label\">%s</code>\n </th>\n " % (prop.getUri(), self.ml(prop)))
        out.write("<td class=\"prop-ect\">\n")
        first_range = True
        for r in ranges:
            if (not first_range):
                out.write(" or <br/> ")
            first_range = False
            out.write(self.ml(r, prop='rangeIncludes'))
            out.write(" ")
        # Invisible RDFa domainIncludes links for each domain class.
        for d in doms:
            out.write("<link property=\"domainIncludes\" href=\"%s\">" % d.getUri())
        out.write("</td>")
        out.write("<td class=\"prop-desc\" property=\"rdfs:comment\">%s" % (comment))
        if (olderprops and len(olderprops) > 0):
            olderlinks = ", ".join([self.ml(o) for o in olderprops])
            out.write(" Supersedes %s." % olderlinks )
        if (inverseprop != None):
            out.write("<br/> Inverse property: %s." % (self.ml(inverseprop)))
        out.write("</td></tr>")
        # At least one row printed: no filler row needed for this class.
        subclass = False
        propcount += 1
    if subclass: # in case the superclass has no defined attributes
        out.write("<tr><td colspan=\"3\"></td></tr>")
    return propcount
def emitClassExtensionSuperclasses (self, cl, layers="core", out=None):
    """List supertypes of cl that are defined outside the current layers.

    Renders an <h4> heading plus a <ul> of links; emits nothing when no
    such supertypes exist.
    """
    first = True
    count = 0
    if not out:
        out = self
    buff = StringIO.StringIO()
    #log.info("SUPERS %s" % VTerm.term2str(cl.getSupers()))
    for p in cl.getSupers():
        # Skip supers already visible in the requested layers.
        if not p.isReference() and p.inLayers(layers):
            continue
        sep = ", "
        if first:
            sep = "<li>"
            first = False
        buff.write("%s%s" % (sep,self.ml(p)))
        count += 1
    if(count > 0):
        buff.write("</li>\n")
    content = buff.getvalue()
    if(len(content) > 0):
        if cl.getId() == "DataType":
            # Bugfix: heading previously ended with an unclosed "<h4>".
            self.write("<h4>Subclass of:</h4>")
        else:
            self.write("<h4>Available supertypes defined elsewhere</h4>")
        self.write("<ul>")
        self.write(content)
        self.write("</ul>")
    buff.close()
""" def emitClassExtensionProperties (self, cl, layers="core", out=None):
if not out:
out = self
buff = StringIO.StringIO()
for p in self.parentStack:
self._ClassExtensionProperties(buff, p, layers=layers)
content = buff.getvalue()
if(len(content) > 0):
self.write("<h4>Available properties in extensions</h4>")
self.write("<ul>")
self.write(content)
self.write("</ul>")
buff.close()
"""
def _ClassExtensionProperties (self, out, cl, layers="core"):
    """Write out a list of properties not displayed as they are in extensions for a per-type page."""
    di = Unit.GetUnit("schema:domainIncludes")
    targetlayers=self.appropriateLayers(layers)
    #log.info("Appropriate targets %s" % targetlayers)
    exts = {}
    # Group cl's extension-only properties by their home layer.
    for prop in sorted(GetSources(di, cl, targetlayers), key=lambda u: u.id):
        if ":" in prop.id:
            continue
        if (prop.superseded(layers=targetlayers)):
            continue
        if inLayer(layers,prop): #Already in the correct layer - no need to report
            continue
        if inLayer("meta",prop): #Suppress mentioning properties from the 'meta' extension.
            continue
        ext = prop.getHomeLayer()
        if not ext in exts.keys():
            exts[ext] = []
        exts[ext].append(prop)
    # One <li> per extension, its properties comma-separated.
    for e in sorted(exts.keys()):
        count = 0
        first = True
        for p in sorted(exts[e], key=lambda u: u.id):
            sep = ", "
            if first:
                out.write("<li>For %s in the <a href=\"%s\">%s</a> extension: " % (self.ml(cl),makeUrl(e,""),e))
                sep = ""
                first = False
            out.write("%s%s" % (sep,self.ml(p)))
            count += 1
        if(count > 0):
            out.write("</li>\n")
def emitClassIncomingProperties (self, term, out=None, hashorslash="/"):
    """Write out a table of incoming properties for a per-type page.

    Lists every non-superseded property whose expected values include
    `term`, along with the types carrying it and its description. The
    table header is emitted lazily so nothing appears when there are no
    incoming properties.
    """
    if not out:
        out = self
    headerPrinted = False
    props = term.getTargetOf()
    for prop in props:
        if (prop.superseded()):
            continue
        supersedes = prop.getSupersedes()
        inverseprop = prop.getInverseOf()
        ranges = prop.getRanges()
        domains = prop.getDomains()
        comment = prop.getComment()
        if (not headerPrinted):
            self.write("<br/><br/><div id=\"incoming\">Instances of %s may appear as values for the following properties</div><br/>" % (self.ml(term)))
            self.write("<table class=\"definition-table\">\n \n \n<thead>\n <tr><th>Property</th><th>On Types</th><th>Description</th> \n </tr>\n</thead>\n\n")
            headerPrinted = True
        self.write("<tr>\n<th class=\"prop-nam\" scope=\"row\">\n <code>%s</code>\n</th>\n " % (self.ml(prop)) + "\n")
        self.write("<td class=\"prop-ect\">\n")
        first_dom = True
        for d in domains:
            if (not first_dom):
                self.write(" or<br/> ")
            first_dom = False
            self.write(self.ml(d))
        self.write(" ")
        self.write("</td>")
        self.write("<td class=\"prop-desc\">%s " % (comment))
        if supersedes:
            self.write(" Supersedes")
            first = True
            for s in supersedes:
                # Bugfix: the comma was previously written before the FIRST
                # item only; separate subsequent items with commas instead.
                if first:
                    first = False
                else:
                    self.write(",")
                self.write(" %s" % self.ml(s))
            self.write(". ")
        if inverseprop:
            self.write("<br/> inverse property: %s." % (self.ml(inverseprop)) )
        self.write("</td></tr>")
    if (headerPrinted):
        self.write("</table>\n")
def emitRangeTypesForProperty(self, node, layers="core", out=None, hashorslash="/"):
    """Write out simple HTML summary of this property's expected types."""
    target = out if out else self
    target.write("<ul class='attrrangesummary'>")
    for rangeterm in VTerm.getTerm(node).getRanges():
        target.write("<li><a href='%s%s'>%s</a></li>" % (hashorslash, rangeterm.getId(), rangeterm.getId()))
    target.write("</ul>\n\n")
def emitDomainTypesForProperty(self, node, layers="core", out=None, hashorslash="/"):
    """Write out simple HTML summary of types that expect this property."""
    target = out if out else self
    target.write("<ul class='attrdomainsummary'>")
    for domainterm in VTerm.getTerm(node).getDomains():
        target.write("<li><a href='%s%s'>%s</a></li>" % (hashorslash, domainterm.getId(), domainterm.getId()))
    target.write("</ul>\n\n")
def emitAttributeProperties(self, term, out=None, hashorslash="/"):
    """Write out properties of this property, for a per-property page.

    Emits, in order: inverse property (if any), a table of expected value
    types (rangeIncludes), a table of host types (domainIncludes), then
    sub-property and super-property tables when present.
    """
    if not out:
        out = self
    ranges = term.getRanges()
    domains =term.getDomains()
    inverseprop = term.getInverseOf()
    subprops = term.getSubs()
    superprops = term.getSupers()
    if (inverseprop != None):
        tt = "This means the same thing, but with the relationship direction reversed."
        out.write("<p>Inverse-property: %s.</p>" % (self.ml(inverseprop, inverseprop.getId(),tt, prop=False, hashorslash=hashorslash)) )
    # Expected value types (rangeIncludes).
    out.write("<table class=\"definition-table\">\n")
    out.write("<thead>\n <tr>\n <th>Values expected to be one of these types</th>\n </tr>\n</thead>\n\n <tr>\n <td>\n ")
    first_range = True
    for r in ranges:
        if (not first_range):
            out.write("<br/>")
        first_range = False
        tt = "The '%s' property has values that include instances of the '%s' type." % (term.getId(), r.getId())
        out.write(" <code>%s</code> " % (self.ml(r, r.getId(), tt, prop="rangeIncludes", hashorslash=hashorslash) +"\n"))
    out.write(" </td>\n </tr>\n</table>\n\n")
    # Types that carry this property (domainIncludes).
    first_domain = True
    out.write("<table class=\"definition-table\">\n")
    out.write(" <thead>\n <tr>\n <th>Used on these types</th>\n </tr>\n</thead>\n<tr>\n <td>")
    for d in domains:
        if (not first_domain):
            out.write("<br/>")
        first_domain = False
        tt = "The '%s' property is used on the '%s' type." % (term.getId(), d.getId())
        out.write("\n <code>%s</code> " % (self.ml(d, d.getId(), tt, prop="domainIncludes",hashorslash=hashorslash)+"\n" ))
    out.write(" </td>\n </tr>\n</table>\n\n")
    # Sub-properties
    if (subprops != None and len(subprops) > 0):
        out.write("<table class=\"definition-table\">\n")
        out.write(" <thead>\n <tr>\n <th>Sub-properties</th>\n </tr>\n</thead>\n")
        for sp in subprops:
            # Tooltip: truncated comment text.
            c = ShortenOnSentence(StripHtmlTags( sp.getComment() ),60)
            tt = "%s: ''%s''" % ( sp.getId(), c)
            out.write("\n <tr><td><code>%s</code></td></tr>\n" % (self.ml(sp, sp.getId(), tt, hashorslash=hashorslash)))
        out.write("\n</table>\n\n")
    # Super-properties
    if (superprops != None and len(superprops) > 0):
        out.write("<table class=\"definition-table\">\n")
        out.write(" <thead>\n <tr>\n <th>Super-properties</th>\n </tr>\n</thead>\n")
        for sp in superprops:
            c = ShortenOnSentence(StripHtmlTags( sp.getComment() ),60)
            tt = "%s: ''%s''" % ( sp.getId(), c)
            out.write("\n <tr><td><code>%s</code></td></tr>\n" % (self.ml(sp, sp.getId(), tt, hashorslash=hashorslash)))
        out.write("\n</table>\n\n")
def emitSupersedes(self, term, out=None, hashorslash="/"):
    """Write out Supersedes and/or Superseded by for this term"""
    if not out:
        out = self
    newerprop = term.getSupersededBy() # None or one. e.g. we're on 'seller'(new) page, we get 'vendor'(old)
    #olderprop = node.supersedes(layers=layers) # None or one
    olderprops = term.getSupersedes()
    # Supersedes: terms this one replaces (possibly several).
    if (olderprops != None and len(olderprops) > 0):
        out.write("<table class=\"definition-table\">\n")
        out.write(" <thead>\n <tr>\n <th>Supersedes</th>\n </tr>\n</thead>\n")
        for o in olderprops:
            # Tooltip: truncated comment text.
            c = ShortenOnSentence(StripHtmlTags( o.getComment() ),60)
            tt = "%s: ''%s''" % ( o.getId(), c)
            out.write("\n <tr><td><code>%s</code></td></tr>\n" % (self.ml(o, o.getId(), tt)))
            log.info("Super %s" % o.getId())
        out.write("\n</table>\n\n")
    # supersededBy (at most one direct successor)
    if (newerprop != None):
        out.write("<table class=\"definition-table\">\n")
        out.write(" <thead>\n <tr>\n <th><a href=\"/supersededBy\">supersededBy</a></th>\n </tr>\n</thead>\n")
        c = ShortenOnSentence(StripHtmlTags( newerprop.getComment() ),60)
        tt = "%s: ''%s''" % ( newerprop.getId(), c)
        out.write("\n <tr><td><code>%s</code></td></tr>\n" % (self.ml(newerprop, newerprop.getId(), tt)))
        out.write("\n</table>\n\n")
def rep(self, markup):
    """Replace < and > with HTML escape chars."""
    # Bugfix: the substitutions were identity replacements ("<" -> "<"),
    # leaving markup unescaped; substitute the HTML entities as the
    # docstring (and the example-rendering caller) intends.
    m1 = re.sub("<", "&lt;", markup)
    m2 = re.sub(">", "&gt;", m1)
    # TODO: Ampersand? Check usage with examples.
    return m2
def handleHomepage(self, node):
    """Send the homepage, or if no HTML accept header received and JSON-LD was requested, send JSON-LD context file.
    typical browser accept list: ('Accept', 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8')
    # e.g. curl -H "Accept: application/ld+json" http://localhost:8080/
    see also http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html
    https://github.com/rvguha/schemaorg/issues/5
    https://github.com/rvguha/schemaorg/wiki/JsonLd
    Always returns False so the response is never cached by the caller.
    """
    accept_header = self.request.headers.get('Accept')
    if accept_header:
        accept_header = accept_header.split(',')
    else:
        accept_header = ""
    # Homepage is content-negotiated. HTML or JSON-LD.
    mimereq = {}
    for ah in accept_header:
        # Strip quality factors (";q=0.9") before recording the mime type.
        ah = re.sub( r";q=\d?\.\d+", '', ah).rstrip()
        mimereq[ah] = 1
    # Lower score wins; absent types default high so HTML is preferred.
    html_score = mimereq.get('text/html', 5)
    xhtml_score = mimereq.get('application/xhtml+xml', 5)
    jsonld_score = mimereq.get('application/ld+json', 10)
    json_score = mimereq.get('application/json', 10)
    #log.info( "accept_header: " + str(accept_header) + " mimereq: "+str(mimereq) + "Scores H:{0} XH:{1} JL:{2} ".format(html_score,xhtml_score,jsonld_score,json_score))
    if (ENABLE_JSONLD_CONTEXT and ((jsonld_score < html_score and jsonld_score < xhtml_score) or (json_score < html_score and json_score < xhtml_score))):
        # Client explicitly prefers JSON(-LD): redirect to the context file.
        self.response.set_status(302,"Found")
        self.response.headers['Location'] = makeUrl("","docs/jsonldcontext.json")
        self.emitCacheHeaders()
        return False #don't cache this redirect
    else:
        # Serve a homepage from template
        # the .tpl has responsibility for extension homepages
        # TODO: pass in extension, base_domain etc.
        #sitekeyedhomepage = "homepage %s" % getSiteName()
        ext = getHostExt()
        if ext == "core":
            ext = ""
        if len(ext):
            ext += "."
        # Cache key is per-extension, e.g. "bib.index.html".
        sitekeyedhomepage = "%sindex.html" % ext
        hp = getPageFromStore(sitekeyedhomepage)
        self.response.headers['Content-Type'] = "text/html"
        self.emitCacheHeaders()
        if hp:
            self.response.out.write( hp )
            #log.info("Served datacache homepage.tpl key: %s" % sitekeyedhomepage)
        else:
            template_values = {
                'ext_contents': self.handleExtensionContents(getHostExt()),
                'home_page': "True",
            }
            page = templateRender('homepage.tpl', node, template_values)
            self.response.out.write( page )
            log.debug("Served and cached fresh homepage.tpl key: %s " % sitekeyedhomepage)
            setAppVar(CLOUDEXTRAMETA,{'x-goog-meta-sdotermlayer': getHostExt()})
            PageStore.put(sitekeyedhomepage, page)
            # self.response.out.write( open("static/index.html", 'r').read() )
        return False # - Not caching homepage
    log.info("Warning: got here how?")  # NOTE(review): unreachable - both branches return
    return False
def getExtendedSiteName(self, layers):
    """Returns site name (domain name), informed by the list of active layers."""
    # Core-only (or no layers at all) means the plain schema.org domain.
    if not layers or layers == ["core"]:
        return "schema.org"
    return getHostExt() + ".schema.org"
def emitSchemaorgHeaders(self, node, ext_mappings='', sitemode="default", sitename="schema.org", layers="core"):
    """Build the standard term-page HTML headers and stream them to the response."""
    headers = self.buildSchemaorgHeaders(node, ext_mappings, sitemode, sitename, layers)
    self.response.out.write(headers)
def buildSiteHeaders(self, term, ext_mappings='', sitemode="default", sitename="schema.org"):
    """
    Generates, caches and emits HTML headers for class, property and enumeration pages. Leaves <body> open.
    * entry = name of the class or property
    Returns the rendered header markup as a string.
    """
    buff = sdoStringIO()
    rdfs_type = 'rdfs:Class'
    entry = term.id
    if term.isProperty():
        rdfs_type = 'rdfs:Property'
    # (Removed a dead 'desc = entry' assignment that was immediately
    # overwritten by the call below.)
    desc = self.getMetaDescription(term, lengthHint=200)
    template_values = {
        'entry': str(entry),
        'desc' : desc,
        'menu_sel': "Schemas",
        'rdfs_type': rdfs_type,
        'ext_mappings': ext_mappings,
        'noindexpage': noindexpages
    }
    out = templateRender('genericTermPageHeader.tpl', term, template_values)
    buff.write(out)
    ret = buff.getvalue()
    buff.close()
    return ret
def buildSchemaorgHeaders(self, node, ext_mappings='', sitemode="default", sitename="schema.org", layers="core"):
    """
    Generates, caches and emits HTML headers for class, property and enumeration pages. Leaves <body> open.
    * entry = name of the class or property
    Accepts either a node object or a bare term-name string.
    """
    buff = sdoStringIO()
    rdfs_type = 'rdfs:Property'
    anode = True
    if isinstance(node, str):
        # Caller passed a bare term name rather than a node object.
        entry = node
        anode = False
    else:
        entry = node.id
        # Choose the most specific RDFa type for the page root element.
        if node.isEnumeration():
            rdfs_type = 'rdfs:Class'
        elif node.isEnumerationValue():
            # Enumeration values are typed by their enumeration class(es).
            rdfs_type = ""
            nodeTypes = GetTargets(Unit.GetUnit("rdf:type"), node, layers=layers)
            typecount = 0
            for type in nodeTypes:
                if typecount > 0:
                    rdfs_type += " "
                rdfs_type += type.id
                typecount += 1
        elif node.isClass():
            rdfs_type = 'rdfs:Class'
        elif node.isAttribute():
            rdfs_type = 'rdfs:Property'
    desc = entry
    if anode:
        desc = self.getMetaDescription(node, layers=layers, lengthHint=200)
    template_values = {
        'entry': str(entry),
        'desc' : desc,
        'menu_sel': "Schemas",
        'rdfs_type': rdfs_type,
        'ext_mappings': ext_mappings,
        'noindexpage': noindexpages
    }
    out = templateRender('genericTermPageHeader.tpl', node, template_values)
    buff.write(out)
    ret = buff.getvalue()
    buff.close()
    return ret
def getMetaDescription(self, term, layers="core",lengthHint=250):
    """Compose the <meta> description text for a term page, of roughly
    lengthHint characters, from the term kind, id and its comment."""
    if term.isEnumeration():
        kind = " Enumeration Type"
    elif term.isClass():
        kind = " Type"
    elif term.isProperty():
        kind = " Property"
    elif term.isEnumerationValue():
        kind = " Enumeration Value"
    else:
        kind = ""
    prefix = "Schema.org%s: %s - " % (kind, term.id)
    # Budget the remaining length for the (tag-stripped) comment text.
    remaining = lengthHint - len(prefix)
    return prefix + ShortenOnSentence(StripHtmlTags(term.getComment()), remaining)
def appropriateLayers(self,layers="core"):
    """All layers (including attic) when attic is requested; otherwise
    every layer except attic."""
    return ALL_LAYERS if ATTIC in layers else ALL_LAYERS_NO_ATTIC
def emitExactTermPage(self, term, layers="core"):
    """Emit a Web page that exactly matches this node.

    Serves a cached copy from the page store when available; otherwise
    renders the full term page (headers, property tables, supersedes,
    children, acknowledgements, examples) and caches it.
    """
    log.info("EXACT PAGE: %s" % term.getId())
    self.outputStrings = [] # blank slate
    cached = getPageFromStore(term.getId())
    if (cached != None):
        log.info("GOT CACHED page for %s" % term.getId())
        self.response.write(cached)
        return
    log.info("Building page")
    ext_mappings = GetExtMappingsRDFa(term)
    self.write(self.buildSiteHeaders(term, ext_mappings, sitemode, getSiteName()))
    self.emitUnitHeaders(term) # writes <h1><table>...
    stack = self._removeStackDupes(term.getTermStack())
    setAppVar("tableHdr",False)
    if term.isClass() or term.isDataType() or term.isEnumeration():
        for p in stack:
            # Bugfix: 'p==[0]' compared a term against a literal list and
            # was always False; the flag should mark the page's own term
            # (the first entry of the term stack).
            self.ClassProperties(p, p == stack[0], out=self, term=term)
        if getAppVar("tableHdr"):
            self.write("\n\n</table>\n\n")
        self.emitClassIncomingProperties(term)
        self.emitClassExtensionSuperclasses(term,layers)
        #self.emitClassExtensionProperties(p,layers) #Not needed since extension defined properties displayed in main listing
    elif term.isProperty():
        self.emitAttributeProperties(term)
    elif term.isDataType():
        # NOTE(review): unreachable - data types are handled by the first
        # branch above; retained for parity with the original control flow.
        self.emitClassIncomingProperties(term)
    self.emitSupersedes(term)
    self.emitchildren(term)
    self.emitAcksAndSources(term)
    self.emitTermExamples(term)
    self.write(" <br/>\n\n</div>\n</body>\n<!-- AppEngineVersion %s (%s)-->\n</html>" % (getAppEngineVersion(),appver))
    page = "".join(self.outputStrings)
    setAppVar(CLOUDEXTRAMETA,{'x-goog-meta-sdotermlayer': term.getLayer()})
    PageStore.put(term.getId(),page)
    self.response.write(page)
def emitTermExamples(self,term):
    """Render the tabbed example blocks attached to a term, if any.

    Each example is shown as four selectable <pre> panes: pre-markup
    HTML, Microdata, RDFa and JSON-LD.
    """
    examples = GetExamples(term)
    log.debug("Rendering n=%s examples" % len(examples))
    if (len(examples) > 0):
        # (label, example-attribute name, initial CSS selection state)
        example_labels = [
            ('Without Markup', 'original_html', 'selected'),
            ('Microdata', 'microdata', ''),
            ('RDFa', 'rdfa', ''),
            ('JSON-LD', 'jsonld', ''),
        ]
        self.write("<b><a %s >Examples</a></b><br/><br/>\n\n" % self.showlink("examples"))
        exNum = 0
        for ex in sorted(examples, key=lambda u: u.keyvalue):
            #if not ex.egmeta["layer"] in layers: #Example defined in extension we are not in
            #    continue
            exNum += 1
            # Prefer an explicit example id from metadata when present.
            id="example-%s" % exNum
            if "id" in ex.egmeta:
                id = ex.egmeta["id"]
            self.write("<div><a %s>Example %s</a></div>" % (self.showlink(id),exNum))
            self.write("<div class='ds-selector-tabs ds-selector'>\n")
            self.write(" <div class='selectors'>\n")
            for label, example_type, selected in example_labels:
                self.write(" <a data-selects='%s' class='%s'>%s</a>\n"
                           % (example_type, selected, label))
            self.write("</div>\n\n")
            # Example source is HTML-escaped via rep() before display.
            for label, example_type, selected in example_labels:
                self.write("<pre class=\"prettyprint lang-html linenums %s %s\">%s</pre>\n\n"
                           % (example_type, selected, self.rep(ex.get(example_type))))
            self.write("</div>\n\n")
def showlink(self,id):
    """Return anchor attributes turning an element into a clickable
    self-link targeting '#id'; empty string when id is empty/None."""
    if not id or not len(id):
        return ""
    return " id=\"%s\" title=\"Link: #%s\" href=\"#%s\" class=\"clickableAnchor\" " % (id, id, id)
def _removeStackDupes(self,stack):
cleanstack = []
i = len(stack)
while i:
i -= 1
if not stack[i] in cleanstack:
cleanstack.insert(0,stack[i])
return cleanstack
def emitAcksAndSources(self,term):
    """Emit the Source(s) and Acknowledgement(s) sections for a term page,
    each rendered through the Markdown parser."""
    sources = term.getSources()
    if len(sources):
        plural = "s" if len(sources) > 1 else ""
        self.write("<h4 id=\"acks\">Source%s</h4>\n" % plural)
        for val in sources:
            if val.startswith("http://") or val.startswith("https://"):
                # Present bare URLs as markdown links.
                val = "[%s](%s)" % (val, val)
            self.write(Markdown.parse(val, True))
    acknowledgements = term.getAcknowledgements()
    if len(acknowledgements):
        plural = "s" if len(acknowledgements) > 1 else ""
        self.write("<h4 id=\"acks\">Acknowledgement%s</h4>\n" % plural)
        for ack in sorted(acknowledgements):
            self.write(Markdown.parse(str(ack), True))
def emitchildren(self,term):
    """Emit a bulleted list of this term's sub-terms (more specific types,
    data types or enumeration members), skipping superseded and
    attic-hidden entries. Properties get no list."""
    children = term.getSubs()
    log.info("CHILDREN: %s" % VTerm.term2str(children))
    if len(children) > 0:
        items = []
        for child in children:
            if not (child.superseded() or self.hideAtticTerm(child)):
                items.append("<li> %s </li>" % (self.ml(child)))
        listing = "".join(items)
        if len(listing) > 0 and not term.isProperty():
            # Heading depends on what kind of term this page describes.
            if term.isDataType():
                self.write("<br/><b><a %s>More specific DataTypes</a></b><ul>" % self.showlink("subtypes"))
            elif term.isClass() or term.isEnumerationValue():
                self.write("<br/><b><a %s>More specific Types</a></b><ul>" % self.showlink("subtypes"))
            elif term.isEnumeration():
                self.write("<br/><b><a %s>Enumeration members</a></b><ul>" % self.showlink("enumbers"))
            self.write(listing)
            self.write("</ul>")
def emitHTTPHeaders(self, node):
    # The whole vocabulary is public data, so CORS may allow any origin.
    if ENABLE_CORS:
        self.response.headers.add_header("Access-Control-Allow-Origin", "*") # entire site is public.
        # see http://en.wikipedia.org/wiki/Cross-origin_resource_sharing
def setupExtensionLayerlist(self, node):
    """Return the deduplicated list of extension layers active for this
    request ('core' plus any ?ext= values and the host extension)."""
    # Identify which extension layer(s) are requested
    # TODO: add subdomain support e.g. bib.schema.org/Globe
    # instead of Globe?ext=bib which is more for debugging.
    # 1. get a comma list from ?ext=foo,bar URL notation
    extlist = cleanPath( self.request.get("ext") )# for debugging
    extlist = re.sub(ext_re, '', extlist).split(',')
    log.debug("?ext= extension list: '%s' " % ", ".join(extlist))
    # 2. Ignore ?ext=, start with 'core' only.
    layerlist = [ "core"]
    # 3. Use host_ext if set, e.g. 'bib' from bib.schema.org
    if getHostExt() != None:
        log.debug("Host: %s host_ext: %s" % ( self.request.host , getHostExt() ) )
        extlist.append(getHostExt())
    # Report domain-requested extensions
    for x in extlist:
        #log.debug("Ext filter found: %s" % str(x))
        if x in ["core", "localhost", ""]:
            continue
        layerlist.append("%s" % str(x))
    layerlist = list(set(layerlist)) # dedup
    #log.info("layerlist: %s" % layerlist)
    return layerlist
def handleJSONContext(self, node):
    """Handle JSON-LD Context non-homepage requests (including refuse if not enabled).

    Returns True when the request was handled (content or 404 sent),
    False when node is not a context-file path.
    """
    if not ENABLE_JSONLD_CONTEXT:
        self.error(404)
        self.response.out.write('<title>404 Not Found.</title><a href="/">404 Not Found (JSON-LD Context not enabled.)</a><br/><br/>')
        return True
    # Plain-text and JSON flavours share the same generated content,
    # differing only in Content-Type and cache key.
    if (node=="docs/jsonldcontext.json.txt"):
        label = "jsonldcontext.json.txt"
        self.response.headers['Content-Type'] = "text/plain"
    elif (node=="docs/jsonldcontext.json"):
        label = "jsonldcontext.json"
        self.response.headers['Content-Type'] = "application/ld+json"
    else:
        return False
    jsonldcontext = getPageFromStore(label)
    if not jsonldcontext:
        # Generate once over all layers, then cache in the page store.
        jsonldcontext = GetJsonLdContext(layers=ALL_LAYERS)
        PageStore.put(label,jsonldcontext)
    if jsonldcontext:
        self.emitCacheHeaders()
        self.response.out.write( jsonldcontext )
        return True
    return False
# see also handleHomepage for conneg'd version.
def handleSchemasPage(self, node, layerlist='core'):
    """Serve the schemas overview page, cached in the page store after
    the first render. Always returns True (handled)."""
    page = getPageFromStore(node)
    if page:
        self.response.out.write( page )
        log.debug("Serving recycled SchemasPage.")
        return True
    else:
        self.response.headers['Content-Type'] = "text/html"
        self.emitCacheHeaders()
        # Build links for every enabled extension except the attic,
        # which gets its own dedicated template slot below.
        extensions = []
        for ex in sorted(ENABLED_EXTENSIONS):
            if ex != ATTIC:
                t = SdoConfig.getDescriptor(ex,"disambiguatingDescription")
                extensions.append("<a title=\"%s\" href=\"%s\">%s.schema.org</a>" % (t,makeUrl(ex,"",full=True),ex))
        page = templateRender('schemas.tpl', node, {'counts': self.getCounts(),
                              'extensions': extensions,
                              'attic': "<a href=\"%s\">%s.schema.org</a>" % (makeUrl(ATTIC,""),ATTIC),
                              'menu_sel': "Schemas"})
        self.response.out.write( page )
        log.debug("Serving fresh SchemasPage.")
        PageStore.put(node,page)
        return True
def handleDumpsPage(self, node, layerlist='core'):
    """Serve the developers/downloads page, cached in the page store
    after the first render. Always returns True (handled)."""
    self.response.headers['Content-Type'] = "text/html"
    self.emitCacheHeaders()
    page = getPageFromStore(node)
    if page:
        self.response.out.write( page)
        log.debug("Serving recycled DumpsPage.")
        return True
    else:
        extensions = sorted(ENABLED_EXTENSIONS)
        page = templateRender('developers.tpl', node, {'extensions': extensions,
                              'version': SCHEMA_VERSION,
                              'menu_sel': "Schemas"})
        self.response.out.write( page )
        log.debug("Serving fresh DumpsPage.")
        PageStore.put(node,page)
        return True
def getCounts(self):
    """Return a sentence summarising the core vocabulary's term counts."""
    log.info("counts")
    typesCount = str(countTypes(extension="core"))
    log.info("TYPES %s" % typesCount)
    propsCount = str(countProperties(extension="core"))
    log.info("PROPS %s" % propsCount)
    enumCount = str(countEnums(extension="core"))
    log.info("ENUMS %s" % enumCount)
    return ("The core vocabulary currently consists of %s Types, "
            " %s Properties, "
            "and %s Enumeration values." % (typesCount, propsCount, enumCount))
def handleFullHierarchyPage(self, node, layerlist='core'):
    """Serve the full type-hierarchy page, caching the rendered output in
    the page store. Always returns True (handled).

    Cleanup: removed dead code - an unused JINJA_ENVIRONMENT.get_template
    call (templateRender is what actually renders), the unused
    extlist/extonlylist construction, and several never-read locals
    (thing_tree, ext_thing_tree, *_button) left from commented-out
    experiments. Also avoids the double page-store lookup.
    """
    urlprefix = ''
    label = node
    if label.startswith('docs/'):
        # Page lives under /docs/, so term links must climb one level.
        urlprefix = '..'
    page = getPageFromStore(label)
    if page:
        self.response.out.write( page )
        log.debug("Serving recycled %s." % label)
        return True
    self.response.headers['Content-Type'] = "text/html"
    self.emitCacheHeaders()
    # Walk every top-level term, splitting data types from ordinary types.
    local_label = ""
    full_thing_tree = ""
    datatype_tree = ""
    first = True
    dtcount = 0
    tcount = 0
    mainroot = TypeHierarchyTree(local_label)
    dtroot = TypeHierarchyTree("<h4>Data Types</h4>")
    for t in self.gettops():
        if not first:
            local_label = ""
        first = False
        top = VTerm.getTerm(t)
        if top.isDataType() or top.getUri() == "http://schema.org/DataType":
            dtcount += 1
            dtroot.traverseForHTML(top, layers=layerlist, idprefix="D.", urlprefix=urlprefix)
        else:
            tcount += 1
            mainroot.traverseForHTML(top, layers=layerlist, idprefix="C.", urlprefix=urlprefix, traverseAllLayers=True)
    if dtcount:
        datatype_tree += dtroot.toHTML()
    if tcount:
        full_thing_tree += mainroot.toHTML()
    page = templateRender('full.tpl', node, { 'full_thing_tree': full_thing_tree,
                                              'datatype_tree': datatype_tree,
                                              'menu_sel': "Schemas"})
    self.response.out.write( page )
    log.debug("Serving fresh %s." % label)
    PageStore.put(label,page)
    return True
def gettops(self):
    """Return the ids of the top-level (root) terms of the hierarchy.

    Thin delegate around the module-level rdfgettops() helper.
    """
    top_term_ids = rdfgettops()
    return top_term_ids
def handleJSONSchemaTree(self, node, layerlist='core'):
    """Handle a request for a JSON-LD tree representation of the schemas (RDFS-based).

    Serves a cached copy keyed on *node* when present; otherwise builds the
    tree from the "Thing" root, emits it, and caches it. Returns True.
    """
    if isinstance(node, Unit):
        # Accept either a Unit object or its string id as the cache key.
        node = node.id
    self.response.headers['Content-Type'] = "application/ld+json"
    self.emitCacheHeaders()
    page = getPageFromStore(node)
    if page:
        self.response.out.write( page )
        log.debug("Serving recycled JSONLDThingTree.")
        return True
    else:
        mainroot = TypeHierarchyTree()
        mainroot.traverseForJSONLD(VTerm.getTerm("Thing"), layers=layerlist)
        thing_tree = mainroot.toJSON()
        self.response.out.write( thing_tree )
        log.debug("Serving fresh JSONLDThingTree.")
        PageStore.put(node,thing_tree)
        return True
    # NOTE(review): unreachable — both branches above return True.
    return False
def checkConneg(self,node):
    """Perform HTTP content negotiation for a term page request.

    Inspects the Accept header; if the first recognised non-HTML type is a
    supported data format, responds with a 303 redirect to the same node
    with the matching file extension and returns True. Returns False when
    HTML is acceptable (or nothing matched), meaning: serve HTML as usual.
    """
    accept_header = self.request.headers.get('Accept')
    if accept_header:
        accept_header = accept_header.split(',')
    else:
        accept_header = ""  # empty string iterates zero times below
    target = None
    for ah in accept_header:
        if target:
            break  # first match wins
        # Strip any quality parameter, e.g. ";q=0.8".
        ah = re.sub( r";q=\d?\.\d+", '', ah).rstrip()
        log.debug("ACCEPT %s" % ah)
        if ah == "text/html":
            return False
        elif ah == "application/ld+json":
            target = ".jsonld"
        elif ah == "application/x-turtle":
            target = ".ttl"
        elif ah == "application/rdf+xml":
            target = ".rdf"
        elif ah == "text/plain":
            target = ".nt"
        elif ah == "text/csv":
            target = ".csv"
    if target:
        # 303 See Other to the extension-suffixed variant of the node.
        self.response.set_status(303,"See Other")
        self.response.headers['Location'] = makeUrl("","%s%s" % (node,target))
        self.emitCacheHeaders()
        return True
    return False
def handleExactTermPage(self, node, layers='core'):
    """Handle requests for specific terms like /Person or /fooBar.

    Tries, in order: data-format output (by file extension), content
    negotiation, then the HTML term page. Returns True when handled,
    False (or None via fall-through) to let the caller 404.
    """
    baseuri = SdoConfig.baseUri()
    if node.startswith(baseuri): #Special case will map full schema URI to the term name
        node = node[len(baseuri):]
    # NOTE(review): this string literal is a misplaced docstring — it is a
    # no-op statement here, not the function's docstring.
    """Handle with requests for specific terms like /Person, /fooBar. """
    dataext = os.path.splitext(node)
    if dataext[1] in OUTPUTDATATYPES:
        # e.g. /Person.jsonld — emit the raw data representation.
        ret = self.handleExactTermDataOutput(dataext[0],dataext[1])
        if ret == True:
            return True
    if self.checkConneg(node):
        return True  # 303 redirect already issued
    log.info("GETTING TERM: %s" % node)
    term = VTerm.getTerm(node)
    if not term:
        return False  # unknown term -> caller falls through to 404
    if not self.checkNodeExt(term):
        return False  # redirect to the term's proper (sub)domain was issued
    if not SUBDOMAINS or term.inLayers(layers):
        self.emitExactTermPage(term, layers=layers)
        return True
    # Implicit None return (falsy) when the term is not in the active layers.
def checkNodeExt(self,term):
    """Check that *term* is being served from its home (sub)domain.

    Returns True when the current host extension matches the term's layer
    (so the page may be emitted here); otherwise issues a 301 redirect to
    the correct host and returns False.
    """
    if os.environ.get('STAYINEXTENTION',"False").lower() == "true":
        # Operator override: never redirect between extensions.
        return True
    home = term.getLayer()   # layer/extension the term is defined in
    ext = getHostExt()       # extension implied by the request's subdomain
    log.info("term: '%s' home: '%s' ext: '%s'" % (term,home,ext))
    if home == CORE and ext == '':
        return True
    if SUBDOMAINS:
        log.info("Checking for correct subdomain")
        if home == ext:
            return True
        if home == CORE:
            log.info("Redirecting to core entity")
            self.redirectToBase(term.getId(),full=True)
        else:
            log.info("Redirecting to '%s' entity" % home)
            self.redirectToExt(term.getId(),ext=home, full=True)
        return False
    else: #SUBDOMAINS == False
        if ext == '':
            return True
        else:
            # Subdomain serving disabled: everything lives on the base host.
            log.info("SUBDOMAINS dissabled - Redirecting to core entity")
            self.redirectToBase(term.getId(),full=True)
            return False
def handleExactTermDataOutput(self, node=None, outputtype=None):
    """Emit a single term in a raw data format.

    *node* is the term id (no extension); *outputtype* is the requested
    extension: ".csv", ".jsonld", ".json", ".ttl", ".rdf"/".xml" or ".nt".
    Serialized output is cached in PageStore under "<ext>:<node><ext>".
    Returns True when the term exists and output was produced.
    """
    log.info("handleExactTermDataOutput Node: '%s' Outputtype: '%s'" % (node, outputtype))
    ret = False
    file = None
    if node and outputtype:
        term = VTerm.getTerm(node)
        if term:
            ret = True
            index = "%s:%s%s" % (outputtype,node,outputtype)
            data = getPageFromStore(index)
            # Attic-layer triples are excluded unless we are serving the
            # attic host itself.
            excludeAttic=True
            if getHostExt()== ATTIC:
                excludeAttic=False
            if outputtype == ".csv":
                self.response.headers['Content-Type'] = "text/csv; charset=utf-8"
                if not data:
                    data = self.emitcsvTerm(term,excludeAttic)
                    PageStore.put(index,data)
            else:
                format = None
                if outputtype == ".jsonld":
                    self.response.headers['Content-Type'] = "application/ld+json; charset=utf-8"
                    format = "json-ld"
                elif outputtype == ".json":
                    self.response.headers['Content-Type'] = "application/json; charset=utf-8"
                    format = "json"
                elif outputtype == ".ttl":
                    self.response.headers['Content-Type'] = "application/x-turtle; charset=utf-8"
                    format = "turtle"
                elif outputtype == ".rdf" or outputtype == ".xml" :
                    self.response.headers['Content-Type'] = "application/rdf+xml; charset=utf-8"
                    format = "pretty-xml"
                elif outputtype == ".nt":
                    self.response.headers['Content-Type'] = "text/plain; charset=utf-8"
                    format = "nt"
                if format:
                    if not data:
                        # BUG FIX: previously passed excludeAttic=True
                        # unconditionally, ignoring the attic-host check
                        # computed above (which the .csv branch honours).
                        data = serializeSingleTermGrapth(node=node, format=format, excludeAttic=excludeAttic)
                        # NOTE(review): the cache key does not encode the
                        # attic state — attic and non-attic hosts may share
                        # entries; verify PageStore is per-host partitioned.
                        PageStore.put(index,data)
            if data:
                self.emitCacheHeaders()
                self.response.out.write( data )
                ret = True
    return ret
def emitcsvTerm(self,term,excludeAttic=True):
    """Serialize a single term (plus a header row) to CSV and return it as a string.

    Types and enumeration values use the type layout; properties use the
    property layout. *excludeAttic* suppresses attic-layer data.
    """
    csv = sdordf2csv(queryGraph=getQueryGraph(),fullGraph=getQueryGraph(),markdownComments=True,excludeAttic=excludeAttic)
    # NOTE: 'file' shadows the Python 2 builtin; harmless but worth renaming someday.
    file = StringIO.StringIO()
    termUri = term.getUri()
    if term.isClass() or term.isEnumerationValue():
        csv.type2CSV(header=True,out=file)
        csv.type2CSV(term=termUri,header=False,out=file)
    elif term.isProperty():
        csv.prop2CSV(header=True,out=file)
        csv.prop2CSV(term=termUri,header=False,out=file)
    data = file.getvalue()
    file.close()
    return data
def handle404Failure(self, node, layers="core", extrainfo=None, suggest=True):
self.error(404)
self.emitSchemaorgHeaders("404 Not Found")
#404 could be called from any path, so output all potential locations of schemaorg.css
self.response.out.write('<link rel="stylesheet" type="text/css" href="../docs/schemaorg.css" />')
self.response.out.write('<link rel="stylesheet" type="text/css" href="docs/schemaorg.css" />')
self.response.out.write('<link rel="stylesheet" type="text/css" href="/docs/schemaorg.css" />')
self.response.out.write('<h3>404 Not Found.</h3><p><br/>Page not found. Please <a href="/">try the homepage.</a><br/><br/></p>')
if suggest:
clean_node = cleanPath(node)
log.debug("404: clean_node: clean_node: %s node: %s" % (clean_node, node))
base_term = VTerm.getTerm( node.rsplit('/')[0] )
if base_term != None :
self.response.out.write('<div>Perhaps you meant: <a href="/%s">%s</a></div> <br/><br/> ' % ( base_term.getId(), base_term.getId() ))
base_actionprop = VTerm.getTerm( node.rsplit('-')[0] )
if base_actionprop != None :
self.response.out.write('<div>Looking for an <a href="/Action">Action</a>-related property? Note that xyz-input and xyz-output have <a href="/docs/actions.html">special meaning</a>. See also: <a href="/%s">%s</a></div> <br/><br/> ' % ( base_actionprop.getId(), base_actionprop.getId() ))
if extrainfo:
self.response.out.write("<div>%s</div>" % extrainfo)
self.response.out.write("</div>\n</body>\n<!--AppEngineVersion %s -->\n</html>\n" % getAppEngineVersion())
return True
def handleFullReleasePage(self, node, layerlist='core'):
    """Deal with a request for a full release summary page. Lists all terms and their descriptions inline in one long page.
    version/latest/ is from current schemas, others will need to be loaded and emitted from stored HTML snapshots (for now)."""
    # http://jinja.pocoo.org/docs/dev/templates/
    global releaselog
    clean_node = cleanPath(node)
    self.response.headers['Content-Type'] = "text/html"
    self.emitCacheHeaders()
    # "version/3.4/schema.nt" -> version "3.4", format "schema.nt".
    requested_version = clean_node.rsplit('/')[1]
    requested_format = clean_node.rsplit('/')[-1]
    if len( clean_node.rsplit('/') ) == 2:
        requested_format=""
    log.info("Full release page for: node: '%s' cleannode: '%s' requested_version: '%s' requested_format: '%s' l: %s" % (node, clean_node, requested_version, requested_format, len(clean_node.rsplit('/')) ) )
    # Full release page for: node: 'version/' cleannode: 'version/' requested_version: '' requested_format: '' l: 2
    # /version/
    log.debug("clean_node: %s requested_version: %s " % (clean_node, requested_version))
    if (clean_node=="version/" or clean_node=="version") and requested_version=="" and requested_format=="":
        # Bare /version/ -> emit the table-of-contents page instead.
        log.info("Table of contents should be sent instead, then succeed.")
        if getPageFromStore('tocVersionPage'):
            self.response.out.write( getPageFromStore('tocVersionPage'))
            return True
        else:
            # NOTE(review): this message is misleading — this is the
            # fresh-build path, not the cache path.
            log.debug("Serving tocversionPage from cache.")
            page = templateRender('tocVersionPage.tpl', node,
                                  {"releases": sorted(releaselog.iterkeys()),
                                   "menu_sel": "Schemas"})
            self.response.out.write( page )
            log.debug("Serving fresh tocVersionPage.")
            PageStore.put("tocVersionPage",page)
            return True
    if requested_version in releaselog:
        # A released, snapshotted version: serve the stored files directly.
        log.info("Version '%s' was released on %s. Serving from filesystem." % ( node, releaselog[requested_version] ))
        version_rdfa = "data/releases/%s/schema.rdfa" % requested_version
        version_allhtml = "data/releases/%s/schema-all.html" % requested_version
        version_nt = "data/releases/%s/schema.nt" % requested_version
        if requested_format=="":
            self.response.out.write( open(version_allhtml, 'r').read() )
            return True
        # log.info("Skipping filesystem for now.")
        if requested_format=="schema.rdfa":
            self.response.headers['Content-Type'] = "application/octet-stream" # It is HTML but ... not really.
            self.response.headers['Content-Disposition']= "attachment; filename=schemaorg_%s.rdfa.html" % requested_version
            self.response.out.write( open(version_rdfa, 'r').read() )
            return True
        if requested_format=="schema.nt":
            self.response.headers['Content-Type'] = "application/n-triples" # It is HTML but ... not really.
            self.response.headers['Content-Disposition']= "attachment; filename=schemaorg_%s.rdfa.nt" % requested_version
            self.response.out.write( open(version_nt, 'r').read() )
            return True
        if requested_format != "":
            return False # Turtle, csv etc.
    else:
        log.info("Unreleased version requested. We only understand requests for latest if unreleased.")
        if requested_version != "build-latest":
            return False
            # NOTE(review): unreachable — placed after the return.
            log.info("giving up to 404.")
        else: # build-latest
            # NOTE(review): reassigning here makes the later
            # 'requested_version == "build-latest"' check always False,
            # so the UNRELEASED releasedate branch below is dead.
            requested_version = SCHEMA_VERSION
            log.info("generating a live view of this latest release (with SCHEMA_VERSION set as: %s)." % SCHEMA_VERSION)
    if getPageFromStore('FullReleasePage.html'):
        self.response.out.write( getPageFromStore('FullReleasePage.html') )
        log.debug("Serving recycled FullReleasePage.")
        return True
    else:
        # Fresh build: render the full hierarchy plus per-term metadata.
        mainroot = TypeHierarchyTree()
        mainroot.traverseForHTML(VTerm.getTerm("Thing"), hashorslash="#term_", layers=layerlist)
        thing_tree = mainroot.toHTML()
        base_href = "/version/%s/" % requested_version
        az_types = GetAllTypes()
        az_types.sort()
        az_type_meta = {}
        az_props = GetAllProperties()
        az_props.sort()
        az_prop_meta = {}
        # TYPES
        for t in az_types:
            props4type = HTMLOutput() # properties applicable for a type
            props2type = HTMLOutput() # properties that go into a type
            self.emitSimplePropertiesPerType(t, out=props4type, hashorslash="#term_" )
            self.emitSimplePropertiesIntoType(t, out=props2type, hashorslash="#term_" )
            tcmt = Markup(VTerm.getTerm(t).getComment())
            az_type_meta[t]={}
            az_type_meta[t]['comment'] = tcmt
            az_type_meta[t]['props4type'] = props4type.toHTML()
            az_type_meta[t]['props2type'] = props2type.toHTML()
        # PROPERTIES
        for pt in az_props:
            attrInfo = HTMLOutput()
            rangeList = HTMLOutput()
            domainList = HTMLOutput()
            # self.emitAttributeProperties(pt, out=attrInfo, hashorslash="#term_" )
            # self.emitSimpleAttributeProperties(pt, out=rangedomainInfo, hashorslash="#term_" )
            self.emitRangeTypesForProperty(pt, out=rangeList, hashorslash="#term_" )
            self.emitDomainTypesForProperty(pt, out=domainList, hashorslash="#term_" )
            cmt = Markup(VTerm.getTerm(pt).getComment())
            az_prop_meta[pt] = {}
            az_prop_meta[pt]['comment'] = cmt
            az_prop_meta[pt]['attrinfo'] = attrInfo.toHTML()
            az_prop_meta[pt]['rangelist'] = rangeList.toHTML()
            az_prop_meta[pt]['domainlist'] = domainList.toHTML()
        if requested_version == "build-latest":
            requested_version = SCHEMA_VERSION
            releasedate = "XXXX-XX-XX (UNRELEASED PREVIEW VERSION)"
        else:
            releasedate = releaselog[str(SCHEMA_VERSION)]
        page = templateRender('fullReleasePage.tpl', node,
                              {"base_href": base_href,
                               'thing_tree': thing_tree,
                               'liveversion': SCHEMA_VERSION,
                               'requested_version': requested_version,
                               'releasedate': releasedate,
                               'az_props': az_props, 'az_types': az_types,
                               'az_prop_meta': az_prop_meta, 'az_type_meta': az_type_meta,
                               'menu_sel': "Documentation"})
        self.response.out.write( page )
        log.debug("Serving fresh FullReleasePage.")
        PageStore.put("FullReleasePage.html",page)
        return True
def handleExtensionContents(self,ext):
    """Build the HTML listing of all terms defined in extension *ext*.

    Terms are grouped by category, each group listing its types,
    properties and enumeration values. Returns the HTML fragment
    (empty string when the extension is not enabled).
    """
    if not ext in ENABLED_EXTENSIONS:
        return ""
    # if getPageFromStore('ExtensionContents',ext):
    # return getPageFromStore('ExtensionContents',ext)
    buff = StringIO.StringIO()
    az_terms = VTerm.getAllTerms(layer=ext) #Returns sorted by id results.
    # groupby below requires sorting by the same key first.
    az_terms.sort(key = lambda u: u.category)
    if len(az_terms) > 0:
        buff.write("<br/><div style=\"text-align: left; margin: 2em\"><h3>Terms defined in the '%s' section.</h3>" % ext)
        keys = []
        groups = []
        for k,g in itertools.groupby(az_terms, key = lambda u: u.category):
            keys.append(k)
            groups.append(list(g))
        # Re-sort each category group by term id for stable display order.
        i = 0
        while i < len(groups):
            groups[i] = sorted(groups[i],key = lambda u: u.id)
            i += 1
        g=0
        while g < len(groups):
            if g > 0:
                buff.write("<br/>")
            buff.write(self.listTerms(groups[g],"<br/>%s Types (%s)<br/>" %
                                      (keys[g],self.countTypes(groups[g],select="type",layers=ext)),select="type",layers=ext))
            buff.write(self.listTerms(groups[g],"<br/>%s Properties (%s)<br/>" %
                                      (keys[g],self.countTypes(groups[g],select="prop",layers=ext)),select="prop",layers=ext))
            buff.write(self.listTerms(groups[g],"<br/>%s Enumeration values (%s)<br/>" %
                                      (keys[g],self.countTypes(groups[g],select="enum",layers=ext)),select="enum",layers=ext))
            g += 1
        buff.write("</div>")
    ret = buff.getvalue()
    # PageStore.put('ExtensionContents',ret,ext)
    buff.close()
    return ret
def countTypes(self,interms,select="",layers='core'):
ret = 0
for t in interms:
if select == "type" and t.isClass():
ret += 1
elif select == "prop" and t.isProperty():
ret += 1
elif select == "enum" and t.isEnumerationValue():
ret +=1
elif select == "":
ret += 1
return ret
def listTerms(self,interms,prefix="",select=None,layers='core'):
buff = StringIO.StringIO()
terms = interms
if select:
terms = []
for t in interms:
use = False
if select == "type":
use = t.isClass()
elif select == "prop":
use = t.isProperty()
elif select == "enum":
use = t.isEnumerationValue()
if use:
terms.append(t)
if(len(terms) > 0):
buff.write(prefix)
first = True
sep = ""
for term in terms:
if not first:
sep = ", "
else:
first = False
buff.write("%s%s" % (sep,self.ml(term)))
ret = buff.getvalue()
buff.close()
return ret
def setupHostinfo(self, node, test=""):
global noindexpages
node = str(node)
hostString = test
host_ext = ""
args = []
if test == "":
hostString = self.request.host
args = self.request.arguments()
ver=None
if not getInTestHarness():
from google.appengine.api.modules.modules import get_current_version_name
ver = get_current_version_name()
if hostString.startswith("%s." % ver):
log.info("Removing version prefix '%s' from hoststring" % ver)
hostString = hostString[len(ver) + 1:]
scheme = "http" #Defalt for tests
if not getInTestHarness(): #Get the actual scheme from the request
scheme = self.request.scheme
setHttpScheme(scheme)
match = re.match( r'([\w\-_]+)[\.:]?', hostString)
host_ext = str(match.group(1))
match0 = str(match.group(0))
if host_ext + ":" == match0: #Special case for URLs with no subdomains - eg. localhost
host_ext = ""
split = hostString.rsplit(':')
myhost = split[0]
mybasehost = myhost
myport = "80"
if len(split) > 1:
myport = split[1]
setHostPort(myport)
log.info("setupHostinfo: data: scheme='%s' hoststring='%s' initial host_ext='%s'" % (scheme, hostString, str(host_ext) ))
ver=None
if not getInTestHarness():
from google.appengine.api.modules.modules import get_current_version_name
ver = get_current_version_name()
if host_ext != "":
if host_ext in ENABLED_EXTENSIONS:
mybasehost = mybasehost[len(host_ext) + 1:]
elif host_ext == "www":
mybasehost = mybasehost[4:]
setBaseHost(mybasehost)
log.info("Host extention '%s' - redirecting to '%s'" % (host_ext,mybasehost))
return self.redirectToBase(node,True)
else:
tempbase = mybasehost[len(host_ext)+1:]
if tempbase in WORKINGHOSTS: #Known hosts so can control extention values
mybasehost = tempbase
setHostExt("")
setBaseHost(mybasehost)
log.info("Host extention '%s' not enabled - redirecting to '%s'" % (host_ext,mybasehost))
return self.redirectToBase(node,True)
else: #Unknown host so host_ext may be just part of the host string
host_ext = ""
log.info("setupHostinfo: calculated: basehost='%s' host_ext='%s'" % (mybasehost, host_ext ))
setHostExt(host_ext)
setBaseHost(mybasehost)
if mybasehost == "schema.org":
noindexpages = False
if "FORCEINDEXPAGES" in os.environ:
if os.environ["FORCEINDEXPAGES"] == "True":
noindexpages = False
log.info("[%s] noindexpages: %s" % (getInstanceId(short=True),noindexpages))
setHostExt(host_ext)
setBaseHost(mybasehost)
setHostPort(myport)
setArguments(args)
dcn = host_ext
if dcn == None or dcn == "" or dcn =="core":
dcn = "core"
if scheme != "http":
dcn = "%s-%s" % (dcn,scheme)
dcn = "single" #Forcing single cache
#log.info("Forcing single cache. !!!!!!!!!!!!!!!!")
#log.info("sdoapp.py setting current datacache to: %s " % dcn)
DataCache.setCurrent(dcn)
PageStore.setCurrent(dcn)
HeaderStore.setCurrent(dcn)
debugging = False
if "localhost" in hostString or "sdo-phobos.appspot.com" in hostString or FORCEDEBUGGING:
debugging = True
setAppVar('debugging',debugging)
return True
def redirectToBase(self,node="",full=False):
uri = makeUrl("",node,full)
log.info("Redirecting [301] to: %s" % uri)
if not getInTestHarness():
self.response = webapp2.redirect(uri, True, 301)
return False
def redirectToExt(self,node="",ext="",full=False):
uri = makeUrl(ext,node,full)
log.info("Redirecting [301] to: %s" % uri)
if not getInTestHarness():
self.response = webapp2.redirect(uri, True, 301)
return False
def head(self, node):
    """Serve a HEAD request: run the normal GET, then drop the payload.

    The generated status and headers are preserved; only the body is
    cleared, per HTTP HEAD semantics.
    """
    self.get(node)
    # Snapshot what we must keep before clearing the response.
    saved_headers = self.response.headers.copy()
    saved_status = self.response.status
    self.response.clear()
    self.response.headers = saved_headers
    self.response.status = saved_status
    return
def get(self, node):
    """Top-level GET handler: conditional-request handling around _get().

    Validates the node name, answers 304 Not Modified from cached headers
    (ETag / If-Unmodified-Since) when possible, otherwise delegates page
    generation to _get() and caches the resulting headers for future
    conditional requests.
    """
    if not self.setupHostinfo(node):
        return  # a redirect was issued during host setup
    log.info("NODE: '%s'" % node)
    if not node or node == "":
        node = "/"
    if not validNode_re.search(str(node)) or os.path.basename(str(node)).count('.') > 2: #invalid node name
        log.warning("Invalid node name '%s'" % str(node))
        self.handle404Failure(node,suggest=False)
        return
    NotModified = False
    matchTag = self.request.headers.get("If-None-Match",None)
    unMod = self.request.headers.get("If-Unmodified-Since",None)
    #log.info("matchTag '%s' unMod '%s'" % (matchTag,unMod))
    # Header cache key: "<hostext>:<node>" (or just node for the core host).
    hdrIndex = getHostExt()
    if len(hdrIndex):
        hdrIndex += ":"
    hdrIndex += node
    hdrs = HeaderStore.get(hdrIndex)
    mod = None
    if hdrs:
        etag = hdrs.get("ETag",None)
        mod = hdrs.get("Last-Modified",None)
        log.info("stored etag '%s' mod '%s'" % (etag,mod))
        if matchTag == etag:
            NotModified = True
        elif unMod:
            unModt = datetime.datetime.strptime(unMod,"%a, %d %b %Y %H:%M:%S %Z")
            modt = datetime.datetime.strptime(mod,"%a, %d %b %Y %H:%M:%S %Z")
            if modt <= unModt:
                log.info("Last mod '%s' not modified since '%s' " % (mod,unMod))
                NotModified = True
    if hdrs and "_pageFlush" in getArguments():
        # Explicit flush request: drop the cached headers and regenerate.
        log.info("Reloading header for %s" % hdrIndex)
        HeaderStore.remove(hdrIndex)
        hdrs = None
        NotModified = False
    if NotModified:
        self.response.clear()
        self.response.headers = hdrs
        self.response.set_status(304,"Not Modified")
    else:
        enableCaching = self._get(node) #Go get the page
        if enableCaching:
            if self.response.status.startswith("200"):
                stat = getAppVar(CLOUDSTAT)
                log.info("CLOUDSTAT %s" % stat)
                if stat: #Use values from cloud storage
                    self.response.headers.add_header("ETag", stat.etag)
                    self.response.headers['Last-Modified'] = time.strftime("%a, %d %b %Y %H:%M:%S GMT",time.gmtime(stat.st_ctime))
                    self.response.headers['Content-Type'] = stat.content_type
                else:
                    if not self.response.headers.get('Content-Type',None):
                        # NOTE(review): guess_type returns (type, encoding);
                        # the second element is not a content type, and
                        # mimetype may be None for unknown extensions.
                        mimetype, contentType = mimetypes.guess_type(node)
                        self.response.headers['Content-Type'] = mimetype
                    self.response.headers.add_header("ETag", getslug() + str(hash(hdrIndex)))
                    self.response.headers['Last-Modified'] = getmodiftime().strftime("%a, %d %b %Y %H:%M:%S GMT")
                store = True
                if mod: #Previous hdrs cached for this node
                    new = self.response.headers.get('Last-Modified',None)
                    if new and new == mod: #previous cached hdrs has same time as new one
                        store = False #No point storing it again
                if store:
                    retHdrs = self.response.headers.copy()
                    try:
                        HeaderStore.put(hdrIndex,retHdrs) #Cache these headers for a future 304 return
                    except Exception as e:
                        # Best effort: header caching failure must not break the response.
                        log.warning("HeaderStore.put(%s) returned exception: %s" % (hdrIndex,e))
                        log.info("Abandoning caching of response headers for '%s'" % node)
                        pass
    #self.response.set_cookie('GOOGAPPUID', getAppEngineVersion())
    log.info("Responding:\n%s\nstatus: %s\n%s" % (node,self.response.status,self.response.headers ))
def _get(self, node, doWarm=True):
    global LOADEDSOURCES
    """Get a schema.org site page generated for this node/term.
    Web content is written directly via self.response.
    CORS enabled all URLs - we assume site entirely public.
    See http://en.wikipedia.org/wiki/Cross-origin_resource_sharing
    These should give a JSON version of schema.org:
    curl --verbose -H "Accept: application/ld+json" http://localhost:8080/docs/jsonldcontext.json
    curl --verbose -H "Accept: application/ld+json" http://localhost:8080/docs/jsonldcontext.json.txt
    curl --verbose -H "Accept: application/ld+json" http://localhost:8080/
    Per-term pages vary for type, property and enumeration.
    Last resort is a 404 error if we do not exactly match a term's id.
    See also https://webapp-improved.appspot.com/guide/request.html#guide-request
    Return True to enable browser caching ETag/Last-Modified - False for no cache
    """
    global_vars.time_start = datetime.datetime.now()
    tick() #keep system fresh
    log.info("[%s] _get(%s)" % (getInstanceId(short=True),node))
    self.callCount()
    if (node in silent_skip_list):
        return False
    if ENABLE_HOSTED_EXTENSIONS:
        layerlist = self.setupExtensionLayerlist(node) # e.g. ['core', 'bib']
    else:
        layerlist = ["core"]
    setSiteName(self.getExtendedSiteName(layerlist)) # e.g. 'bib.schema.org', 'schema.org'
    log.debug("EXT: set sitename to %s " % getSiteName())
    if not LOADEDSOURCES:
        # Lazy-load schema definitions and examples on first use in this instance.
        log.info("Instance[%s] received request for not stored page: %s" % (getInstanceId(short=True), node) )
        log.info("Instance[%s] needs to load sources to create it" % (getInstanceId(short=True)) )
        load_sources() #Get Examples files and schema definitions
    self.emitHTTPHeaders(node) #Ensure we have the right basic header values
    if node.startswith("docs/"):
        return self._getDocs(node,layerlist=layerlist)
    # --- App Engine lifecycle hooks -------------------------------------
    if(node == "_ah/warmup"):
        if "localhost" in os.environ['SERVER_NAME'] and WarmupState.lower() == "auto":
            log.info("[%s] Warmup dissabled for localhost instance" % getInstanceId(short=True))
            if DISABLE_NDB_FOR_LOCALHOST:
                log.info("[%s] NDB dissabled for localhost instance" % getInstanceId(short=True))
                enablePageStore("INMEM")
        else:
            # "warmedup" memcache flag ensures only one instance runs the warm.
            if not memcache.get("warmedup"):
                memcache.set("warmedup", value=True)
                self.warmup()
            else:
                log.info("Warmup already actioned")
        return False
    #elif doWarm: #Do a bit of warming on each call
    #global WarmedUp
    #global Warmer
    #if not WarmedUp:
    #Warmer.stepWarm(self)
    if(node == "admin/refresh"):
        # Flush all caches and report how long the clean took.
        log.info("Processing refesh request")
        load_start = datetime.datetime.now()
        memcache.flush_all()
        memcache.set(key="app_initialising", value=True, time=300) #Give the system 5 mins - auto remove flag in case of crash
        cleanmsg = CacheControl.clean()
        log.info("Clean count(s): %s" % cleanmsg)
        log.info(("[%s] Cache clean took %s " % (getInstanceId(short=True),(datetime.datetime.now() - load_start))))
        memcache.set(key="app_initialising", value=False)
        storeInitialisedTimestamp()
        self.emitSchemaorgHeaders("Refresh")
        #404 could be called from any path, so output all potential locations of schemaorg.css
        self.response.out.write('<link rel="stylesheet" type="text/css" href="../docs/schemaorg.css" />')
        self.response.out.write('<link rel="stylesheet" type="text/css" href="docs/schemaorg.css" />')
        self.response.out.write('<link rel="stylesheet" type="text/css" href="/docs/schemaorg.css" />')
        self.response.out.write('<h3>Refresh Completed</h3><p>Took: %s</p>' % (datetime.datetime.now() - load_start))
        return False
    if(node == "_ah/start"):
        log.info("Instance[%s] received Start request at %s" % (modules.get_current_instance_id(), global_vars.time_start) )
        if "localhost" in os.environ['SERVER_NAME'] and WarmupState.lower() == "auto":
            log.info("[%s] Warmup dissabled for localhost instance" % getInstanceId(short=True))
            if DISABLE_NDB_FOR_LOCALHOST:
                log.info("[%s] NDB dissabled for localhost instance" % getInstanceId(short=True))
                enablePageStore("INMEM")
        else:
            if not memcache.get("warmedup"):
                memcache.set("warmedup", value=True)
                self.warmup()
            else:
                log.info("Warmup already actioned")
        return False
    if(node == "_ah/stop"):
        log.info("Instance[%s] received Stop request at %s" % (modules.get_current_instance_id(), global_vars.time_start) )
        log.info("Flushing memcache")
        memcache.flush_all()
        return False
    # --- Site pages -----------------------------------------------------
    if (node in ["", "/"]):
        return self.handleHomepage(node)
    currentVerPath = "version/%s" % SCHEMA_VERSION
    if(node.startswith("version/latest")):
        # version/latest/* -> 302 to the current numbered release path.
        newurl = "%s%s" % (currentVerPath,node[14:])
        log.info("REDIRECTING TO: %s" % newurl)
        self.response.set_status(302,"Found")
        self.response.headers['Location'] = makeUrl("",newurl)
        self.emitCacheHeaders()
        return False #don't cache this redirect
    #Match nodes of pattern 'version/*' 'version/*/' or 'version/'
    if (re.match(r'^version/[^/]*$', str(node)) or re.match(r'^version/[^/]*/$', str(node)) or node == "version/") :
        if self.handleFullReleasePage(node, layerlist=layerlist):
            return True
        else:
            log.info("Error handling full release page: %s " % node)
            if self.handle404Failure(node):
                return False
            else:
                log.info("Error handling 404 under /version/")
                return False
    if(node == "_siteDebug"):
        # Debug page: only off-production, unless explicitly enabled.
        if(getBaseHost() != "schema.org" or os.environ['PRODSITEDEBUG'] == "True"):
            self.siteDebug()
            return False #Treat as a dynamic page - suppress Etags etc.
    if(node == "_cacheFlush"):
        setmodiftime(datetime.datetime.utcnow()) #Resets etags and modtime
        counts = CacheControl.clean(pagesonly=True)
        inf = "<div style=\"clear: both; float: left; text-align: left; font-size: xx-small; color: #888 ; margin: 1em; line-height: 100%;\">"
        inf += str(counts)
        inf += "</div>"
        self.handle404Failure(node,extrainfo=inf)
        return False
    # Pages based on request path matching a Unit in the term graph:
    if self.handleExactTermPage(node, layers=layerlist):
        return True
    else:
        log.info("Error handling exact term page. Assuming a 404: %s" % node)
    # Drop through to 404 as default exit.
    if self.handle404Failure(node):
        return False
    else:
        log.info("Error handling 404.")
        return False
def _getDocs(self, node, layerlist=""):
hstext = getHostExt()
if hstext == "":
hstext = "core"
if (node.startswith("docs/") and hstext != "core"): #All docs should operate in core
return self.redirectToBase(node,True)
if node in ["docs/jsonldcontext.json.txt", "docs/jsonldcontext.json"]:
if self.handleJSONContext(node):
return True
else:
log.info("Error handling JSON-LD context: %s" % node)
return False
elif (node == "docs/full.html"):
if self.handleFullHierarchyPage(node, layerlist=layerlist):
return True
else:
log.info("Error handling full.html : %s " % node)
return False
elif (node == "docs/schemas.html"):
if self.handleSchemasPage(node, layerlist=layerlist):
return True
else:
log.info("Error handling schemas.html : %s " % node)
return False
elif (node == "docs/developers.html"):
if self.handleDumpsPage(node, layerlist=layerlist):
return True
else:
log.info("Error handling developers.html : %s " % node)
return False
elif (node == "docs/tree.jsonld" or node == "docs/tree.json"):
if self.handleJSONSchemaTree(node, layerlist=ALL_LAYERS):
return True
else:
log.info("Error handling JSON-LD schema tree: %s " % node)
return False
else: #Asking for a sttic file under docs
return self.handleStaticDoc(node)
def handleStaticDoc(self,node):
    """Serve a static docs/ page from the page store.

    Only operates in CLOUDSTORE mode; otherwise (and on a miss, after
    emitting a 404 page) returns False. Returns True when served.
    """
    if PAGESTOREMODE != "CLOUDSTORE":
        return False
    log.info("Asking for: %s" % node)
    content = getPageFromStore(node,enableFlush=False)
    if not content:
        self.handle404Failure(node)
        return False
    self.response.out.write( content )
    log.debug("Serving static page: %s" % node)
    return True
def siteDebug(self):
    """Emit the _siteDebug diagnostics page.

    Renders the template header then a table of request/host settings,
    shared memcache counters (when SHAREDSITEDEBUG), and per-instance
    stats (call count, memory, datacache sizes).
    """
    global STATS
    page = templateRender('siteDebug.tpl', "_siteDebug" )
    self.response.out.write( page )
    ext = getHostExt()
    if ext == "":
        ext = "core"
    # Hidden marker consumed by external debug tooling / tests.
    self.response.out.write("<div style=\"display: none;\">\nLAYER:%s\n</div>" % ext)
    self.response.out.write("<table style=\"width: 70%; border: solid 1px #CCCCCC; border-collapse: collapse;\"><tbody>\n")
    self.writeDebugRow("Setting","Value",True)
    if SHAREDSITEDEBUG:
        self.writeDebugRow("System start",memcache.get("SysStart"))
        inst = memcache.get("Instances")
        extinst = memcache.get("ExitInstances")
        self.writeDebugRow("Running instances(%s)" % len(memcache.get("Instances")),inst.keys())
        self.writeDebugRow("Instance exits(%s)" % len(memcache.get("ExitInstances")),extinst.keys())
    self.writeDebugRow("httpScheme",getHttpScheme())
    self.writeDebugRow("host_ext",getHostExt())
    self.writeDebugRow("basehost",getBaseHost())
    self.writeDebugRow("hostport",getHostPort())
    self.writeDebugRow("sitename",getSiteName())
    self.writeDebugRow("debugging",getAppVar('debugging'))
    self.writeDebugRow("intestharness",getInTestHarness())
    if SHAREDSITEDEBUG:
        self.writeDebugRow("total calls",memcache.get("total"))
        for s in ALL_LAYERS:
            self.writeDebugRow("%s calls" % s, memcache.get(s))
        for s in ["http","https"]:
            self.writeDebugRow("%s calls" % s, memcache.get(s))
    self.writeDebugRow("This Instance ID",os.environ["INSTANCE_ID"],True)
    self.writeDebugRow("Instance Calls", callCount)
    self.writeDebugRow("Instance Memory Usage [Mb]", str(runtime.memory_usage()).replace("\n","<br/>"))
    self.writeDebugRow("Instance Current DataCache", DataCache.getCurrent())
    self.writeDebugRow("Instance DataCaches", len(DataCache.keys()))
    for c in DataCache.keys():
        self.writeDebugRow("Instance DataCache[%s] size" % c, len(DataCache.getCache(c) ))
    # NOTE(review): "</tbody><table>" looks like a typo for "</tbody></table>"
    # — runtime string left unchanged here.
    self.response.out.write("</tbody><table><br/>\n")
    self.response.out.write("</div>\n</body>\n<!--AppEngineVersion %s -->\n</html>\n" % getAppEngineVersion())
def writeDebugRow(self,term,value,head=False):
    """Write one two-column row of the _siteDebug table to the response.

    *head* renders the row as a styled header (<th>) row. The value cell
    is wrapped in a scrollable <div> so large values stay contained.
    """
    cell_tag = "th" if head else "td"
    cell_style = "border: solid 1px #CCCCCC; vertical-align: top; border-collapse: collapse;"
    if head:
        cell_style += " color: #FFFFFF; background: #888888;"
    label_style = cell_style + " width: 35%"
    div_style = "width: 100%; max-height: 100px; overflow: auto"
    row = ("<tr><{t} style=\"{ls}\">{label}</{t}>"
           "<{t} style=\"{cs}\"><div style=\"{ds}\">{val}</div></{t}></tr>\n").format(
        t=cell_tag, ls=label_style, cs=cell_style, ds=div_style, label=term, val=value)
    self.response.out.write(row)
def callCount(self):
    """Record one request in the per-instance and shared call counters.

    On the first call in this instance, registers the instance in shared
    memcache (seeding the shared counters if absent). Note: the module
    global counter is also named 'callCount', shadowed here by the method
    name at class level but distinct from it.
    """
    global instance_first
    global instance_num
    global callCount
    callCount += 1
    if(instance_first):
        instance_first = False
        instance_num += 1
        if SHAREDSITEDEBUG:
            # memcache.add only succeeds for the first instance to arrive,
            # so the shared counters are initialised exactly once.
            if(memcache.add(key="Instances",value={})):
                memcache.add(key="ExitInstances",value={})
                memcache.add(key="http",value=0)
                memcache.add(key="https",value=0)
                memcache.add(key="total",value=0)
                for i in ALL_LAYERS:
                    memcache.add(key=i,value=0)
            # NOTE(review): read-modify-replace of "Instances" is not atomic;
            # concurrent instances could lose a registration.
            Insts = memcache.get("Instances")
            Insts[os.environ["INSTANCE_ID"]] = 1
            memcache.replace("Instances",Insts)
    if SHAREDSITEDEBUG:
        memcache.incr("total")
        memcache.incr(getHttpScheme())
        if getHostExt() != "":
            memcache.incr(getHostExt())
        else:
            memcache.incr("core")
def warmup(self):
    """Run the cache warmer once per deployment.

    A shared "Warming" memcache flag (5-minute TTL as crash protection)
    prevents concurrent instances from warming simultaneously. No-op once
    WarmedUp has been set by the warmer.
    """
    global WarmedUp
    global Warmer
    if WarmedUp:
        return
    warm_start = datetime.datetime.now()
    log.debug("Instance[%s] received Warmup request at %s" % (modules.get_current_instance_id(), datetime.datetime.utcnow()) )
    if memcache.get("Warming"):
        log.debug("Instance[%s] detected system already warming" % (modules.get_current_instance_id()) )
    else:
        memcache.set("Warming",True,time=300)
        Warmer.warmAll(self)
        log.debug("Instance[%s] completed Warmup request at %s elapsed: %s" % (modules.get_current_instance_id(), datetime.datetime.utcnow(),datetime.datetime.now() - warm_start ) )
        memcache.set("Warming",False)
class WarmupTool():
    """Incrementally pre-renders key pages into the caches, layer by layer.

    stepWarm() warms the next batch of pages for a layer under a lock;
    warmAll() loops until every layer in ALL_LAYERS has been warmed, at
    which point the module-global WarmedUp flag is set.
    """

    def __init__(self):
        #self.pageList = ["docs/schemas.html"]
        self.pageList = ["/","docs/schemas.html","docs/full.html","docs/tree.jsonld","docs/developers.html","docs/jsonldcontext.json"]
        self.extPageList = ["/"] #Pages warmed in all extentions
        self.warmPages = {}   # layer -> pages already warmed
        for l in ALL_LAYERS:
            self.warmPages[l] = []
        self.warmedLayers = []  # layers fully warmed
        # BUG FIX: the lock must be created once and shared across calls.
        # Previously stepWarm built a fresh threading.Lock() on every
        # invocation, so 'with lock:' never excluded concurrent callers.
        self.lock = threading.Lock()

    def stepWarm(self, unit=None, layer=None):
        """Warm pending pages for *layer* (or the current host extension),
        temporarily switching the host extension while doing so."""
        with self.lock:
            realHostExt = getHostExt()
            if layer:
                setHostExt(layer)
            self._stepWarm(unit=unit, layer=layer)
            setHostExt(realHostExt)

    def _stepWarm(self, unit=None, layer=None):
        """Lock-held worker: warm not-yet-warmed pages for *layer* via
        unit._get(), and mark the layer done when its list is complete."""
        global WarmedUp
        if not layer:
            layer = getHostExt()
            if layer == "":
                layer = "core"
        if not unit or WarmedUp:
            return
        if layer in self.warmedLayers: #Done all for this layer
            return
        warmedPages = False
        for p in self.pageList:
            if p not in self.warmPages[layer]:
                self.warmPages[layer].append(p)
                if layer == "core" or p in self.extPageList: #Only warm selected pages in extensions
                    log.info("Warming page %s in layer %s" % (p,layer))
                    unit._get(p,doWarm=False)
                    unit.response.clear()  # discard the warmed body
                if len(self.warmPages[layer]) == len(self.pageList):
                    warmedPages = True
                break  # one page per step
        if warmedPages: #Must be all warmed for this layer
            log.info("All warmed in layer %s" % layer)
            self.warmedLayers.append(layer)
            self.checkAll()

    def checkAll(self):
        """Set the global WarmedUp flag once every layer is warmed."""
        global WarmedUp
        allDone = True
        for l in ALL_LAYERS:
            if l != "" and l not in self.warmedLayers:
                allDone = False
                break
        if allDone:
            WarmedUp = True
            log.info("All layers warmed!")

    def warmAll(self,unit):
        """Drive stepWarm() across all layers until everything is warmed."""
        global WarmedUp
        while not WarmedUp:
            for l in ALL_LAYERS:
                self.stepWarm(layer=l,unit=unit)

Warmer = WarmupTool()
def getExtenstionDescriptions():
    """Return descriptive strings for the current host extension.

    Returns a 6-tuple (name, disambiguating description, version, linktext,
    comment, extDisambiguatingDescription); every element is "" when no
    extension is active or no descriptor is configured.
    """
    name = ""
    brief = ""
    vers = ""
    linktext = ""
    comment = ""
    disambig = ""
    ext = getHostExt()
    if ext and len(ext):
        descs = api.SdoConfig.descriptor(ext)
        if descs and len(descs):
            desc = descs[0]
            name = desc.get("name")
            # All free-text fields pass through the Markdown renderer.
            brief = Markdown.parse(desc.get("brief"))
            vers = Markdown.parse(desc.get("version"))
            linktext = Markdown.parse(desc.get("linktext"))
            comment = Markdown.parse(desc.get("comment"))
            disambig = Markdown.parse(desc.get("extDisambiguatingDescription"))
    return name, brief, vers, linktext, comment, disambig
def templateRender(templateName, node, values=None):
    """Render *templateName* with the standard site variables plus *values*.

    *node* may be a Unit, a VTerm or a plain path string; it is used only
    to decide the relative docs/home paths for the rendered page.
    """
    global sitemode #,sitename
    # Normalise node to its string id.
    if isinstance(node, Unit):
        node = node.id
    if isinstance(node, VTerm):
        node = node.getId()
    extName, extDD, extVers, extlinktext, extComment, extDisambiguatingDescription = getExtenstionDescriptions()
    # Relative link prefixes depend on where the rendered page will live.
    if node.startswith("docs/"):
        docsdir, homedir = "./", ".."
    elif node.startswith("version/"):
        docsdir, homedir = "/docs/", ""
    else:
        docsdir, homedir = "docs/", "."
    tvars = {
        'ENABLE_HOSTED_EXTENSIONS': ENABLE_HOSTED_EXTENSIONS,
        'SCHEMA_VERSION': SCHEMA_VERSION,
        'appengineVersion': getAppEngineVersion(),
        'debugging': getAppVar('debugging'),
        'docsdir': docsdir,
        'extlinktext': extlinktext,
        'extDisambiguatingDescription': extDisambiguatingDescription,
        'extComment': extComment,
        'extDD': extDD,
        'extName': extName,
        'extVers': extVers,
        'extensionPath': makeUrl(getHostExt(),"",full=True),
        'homedir': homedir,
        'host_ext': getHostExt(),
        'mybasehost': getBaseHost(),
        'myhost': getHost(),
        'myport': getHostPort(),
        'sitemode': sitemode,
        'sitename': SdoConfig.getname(),
        'staticPath': homedir,
        'targethost': makeUrl("","",full=True),
        'vocabUri': SdoConfig.vocabUri()
    }
    # Caller-supplied values override the defaults.
    if values:
        tvars.update(values)
    return JINJA_ENVIRONMENT.get_template(templateName).render(tvars)
def oldtemplateRender(templateName, node, values=None):
    """Legacy variant of templateRender() that derives the extension
    description from the triple-store Unit API instead of SdoConfig
    descriptors.  Kept for older call sites; prefer templateRender().
    """
    global sitemode #,sitename
    log.info("templateRender(%s,%s,%s)" % (templateName, node, values))
    log.info("getHostExt %s" % getHostExt())
    if isinstance(node, Unit):
        node = node.id
    extDef = Unit.GetUnit(getNss(getHostExt()),True)
    extComment = ""
    extVers = ""
    extName = ""
    # Bug fix: extDD must be initialised here as well - previously it was
    # only assigned inside the 'if extDef:' branch, so building defvars
    # raised NameError whenever no extension definition was found.
    extDD = ""
    #log.info("EXDEF '%s'" % extDef)
    if extDef:
        extComment = GetComment(extDef,ALL_LAYERS)
        if extComment == "-":
            extComment = ""
        extDDs = GetTargets(Unit.GetUnit("schema:disambiguatingDescription", True), extDef, layers=ALL_LAYERS )
        if len(extDDs) > 0:
            extDD = Markdown.parse(extDDs[0])
        else:
            extDD = ""
        # Format the list of software versions as a single parenthesised note.
        first = True
        for ver in GetsoftwareVersions(extDef, ALL_LAYERS):
            if first:
                first = False
                extVers = "<em>(Extension version: "
            else:
                extVers += ", "
            extVers += Markdown.parse(ver)
        if len(extVers) :
            extVers += ")</em>"
        nms = GetTargets(Unit.GetUnit("schema:name", True), extDef, layers=ALL_LAYERS )
        if len(nms) > 0:
            extName = nms[0]
    # Relative link prefixes depend on where the rendered page will live.
    if node.startswith("docs/"):
        docsdir = "./"
        homedir = ".."
    else:
        docsdir = "docs/"
        homedir = "."
    defvars = {
        'ENABLE_HOSTED_EXTENSIONS': ENABLE_HOSTED_EXTENSIONS,
        'SCHEMA_VERSION': SCHEMA_VERSION,
        'SUBDOMAINS': SUBDOMAINS,
        'sitemode': sitemode,
        'sitename': SdoConfig.getname(),
        'staticPath': homedir,
        'extensionPath': makeUrl(getHostExt(),"",full=True),
        'myhost': getHost(),
        'myport': getHostPort(),
        'mybasehost': getBaseHost(),
        'host_ext': getHostExt(),
        'extComment': extComment,
        'docsdir': docsdir,
        'homedir': homedir,
        'extDD': extDD,
        'extVers': extVers,
        'extName': extName,
        'targethost': makeUrl("","",full=True),
        'debugging': getAppVar('debugging'),
        'appengineVersion': getAppEngineVersion()
    }
    # Caller-supplied values override the defaults.
    if values:
        defvars.update(values)
    template = JINJA_ENVIRONMENT.get_template(templateName)
    return template.render(defvars)
def my_shutdown_hook():
    """Runtime shutdown hook: record this instance's exit in the shared
    memcache bookkeeping (when SHAREDSITEDEBUG is on) and log the shutdown."""
    global instance_num
    if SHAREDSITEDEBUG:
        exiting = memcache.get("ExitInstances")
        if exiting:
            exiting[os.environ["INSTANCE_ID"]] = 1
            memcache.replace("ExitInstances",exiting)
        # add() is a no-op when the counter already exists; then bump it.
        memcache.add("Exits",0)
        memcache.incr("Exits")
    log.info("Instance[%s] shutting down" % modules.get_current_instance_id())
runtime.set_shutdown_hook(my_shutdown_hook)
# ---------------------------------------------------------------------------
# Per-request state accessors.  Each getter/setter pair wraps one well-known
# key in the request-scoped application-variable store (setAppVar/getAppVar).
# ---------------------------------------------------------------------------
def setHttpScheme(val):
    # "http" or "https" for the current request (see callCount()'s counters).
    setAppVar('httpScheme',val)
def getHttpScheme():
    return getAppVar('httpScheme')
def setHostExt(val):
    # Extension/layer subdomain (e.g. "bib"); "" for the core site.
    setAppVar('host_ext',val)
def getHostExt():
    return getAppVar('host_ext')
def setSiteName(val):
    setAppVar('sitename',val)
def getSiteName():
    return getAppVar('sitename')
def setHost(val):
    # Full host name of the current request.
    setAppVar('myhost',val)
def getHost():
    return getAppVar('myhost')
def setBaseHost(val):
    # Host name without any extension subdomain (used as URL target base).
    setAppVar('mybasehost',val)
def getBaseHost():
    return getAppVar('mybasehost')
def setHostPort(val):
    setAppVar('myport',val)
def getHostPort():
    return getAppVar('myport')
def setArguments(val):
    # Request argument string; getPageFromStore() scans it for "_pageFlush".
    setAppVar('myarguments',val)
def getArguments():
    return getAppVar('myarguments')
def makeUrl(ext="",path="",full=False,scheme=None):
    """Build a site URL.

    * ext    - extension/layer subdomain; "core" or "" means no subdomain
    * path   - path component; a leading "/" is added when missing
    * full   - when true return an absolute URL, otherwise just the path
    * scheme - URL scheme for absolute URLs (default: current request's)
    """
    port = "" if getHostPort() == "80" else ":%s" % getHostPort()
    sub = "%s." % ext if ext != "core" and ext != "" else ""
    if path != "":
        p = path if path.startswith("/") else "/%s" % path
    else:
        p = ""
    if not full:
        return "%s" % (p)
    # TARGETSITE (from the environment) overrides the request's base host.
    targethost = os.environ.get("TARGETSITE",getBaseHost())
    return "%s://%s%s%s%s" % (scheme or getHttpScheme(),sub,targethost,port,p)
def getPageFromStore(id,ext=None,enableFlush=True):
    """Fetch a cached page from the PageStore.

    When *enableFlush* is true and the request carries a "_pageFlush"
    argument, a cached hit is evicted and None is returned so the page
    gets regenerated.
    """
    page = PageStore.get(id,ext)
    if enableFlush and page and "_pageFlush" in getArguments():
        log.info("Reloading page for %s" % id)
        PageStore.remove(id,ext)
        return None
    return page
schemasInitialized = False
def load_schema_definitions(refresh=False):
    """Read the vocabulary term definitions once per process.

    Pass refresh=True to force a re-read even when already initialised.
    """
    global schemasInitialized
    if schemasInitialized and not refresh:
        return
    log.info("STARTING UP... reading schemas.")
    #load_graph(loadExtensions=ENABLE_HOSTED_EXTENSIONS)
    if SdoConfig.isValid():
        # Config-driven source files take precedence when available.
        read_schemas(SdoConfig.termFiles())
        load_usage_data(SdoConfig.countsFiles())
    else:
        read_local_schemas(loadExtensions=ENABLE_HOSTED_EXTENSIONS)
        if ENABLE_HOSTED_EXTENSIONS:
            read_extensions(ENABLED_EXTENSIONS)
    schemasInitialized = True
# Start time of an in-progress source load (None when idle), and the maximum
# seconds another thread will wait for that load before clearing the flag.
LOADINGSOURCE = None
WAITSECS = 360
def load_sources():
    """Load term definitions and examples exactly once per process.

    Thread coordination: LOADINGSOURCE holds the start time of an
    in-progress load; other threads poll until it clears or WAITSECS
    elapses (guarding against a crashed loader leaving the flag set).
    """
    global LOADINGSOURCE, LOADEDSOURCES,WAITSECS
    if LOADEDSOURCES:
        return
    if LOADINGSOURCE: #Another thread may already be here
        elapsedSecs = 0
        while LOADINGSOURCE and elapsedSecs < WAITSECS:
            time.sleep(0.1)
            if LOADINGSOURCE: #If still loading, check timing and go around again
                elapsed = datetime.datetime.now() - LOADINGSOURCE
                elapsedSecs = elapsed.total_seconds()
                if elapsedSecs >= WAITSECS: # Clear potential thread block caused by another thread crashing out leaving flags set
                    log.info("LOADINGSOURCE Thread blocked for over %s seconds - clearing lock" % WAITSECS)
                    LOADINGSOURCE = None
    if not LOADEDSOURCES and not LOADINGSOURCE: # Check again in case things have changed in above loop
        # NOTE(review): this check-then-set is not atomic; two threads could
        # in principle both start loading - confirm that is acceptable.
        LOADINGSOURCE = datetime.datetime.now()
        load_start = datetime.datetime.now()
        load_schema_definitions()
        log.info(("[%s] Term definitions load took %s " % (getInstanceId(short=True),(datetime.datetime.now() - load_start))))
        load_start = datetime.datetime.now()
        load_examples_data(ENABLED_EXTENSIONS)
        log.info(("[%s] Examples load took %s " % (getInstanceId(short=True),(datetime.datetime.now() - load_start))))
        LOADEDSOURCES=True
        LOADINGSOURCE=None
# Test harness loads everything eagerly; production exposes the WSGI app
# (sources are then loaded lazily on first request).
if getInTestHarness():
    load_sources()
else:
    app = ndb.toplevel(webapp2.WSGIApplication([("/(.*)", ShowUnit)]))
Fixed error where jsonldcontext.{json|jsonld|json.txt} were returning without the correct 'Content-Type:' settings
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
from __future__ import with_statement
import logging
logging.basicConfig(level=logging.INFO) # dev_appserver.py --log_level debug .
log = logging.getLogger(__name__)
import os
import re
import webapp2
import urllib2
import mimetypes
import jinja2
import logging
import StringIO
import json
import rdflib
#from rdflib.namespace import RDFS, RDF, OWL
#from rdflib.term import URIRef
from markupsafe import Markup, escape # https://pypi.python.org/pypi/MarkupSafe
import threading
import itertools
import datetime, time
from time import gmtime, strftime
from google.appengine.ext import ndb
from google.appengine.ext import blobstore
from google.appengine.api import users
from google.appengine.ext.webapp import blobstore_handlers
from google.appengine.api import modules
from google.appengine.api import runtime
from google.appengine.api import app_identity
from google.appengine.api.modules import modules
GAE_APP_ID = "appId"
GAE_VERSION_ID = "versionId"
#Testharness Used to indicate we are being called from tests - use setInTestHarness() & getInTestHarness() to manage value - defauluts to False (we are not in tests)
from testharness import *
from sdoutil import *
from api import *
from apirdfterm import *
from apirdflib import load_graph, getNss, getRevNss, buildSingleTermGraph, serializeSingleTermGrapth
from apirdflib import countTypes, countProperties, countEnums, graphFromFiles, getPathForPrefix, getPrefixForPath, rdfgettops
from apimarkdown import Markdown
from sdordf2csv import sdordf2csv
# Load the site configuration; abort startup if it (or any include) is invalid.
CONFIGFILE = os.environ.get("CONFIGFILE","sdoconfig.json")
SdoConfig.load(CONFIGFILE)
if not SdoConfig.valid:
    log.error("Invalid config from '%s' or its includes !!" % CONFIGFILE)
    # Bug fix: the os module has no exit() - the previous os.exit() raised
    # AttributeError instead of aborting cleanly.  Use sys.exit() instead.
    import sys
    sys.exit(1)
SCHEMA_VERSION="3.5"
# Resolve the real app id/version only when running inside App Engine.
if not getInTestHarness():
    GAE_APP_ID = app_identity.get_application_id()
    GAE_VERSION_ID = modules.get_current_version_name()
FEEDBACK_FORM_BASE_URL='https://docs.google.com/a/google.com/forms/d/1krxHlWJAO3JgvHRZV9Rugkr9VYnMdrI10xbGsWt733c/viewform?entry.1174568178&entry.41124795={0}&entry.882602760={1}'
# {0}: term URL, {1} category of term.
sitemode = "mainsite" # whitespaced list for CSS tags,
    # e.g. "mainsite testsite" when off expected domains
    # "extensionsite" when in an extension (e.g. blue?)
# Release dates of the published schema.org versions.
releaselog = { "2.0": "2015-05-13", "2.1": "2015-08-06", "2.2": "2015-11-05", "3.0": "2016-05-04", "3.1": "2016-08-09", "3.2": "2017-03-23", "3.3": "2017-08-14", "3.4": "2018-06-15", "3.5": "2019-04-02" }
silent_skip_list = [ "favicon.ico" ] # Do nothing for now
all_layers = {}
ext_re = re.compile(r'([^\w,])+')
validNode_re = re.compile(r'^[\w\/.-]+$')
#TODO: Modes:
# mainsite
# webschemadev
# known extension (not skiplist'd, eg. demo1 on schema.org)
# Templates can live on local disk or behind a URL (see urlTemplateLoader).
TEMPLATESDIR = SdoConfig.templateDir()
FileBasedTemplates = True
def urlTemplateLoader(name):
    """Jinja FunctionLoader callback: fetch template *name* from the remote
    TEMPLATESDIR URL.  Returns the template source, or None when it cannot
    be retrieved (which Jinja treats as 'template not found')."""
    log.info("TEMPLATE LOADER LOOKING FOR: %s" % name)
    url = TEMPLATESDIR + "/" + name
    log.info("URL: %s" % url)
    try:
        source = urllib2.urlopen(url).read()
    except urllib2.URLError as e:
        log.info("URLError %s" % e)
        return None
    return source
# Normalise the template location: strip a file:// prefix, and switch to the
# URL-based loader when a non-file scheme remains.
if TEMPLATESDIR:
    if TEMPLATESDIR.startswith("file://"):
        TEMPLATESDIR = TEMPLATESDIR[7:]
    if "://" in TEMPLATESDIR:
        FileBasedTemplates = False
else:
    TEMPLATESDIR = os.path.join(os.path.dirname(__file__), 'templates')
    log.info("No Templates directory defined - defaulting to %s" % TEMPLATESDIR)
# cache_size=0 disables Jinja's template cache so edits show up immediately.
if FileBasedTemplates:
    JINJA_ENVIRONMENT = jinja2.Environment(loader=jinja2.FileSystemLoader(TEMPLATESDIR),
        extensions=['jinja2.ext.autoescape'], autoescape=True, cache_size=0)
else:
    JINJA_ENVIRONMENT = jinja2.Environment(loader=jinja2.FunctionLoader(urlTemplateLoader),
        extensions=['jinja2.ext.autoescape'], autoescape=True, cache_size=0)
# ---------------------------------------------------------------------------
# Feature switches and layer configuration.
# ---------------------------------------------------------------------------
CANONICALSCHEME = "http"
ENABLE_JSONLD_CONTEXT = True
ENABLE_CORS = True
ENABLE_HOSTED_EXTENSIONS = True
DISABLE_NDB_FOR_LOCALHOST = True
ENABLEMOREINFO = True
WORKINGHOSTS = ["schema.org","schemaorg.appspot.com",
                "webschemas.org","webschemas-g.appspot.com",
                "sdo-test.appspot.com",
                "localhost"]
EXTENSION_SUFFIX = "" # e.g. "*"
CORE = 'core'
ATTIC = 'attic'
ENABLED_EXTENSIONS = [ATTIC, 'auto', 'bib', 'health-lifesci', 'pending', 'meta', 'iot' ]
#### Following 2 lines look odd - leave them as is - just go with it!
ALL_LAYERS = [CORE,'']
ALL_LAYERS += ENABLED_EXTENSIONS
####
ALL_LAYERS_NO_ATTIC = list(ALL_LAYERS)
ALL_LAYERS_NO_ATTIC.remove(ATTIC)
setAllLayersList(ALL_LAYERS)
OUTPUTDATATYPES = [".csv",".jsonld",".ttl",".rdf",".xml",".nt"]
FORCEDEBUGGING = False
# FORCEDEBUGGING = True
# Shared-memcache debug counters are disabled under the test harness.
SHAREDSITEDEBUG = True
if getInTestHarness():
    SHAREDSITEDEBUG = False
LOADEDSOURCES = False
noindexpages = True
# SUBDOMAINS may be overridden (true/false) from the environment.
SUBDOMAINS = True
subs = os.environ.get("SUBDOMAINS",None)
if subs:
    if subs.lower() == "true":
        SUBDOMAINS = True
    elif subs.lower() == "false":
        SUBDOMAINS = False
    else:
        log.info("SUBDOMAINS set to invalid value %s - defaulting to %s" %(subs,SUBDOMAINS))
log.info("SUBDOMAINS set to %s" % SUBDOMAINS)
############# Warmup Control ########
# WARMUPSTATE env values: "off" (never warm), "auto" (skip on localhost).
WarmedUp = False
WarmupState = "Auto"
if "WARMUPSTATE" in os.environ:
    WarmupState = os.environ["WARMUPSTATE"]
log.info("[%s] WarmupState: %s" % (getInstanceId(short=True),WarmupState))
if WarmupState.lower() == "off":
    WarmedUp = True
elif "SERVER_NAME" in os.environ and ("localhost" in os.environ['SERVER_NAME'] and WarmupState.lower() == "auto"):
    WarmedUp = True
def getAppEngineVersion():
    """Return the running App Engine version name ("TestVersion" when
    executing under the test harness)."""
    if getInTestHarness():
        return "TestVersion"
    # Imported lazily: the GAE modules API only exists on the real runtime.
    from google.appengine.api.modules.modules import get_current_version_name
    return get_current_version_name()
# Per-process bookkeeping: first-call flag, instance counter and request
# counter (all updated in callCount()).
instance_first = True
instance_num = 0
callCount = 0
# Request-scoped storage plus process start/modification timestamps used
# for the ETag slug machinery below.
global_vars = threading.local()
starttime = datetime.datetime.utcnow()
systarttime = starttime
modtime = starttime
etagSlug = ""
if not getInTestHarness():
    from google.appengine.api import memcache
# Datastore record persisting the shared ETag slug and its timestamp so all
# instances agree on cache-validation headers.
class SlugEntity(ndb.Model):
    slug = ndb.StringProperty()
    modtime = ndb.DateTimeProperty()
def setmodiftime(sttime):
    """Record *sttime* (truncated to whole seconds) as the content
    modification time and persist the derived ETag slug to the datastore.
    No-op under the test harness."""
    global modtime, etagSlug
    if not getInTestHarness():
        modtime = sttime.replace(microsecond=0)
        etagSlug = "24751%s" % modtime.strftime("%y%m%d%H%M%Sa")
        log.debug("set slug: %s" % etagSlug)
        slug = SlugEntity(id="ETagSlug",slug=etagSlug, modtime=modtime)
        slug.put()
def getmodiftime():
    """Return the shared modification time, re-seeding the stored ETag slug
    when the datastore record has been lost."""
    global modtime, etagSlug
    if not getInTestHarness():
        slug = SlugEntity.get_by_id("ETagSlug")
        if not slug:#Occationally memcache will loose the value and result in becomming Null value
            # NOTE(review): this assigns a *local* systarttime (there is no
            # 'global systarttime' here), so the module-level value is not
            # updated by this recovery path - confirm whether intended.
            systarttime = datetime.datetime.utcnow()
            tick()
            setmodiftime(systarttime)#Will store it again
            slug = SlugEntity.get_by_id("ETagSlug")
        modtime = slug.modtime
        etagSlug = str(slug.slug)
    return modtime
def getslug():
    """Return the current ETag slug (refreshed via getmodiftime())."""
    global etagSlug
    getmodiftime()
    return etagSlug
def tick(): #Keep memcache values fresh so they don't expire
    # Re-writes the shared start-time and version keys; no-op in tests.
    if not getInTestHarness():
        memcache.set(key="SysStart", value=systarttime)
        memcache.set(key="static-version", value=appver)
def check4NewVersion():
    """Determine whether this process belongs to a newly deployed version.

    Reads admin/deploy_timestamp.txt and compares it with the previously
    stored deployment timestamp.  Local/test instances always report a new
    version.  Returns a (changed, timestamp) tuple; timestamp is None when
    the file could not be read.
    """
    ret = False
    dep = None
    try:
        fpath = os.path.join(os.path.split(__file__)[0], 'admin/deploy_timestamp.txt')
        #log.info("fpath: %s" % fpath)
        # 'with' closes the file automatically (the old explicit close()
        # inside the with-block was redundant).
        with open(fpath, 'r') as f:
            dep = f.read()
        dep = dep.replace("\n","")
    except Exception as e:
        log.info("ERROR reading: %s" % e)
    # Robustness: SERVER_NAME may be absent outside a request context, so
    # use .get() rather than direct indexing (which raised KeyError).
    if getInTestHarness() or "localhost" in os.environ.get('SERVER_NAME',''): #Force new version logic for local versions and tests
        ret = True
        log.info("Assuming new version for local/test instance")
    else:
        stored,info = getTimestampedInfo("deployed-timestamp")
        if stored != dep:
            ret = True
    return ret, dep
def storeNewTimestamp(stamp=None):
    """Persist the deployment timestamp this instance has seen."""
    storeTimestampedInfo("deployed-timestamp",stamp)
def storeInitialisedTimestamp(stamp=None):
    """Persist the moment this deployment finished initialising."""
    storeTimestampedInfo("initialised-timestamp",stamp)
if getInTestHarness():
    load_examples_data(ENABLED_EXTENSIONS)
else: #Ensure clean start for any memcached or ndb store values...
    changed, dep = check4NewVersion()
    if changed: #We are a new instance of the app
        # First instance of a fresh deployment: flush shared caches, record
        # the new timestamp, notify by mail, then initialise shared state
        # while holding the "app_initialising" flag (5 min TTL guards
        # against a crash leaving it set).
        msg = "New app instance [%s:%s] detected - FLUSHING CACHES. (deploy_timestamp='%s')\nLoaded Config file from: %s" % (GAE_VERSION_ID,GAE_APP_ID,dep,CONFIGFILE)
        memcache.flush_all()
        storeNewTimestamp(dep)
        sdo_send_mail(to="rjw@dataliberate.com",subject="[SCHEMAINFO] from 'sdoapp'", msg=msg)
        log.info("%s" % msg)
        load_start = datetime.datetime.now()
        systarttime = datetime.datetime.utcnow()
        memcache.set(key="app_initialising", value=True, time=300) #Give the system 5 mins - auto remove flag in case of crash
        memcache.set(key="static-version", value=appver)
        memcache.add(key="SysStart", value=systarttime)
        instance_first = True
        cleanmsg = CacheControl.clean()
        log.info("Clean count(s): %s" % cleanmsg)
        log.info(("[%s] Cache clean took %s " % (getInstanceId(short=True),(datetime.datetime.now() - load_start))))
        load_start = datetime.datetime.now()
        tick()
        memcache.set(key="app_initialising", value=False)
        log.debug("[%s] Awake >>>>>>>>>>>." % (getInstanceId(short=True)))
        storeInitialisedTimestamp()
    else:
        # Existing deployment: wait (up to WAITCOUNT seconds) for whichever
        # instance is initialising to clear the shared flag.
        time.sleep(0.5) #Give time for the initialisation flag (possibly being set in another thread/instance) to be set
        WAITCOUNT = 180
        waittime = WAITCOUNT
        while waittime > 0:
            waittime -= 1
            flag = memcache.get("app_initialising")
            if not flag or flag == False: #Initialised or value missing
                break
            log.debug("[%s] Waited %s seconds for intialisation to end memcahce value = %s" % (getInstanceId(short=True),
                (WAITCOUNT - waittime),memcache.get("app_initialising")))
            time.sleep(1)
        if waittime <= 0:
            log.info("[%s] Waited %s seconds for intialisation to end - proceeding anyway!" % (getInstanceId(short=True),WAITCOUNT))
        log.debug("[%s] End of waiting !!!!!!!!!!." % (getInstanceId(short=True)))
        tick()
    # Re-seed the shared start time if memcache has lost it, then publish
    # the derived modification time / ETag slug.
    systarttime = memcache.get("SysStart")
    if(not systarttime): #Occationally memcache will loose the value and result in systarttime becomming Null value
        systarttime = datetime.datetime.utcnow()
        tick()
    setmodiftime(systarttime)
#################################################
def cleanPath(node):
    """Return the substring of a string matching chars approved for use in our URL paths."""
    # Allowed characters: letters, digits, '-', '/', ',' and '.'.
    disallowed = re.compile(r'[^a-zA-Z0-9\-/,\.]', re.DOTALL)
    return disallowed.sub('', str(node))
class HTMLOutput:
    """Used in place of http response when we're collecting HTML to pass to template engine."""
    def __init__(self):
        # Fragments in write() order; joined lazily by toHTML().
        self.outputStrings = []
    def write(self, str):
        """Append one HTML fragment (mirrors a response stream's write())."""
        self.outputStrings.append(str)
    def toHTML(self):
        """Join all collected fragments into a single Markup string."""
        return Markup("".join(self.outputStrings))
    def __str__(self):
        return self.toHTML()
# Core API: we have a single schema graph built from triples and units.
# now in api.py
class TypeHierarchyTree:
    """Builds a rendering of the type hierarchy (HTML <ul> tree or JSON-LD)
    by recursively walking a term's subtypes.

    Visited term ids are tracked so multiple-inheritance diamonds are
    cross-linked rather than duplicated.
    """
    def __init__(self, prefix=""):
        self.txt = ""        # accumulated output text
        self.visited = []    # term ids already emitted (ids may repeat)
        self.prefix = prefix # text prepended by toHTML()
    def emit(self, s):
        self.txt += s + "\n"
    def emit2buff(self, buff, s):
        # Buffered variant of emit(); used so a subtree can be discarded
        # when it produces no output.
        buff.write(s + "\n")
    def toHTML(self):
        return '%s<ul>%s</ul>' % (self.prefix, self.txt)
    def toJSON(self):
        return self.txt
    def traverseForHTML(self, term, depth = 1, hashorslash="/", layers='core', idprefix="", urlprefix="", traverseAllLayers=False, buff=None):
        """Generate a hierarchical tree view of the types. hashorslash is used for relative link prefixing."""
        #log.info("traverseForHTML: node=%s hashorslash=%s" % ( term, hashorslash ))
        if not term:
            return False
        # Superseded and attic terms are omitted from the tree entirely.
        if term.superseded() or term.getLayer() == ATTIC:
            return False
        localBuff = False
        if buff == None:
            localBuff = True
            buff = StringIO.StringIO()
        home = term.getLayer()
        gotOutput = True
        # Terms homed in another enabled extension link out to that subdomain.
        if home in ENABLED_EXTENSIONS and home != getHostExt():
            urlprefix = makeUrl(home)
        extclass = ""
        extflag = ""
        tooltip=""
        if home != "core" and home != "":
            extclass = "class=\"ext ext-%s\"" % home
            extflag = EXTENSION_SUFFIX
            tooltip = "title=\"Extended schema: %s.schema.org\" " % home
        # we are a supertype of some kind
        subTypes = term.getSubs()
        idstring = idprefix + term.getId()
        if len(subTypes) > 0:
            # and we haven't been here before
            if term.getId() not in self.visited:
                self.emit2buff(buff, ' %s<li class="tbranch" id="%s"><a %s %s href="%s%s%s">%s</a>%s' % (" " * 4 * depth, idstring, tooltip, extclass, urlprefix, hashorslash, term.getId(), term.getId(), extflag) )
                self.emit2buff(buff, ' %s<ul>' % (" " * 4 * depth))
                # handle our subtypes
                for item in subTypes:
                    subBuff = StringIO.StringIO()
                    got = self.traverseForHTML(item, depth + 1, hashorslash=hashorslash, layers=layers, idprefix=idprefix, urlprefix=urlprefix, traverseAllLayers=traverseAllLayers,buff=subBuff)
                    if got:
                        self.emit2buff(buff,subBuff.getvalue())
                    subBuff.close()
                self.emit2buff(buff, ' %s</ul>' % (" " * 4 * depth))
            else:
                # we are a supertype but we visited this type before, e.g. saw Restaurant via Place then via Organization
                seencount = self.visited.count(term.getId())
                idstring = "%s%s" % (idstring, "+" * seencount)
                seen = ' <a href="#%s">+</a> ' % term.getId()
                self.emit2buff(buff, ' %s<li class="tbranch" id="%s"><a %s %s href="%s%s%s">%s</a>%s%s' % (" " * 4 * depth, idstring, tooltip, extclass, urlprefix, hashorslash, term.getId(), term.getId(), extflag, seen) )
        # leaf nodes
        if len(subTypes) == 0:
            gotOutput = True
            seen = ""
            if term.getId() in self.visited:
                seencount = self.visited.count(term.getId())
                idstring = "%s%s" % (idstring, "+" * seencount)
                seen = ' <a href="#%s">+</a> ' % term.getId()
            self.emit2buff(buff, '%s<li class="tleaf" id="%s"><a %s %s href="%s%s%s">%s</a>%s%s' % (" " * depth, idstring, tooltip, extclass, urlprefix, hashorslash, term.getId(), term.getId(), extflag, seen ))
        self.visited.append(term.getId()) # remember our visit
        self.emit2buff(buff, ' %s</li>' % (" " * 4 * depth) )
        if localBuff:
            self.emit(buff.getvalue())
            buff.close()
        return gotOutput
    # based on http://danbri.org/2013/SchemaD3/examples/4063550/hackathon-schema.js - thanks @gregg, @sandro
    def traverseForJSONLD(self, term, depth = 0, last_at_this_level = True, supertype="None", layers='core'):
        """Emit *term* and its unvisited subtypes as nested JSON-LD objects
        (the @context is emitted only at the top of the document)."""
        emit_debug = False
        if not term or not term.getId():
            log.error("Error None value passed to traverseForJSONLD()")
            return
        if term.getId() in self.visited:
            # self.emit("skipping %s - already visited" % node.id)
            return
        self.visited.append(term.getId())
        p1 = " " * 4 * depth
        if emit_debug:
            self.emit("%s# @id: %s last_at_this_level: %s" % (p1, term.getId(), last_at_this_level))
        ctx = "{}".format(""""@context": {
"rdfs": "http://www.w3.org/2000/01/rdf-schema#",
"schema": "http://schema.org/",
"rdfs:subClassOf": { "@type": "@id" },
"name": "rdfs:label",
"description": "rdfs:comment",
"children": { "@reverse": "rdfs:subClassOf" }
},\n""" if last_at_this_level and depth==0 else '' )
        unseen_subtypes = []
        for st in term.getSubs():
            if not st.getId() in self.visited:
                unseen_subtypes.append(st)
        unvisited_subtype_count = len(unseen_subtypes)
        subtype_count = len( term.getSubs() )
        supertx = "{}".format( '"rdfs:subClassOf": "schema:%s", ' % supertype.getId() if supertype != "None" else '' )
        maybe_comma = "{}".format("," if unvisited_subtype_count > 0 else "")
        comment = term.getComment().strip()
        comment = ShortenOnSentence(StripHtmlTags(comment),60)
        def encode4json(s):
            return json.dumps(s)
        self.emit('\n%s{\n%s\n%s"@type": "rdfs:Class", %s "description": %s,\n%s"name": "%s",\n%s"@id": "schema:%s",\n%s"layer": "%s"%s'
            % (p1, ctx, p1, supertx, encode4json(comment), p1, term.getId(), p1, term.getId(), p1, term.getLayer(), maybe_comma))
        i = 1
        if unvisited_subtype_count > 0:
            self.emit('%s"children": ' % p1 )
            self.emit(" %s[" % p1 )
            inner_lastness = False
            for t in unseen_subtypes:
                if emit_debug:
                    # Bug fix: this debug line referenced undefined node.id /
                    # t.id (NameError/AttributeError when emit_debug is on).
                    self.emit("%s # In %s > %s i: %s unvisited_subtype_count: %s" %(p1, term.getId(), t.getId(), i, unvisited_subtype_count))
                if i == unvisited_subtype_count:
                    inner_lastness = True
                i = i + 1
                self.traverseForJSONLD(t, depth + 1, inner_lastness, supertype=term, layers=layers)
            self.emit("%s ]%s" % (p1, "{}".format( "" if not last_at_this_level else '' ) ) )
        # Trailing comma unless this is the last sibling at this level.
        maybe_comma = "{}".format( ',' if not last_at_this_level else '' )
        self.emit('\n%s}%s\n' % (p1, maybe_comma))
def GetExamples(term, layers='core'):
    """Returns the examples (if any) for some Unit node."""
    # Delegates to the example store; 'layers' is currently unused here.
    return LoadTermExamples(term)
def GetExtMappingsRDFa(term):
    """Self-contained chunk of RDFa HTML markup with mappings for this term."""
    equivs = term.getEquivalents()
    if term.isClass() and len(equivs) > 0:
        # Classes: absolute URIs become href links, other ids resource links.
        parts = []
        for eq in equivs:
            if eq.startswith('http'):
                parts.append("<link property=\"owl:equivalentClass\" href=\"%s\"/>\n" % eq)
            else:
                parts.append("<link property=\"owl:equivalentClass\" resource=\"%s\"/>\n" % eq)
        return "".join(parts)
    if term.isProperty() and len(equivs) > 0:
        return "".join("<link property=\"owl:equivalentProperty\" href=\"%s\"/>\n" % eq for eq in equivs)
    return "<!-- no external mappings noted for this term. -->"
class ShowUnit (webapp2.RequestHandler):
"""ShowUnit exposes schema.org terms via Web RequestHandler
(HTML/HTTP etc.).
"""
    def emitCacheHeaders(self):
        """Send cache-related headers via HTTP."""
        # Deployment config (app.yaml env) may override the default
        # 10-minute public cache lifetime.
        if "CACHE_CONTROL" in os.environ:
            log.info("Setting http cache control to '%s' from .yaml" % os.environ["CACHE_CONTROL"])
            self.response.headers['Cache-Control'] = os.environ["CACHE_CONTROL"]
        else:
            self.response.headers['Cache-Control'] = "public, max-age=600" # 10m
        # Responses vary with content negotiation and compression.
        self.response.headers['Vary'] = "Accept, Accept-Encoding"
    def write(self, str):
        """Write some text to Web server's output stream."""
        # Output is buffered in outputStrings and flushed elsewhere.
        self.outputStrings.append(str)
def moreInfoBlock(self, term, layer='core'):
# if we think we have more info on this term, show a bulleted list of extra items.
moreblock = os.environ.get("MOREBLOCK")
if not moreblock or (moreblock.lower() == "false"):
return ""
# defaults
bugs = ["No known open issues."]
mappings = ["No recorded schema mappings."]
items = bugs + mappings
feedback_url = FEEDBACK_FORM_BASE_URL.format(term.getUri, term.getType())
items = [
self.emitCanonicalURL(term),
self.emitEquivalents(term),
"<a href='{0}'>Leave public feedback on this term 💬</a>".format(feedback_url),
"<a href='https://github.com/schemaorg/schemaorg/issues?q=is%3Aissue+is%3Aopen+{0}'>Check for open issues.</a>".format(term.getId())
]
if term.getLayer() != "core":
items.append("'{0}' is mentioned in the <a href='{1}'>{2}</a> extention.".format( term.getId(), makeUrl(term.getLayer(),"",full=True), term.getLayer() ))
moreinfo = """<div>
<div id='infobox' style='text-align: right;' role="checkbox" aria-checked="false"><label for="morecheck"><b><span style="cursor: pointer;">[more...]</span></b></label></div>
<input type='checkbox' checked="checked" style='display: none' id=morecheck><div id='infomsg' style='background-color: #EEEEEE; text-align: left; padding: 0.5em;'>
<ul>"""
for i in items:
if i and len(i):
moreinfo += "<li>%s</li>" % i
# <li>mappings to other terms.</li>
# <li>or links to open issues.</li>
moreinfo += "</ul>\n</div>\n</div>\n"
return moreinfo
    def ml(self, term, label='', title='', prop='', hashorslash='/'):
        """ml ('make link')
        Returns an HTML-formatted link to the class or property URL
        * label = optional anchor text label for the link
        * title = optional title attribute on the link
        * prop = an optional property value to apply to the A element
        """
        if not term:
            return ""
        # Ids containing ':' belong to an external vocabulary.
        if ":" in term.getId():
            return self.external_ml(term,title=title, prop=prop)
        if label=='':
            label = term.getLabel()
        if title != '':
            title = " title=\"%s\"" % (title)
        if prop:
            prop = " property=\"%s\"" % (prop)
        rdfalink = ''
        if prop:
            # Emit an RDFa <link> carrying the property alongside the <a>.
            rdfalink = '<link %s href="%s%s" />' % (prop,api.SdoConfig.vocabUri(),label)
        if(term.id == "DataType"): #Special case
            return "%s<a href=\"%s\">%s</a>" % (rdfalink,term.getId(), term.getId())
        urlprefix = "."
        home = term.getLayer()
        # Terms homed in another enabled extension link to that subdomain.
        if home in ENABLED_EXTENSIONS and home != getHostExt():
            port = ""
            if getHostPort() != "80":
                port = ":%s" % getHostPort()
            urlprefix = makeUrl(home,full=True)
        extclass = ""
        extflag = ""
        tooltip = ""
        if home != "core" and home != "":
            if home != "meta":
                extclass = "class=\"ext ext-%s\" " % home
                extflag = EXTENSION_SUFFIX
            tooltip = "title=\"Defined in extension: %s.schema.org\" " % home
        return "%s<a %s %s href=\"%s%s%s\"%s>%s</a>%s" % (rdfalink,tooltip, extclass, urlprefix, hashorslash, term.getId(), title, label, extflag)
        #return "<a %s %s href=\"%s%s%s\"%s%s>%s</a>%s" % (tooltip, extclass, urlprefix, hashorslash, node.id, prop, title, label, extflag)
def external_ml(self, term, title='', prop=''):
#log.info("EXTERNAL!!!! %s %s " % (term.getLabel(),term.getId()))
name = term.getId()
if not ":" in name:
return name
if name.startswith("http") and '#' in name:
x = name.split("#")
path = x[0] + "#"
val = x[1]
voc = getPrefixForPath(path)
elif name.startswith("http"):
val = os.path.basename(name)
path = name[:len(name) - len(val)]
voc = getPrefixForPath(path)
else:
x = name.split(":")
voc = x[0]
val = x[1]
path = getPathForPrefix(voc)
if path:
if not path.endswith("#") and not path.endswith("/"):
path += "/"
if title != '':
title = " title=\"%s\"" % str(title)
if prop:
prop = " property=\"%s\"" % (prop)
rdfalink = ''
if prop:
rdfalink = '<link %s href="%s%s" />' % (prop,api.SdoConfig.vocabUri(),label)
return "%s<a %s href=\"%s%s\" class=\"externlink\" target=\"_blank\">%s:%s</a>" % (rdfalink,title,path,val,voc,val)
    def makeLinksFromArray(self, nodearray, tooltip=''):
        """Make a comma separate list of links via ml() function.
        * tooltip - optional text to use as title of all links
        """
        hyperlinks = []
        for f in nodearray:
            # Positional args of ml(): f.id is the label, tooltip the title.
            hyperlinks.append(self.ml(f, f.id, tooltip))
        return (", ".join(hyperlinks))
def emitUnitHeaders(self, term, layers='core'):
"""Write out the HTML page headers for this node."""
self.write("<h1 property=\"rdfs:label\" class=\"page-title\">")
self.write(term.getLabel())
self.write("</h1>\n")
home = term.getLayer()
if home != "core" and home != "":
exthome = "%s.schema.org" % home
exthomeurl = uri = makeUrl(home,"/",full=True)
linktext = "Defined in the %s section."
lt = SdoConfig.getDescriptor(home,"linktext")
if lt:
if lt.count("%s") != 1:
log.error("ERROR Linktext '%s' includes %s '%%s' - only 1 permitted" % (lt,lt.count()))
else:
linktext = lt
t = SdoConfig.getDescriptor(home,"disambiguatingDescription")
linkinsert = "<a title=\"%s\" href=\"%s\">%s</a>" % (t,exthomeurl,home)
self.write("<span class=\"extlink\">")
self.write(linktext % linkinsert)
self.write("<br/></span>")
if not ENABLEMOREINFO:
self.write(self.emitCanonicalURL(term))
eq = self.emitEquivalents(term)
if eq and len(eq):
self.write()
self.BreadCrumbs(term)
comment = term.getComment()
self.write(" <div property=\"rdfs:comment\">%s</div>\n\n" % (comment) + "\n")
usage = GetUsage(term.getId())
#if len(usage):
# self.write(" <br/><div>Usage: %s</div>\n\n" % (usage) + "\n")
if ENABLEMOREINFO:
self.write(self.moreInfoBlock(term))
    def emitCanonicalURL(self,term):
        """Return the 'Canonical URL' span for *term*.  When serving
        schema.org itself, also writes a sameAs <link> for the alternate
        http/https scheme directly to the output."""
        # NOTE(review): 'out' is never used; 'output' is what is returned.
        out = ""
        site = SdoConfig.vocabUri()
        if site != "http://schema.org":
            cURL = "%s%s" % (site,term.getId())
            output = " <span class=\"canonicalUrl\">Canonical URL: %s</span> " % (cURL)
        else:
            cURL = "%s://schema.org/%s" % (CANONICALSCHEME,term.getId())
            # Cross-link the other scheme (http <-> https) as sameAs.
            if CANONICALSCHEME == "http":
                other = "https"
            else:
                other = "http"
            sa = '\n<link property="sameAs" href="%s://schema.org/%s" />' % (other,term.getId())
            self.write(sa)
            output = " <span class=\"canonicalUrl\">Canonical URL: <a href=\"%s\">%s</a></span> " % (cURL, cURL)
        return output
def emitEquivalents(self, term):
    """Return HTML spans for the term's owl equivalents ('' when none)."""
    equivs = term.getEquivalents()
    out = StringIO.StringIO()
    if len(equivs) > 0:
        if term.isClass() or term.isDataType():
            label = "Equivalent Class:"
        else:
            label = "Equivalent Property:"
        sep = ""  # becomes <br/> after the first entry
        for e in equivs:
            eq = VTerm.getTerm(e, createReference=True)
            log.info("EQUIVALENT %s %s" % (e, eq))
            title = eq.getUri()
            out.write("%s<span class=\"equivalents\">%s %s</span> " % (sep, label, self.ml(eq, title=title)))
            sep = "<br/>"
    return out.getvalue()
# Stacks to support multiple inheritance
# NOTE(review): class-level mutable default; BreadCrumbs rebinds
# self.crumbStacks per call, so instances do not appear to share
# state through this list in practice.
crumbStacks = []
def BreadCrumbs(self, term):
    """Write the <h4> breadcrumb trail(s) for term.

    One line per parent path; paths are popped root-first off each stack,
    joined with " > " (or " :: " before the final crumb of an enumeration
    value), sorted alphabetically, and separated by <br/>.
    """
    self.crumbStacks = term.getParentPaths()
    for cstack in self.crumbStacks:
        # Paths come ordered leaf-first; append the implicit roots so the
        # later pop() loop emits them first.
        if term.isProperty():
            cstack.append(VTerm.getTerm("http://schema.org/Property"))
            cstack.append(VTerm.getTerm("http://schema.org/Thing"))
        elif term.isDataType() and not term.id == "DataType":
            cstack.append(VTerm.getTerm("http://schema.org/DataType"))
    enuma = term.isEnumerationValue()
    crumbsout = []
    for row in range(len(self.crumbStacks)):
        thisrow = ""
        # Skip paths whose (deepest) root resolved to nothing.
        targ = self.crumbStacks[row][len(self.crumbStacks[row]) - 1]
        if not targ:
            continue
        count = 0
        # Destructively pop the stack, root first, building the trail.
        while(len(self.crumbStacks[row]) > 0):
            propertyval = None
            n = self.crumbStacks[row].pop()
            if((len(self.crumbStacks[row]) == 1) and n and
                not ":" in n.id):  # penultimate crumb that is not a non-schema reference
                # Mark the RDFa relation between the last two crumbs.
                if term.isProperty():
                    if n.isProperty():  # Can only be a subproperty of a property
                        propertyval = "rdfs:subPropertyOf"
                else:
                    propertyval = "rdfs:subClassOf"
            if(count > 0):
                if((len(self.crumbStacks[row]) == 0) and enuma):  # final crumb
                    thisrow += " :: "
                else:
                    thisrow += " > "
            count += 1
            thisrow += "%s" % (self.ml(n, prop=propertyval))
        crumbsout.append(thisrow)
    self.write("<h4>")
    rowcount = 0
    for crumb in sorted(crumbsout):
        if rowcount > 0:
            self.write("<br/>")
        self.write("<span class='breadcrumbs'>%s</span>\n" % crumb)
        rowcount += 1
    self.write("</h4>\n")
# Walk up the stack, appending crumbs & create new (duplicating crumbs already
# identified) if more than one parent found
def WalkCrumbs(self, term, cstack):
    """Recursively append term and its ancestors to cstack.

    When term has several supertypes, the current stack is copied once
    per extra parent (copies registered in self.crumbStacks) so every
    inheritance path gets its own crumb trail.
    """
    if ":" in term.getId():  # Suppress external class references
        return
    cstack.append(term)
    supers = term.getSupers()
    branches = [cstack]
    # One duplicate of the stack per additional parent, taken before any
    # recursion so each branch starts from the same prefix.
    for _ in range(len(supers) - 1):
        copystack = cstack[:]
        branches.append(copystack)
        self.crumbStacks.append(copystack)
    for parent, branch in zip(supers, branches):
        self.WalkCrumbs(parent, branch)
def emitSimplePropertiesPerType(self, cl, layers="core", out=None, hashorslash="/"):
    """Emits a simple list of properties applicable to the specified type."""
    target = out if out else self
    target.write("<ul class='props4type'>")
    for prop in VTerm.getTerm(cl).getProperties():
        if prop.superseded():
            continue
        pid = prop.getId()
        target.write("<li><a href='%s%s'>%s</a></li>" % (hashorslash, pid, pid))
    target.write("</ul>\n\n")
def emitSimplePropertiesIntoType(self, cl, layers="core", out=None, hashorslash="/"):
    """Emits a simple list of properties whose values are the specified type."""
    target = out if out else self
    target.write("<ul class='props2type'>")
    for prop in VTerm.getTerm(cl).getTargetOf():
        if prop.superseded():
            continue
        pid = prop.getId()
        target.write("<li><a href='%s%s'>%s</a></li>" % (hashorslash, pid, pid))
    target.write("</ul>\n\n")
def hideAtticTerm(self, term):
    """True when term lives in the attic layer and we are not browsing attic."""
    return bool(getHostExt() != ATTIC and term.inLayers([ATTIC]))
def ClassProperties (self, cl, subclass=False, term=None, out=None, hashorslash="/"):
    """Write out a table of properties for a per-type page.

    cl: the class whose declared properties are listed.
    subclass: when True and cl contributed no rows, an empty spacer row
    is emitted instead.
    term: the page's own term — used for the one-time table header.
    Returns the number of property rows written.
    NOTE(review): assumes term is not None whenever the header has not
    been printed yet (term.isClass() below) — confirm with callers.
    """
    if not out:
        out = self
    propcount = 0
    headerPrinted = False
    props = cl.getProperties()
    for prop in props:
        if prop.superseded() or self.hideAtticTerm(prop):
            continue
        olderprops = prop.getSupersedes()
        inverseprop = prop.getInverseOf()
        ranges = prop.getRanges()
        doms = prop.getDomains()
        comment = prop.getComment()
        # External-vocabulary terms carry a placeholder "-" comment.
        if ":" in prop.id and comment == "-":
            comment = "Term from external vocabulary"
        # Emit the page-wide table header exactly once (app-level flag).
        if not getAppVar("tableHdr"):
            setAppVar("tableHdr", True)
            if ((term.isClass() or term.isEnumeration()) and not term.isDataType() and term.id != "DataType"):
                self.write("<table class=\"definition-table\">\n <thead>\n <tr><th>Property</th><th>Expected Type</th><th>Description</th> \n </tr>\n </thead>\n\n")
                self.tablehdr = True
        # Per-class "Properties from X" sub-header, once per cl.
        if (not headerPrinted):
            class_head = self.ml(cl)
            out.write("<tr class=\"supertype\">\n <th class=\"supertype-name\" colspan=\"3\">Properties from %s</th>\n \n</tr>\n\n<tbody class=\"supertype\">\n " % (class_head))
            headerPrinted = True
        out.write("<tr typeof=\"rdfs:Property\" resource=\"%s\">\n \n <th class=\"prop-nam\" scope=\"row\">\n\n<code property=\"rdfs:label\">%s</code>\n </th>\n " % (prop.getUri(), self.ml(prop)))
        out.write("<td class=\"prop-ect\">\n")
        first_range = True
        for r in ranges:
            if (not first_range):
                out.write(" or <br/> ")
            first_range = False
            out.write(self.ml(r, prop='rangeIncludes'))
            out.write(" ")
        # Invisible RDFa domainIncludes links for the property.
        for d in doms:
            out.write("<link property=\"domainIncludes\" href=\"%s\">" % d.getUri())
        out.write("</td>")
        out.write("<td class=\"prop-desc\" property=\"rdfs:comment\">%s" % (comment))
        if (olderprops and len(olderprops) > 0):
            olderlinks = ", ".join([self.ml(o) for o in olderprops])
            out.write(" Supersedes %s." % olderlinks )
        if (inverseprop != None):
            out.write("<br/> Inverse property: %s." % (self.ml(inverseprop)))
        out.write("</td></tr>")
        # Any emitted row means cl wasn't an "empty superclass".
        subclass = False
        propcount += 1
    if subclass: # in case the superclass has no defined attributes
        out.write("<tr><td colspan=\"3\"></td></tr>")
    return propcount
def emitClassExtensionSuperclasses (self, cl, layers="core", out=None):
    """Write the list of cl's supertypes defined outside the current layer(s).

    Only supers that are external references or not in 'layers' are
    listed; nothing is written when there are none.
    """
    first = True
    count = 0
    if not out:
        out = self
    buff = StringIO.StringIO()
    #log.info("SUPERS %s" % VTerm.term2str(cl.getSupers()))
    for p in cl.getSupers():
        # Skip supers that are local (non-reference) and already in scope.
        if not p.isReference() and p.inLayers(layers):
            continue
        sep = ", "
        if first:
            sep = "<li>"
            first = False
        buff.write("%s%s" % (sep, self.ml(p)))
        count += 1
    if(count > 0):
        buff.write("</li>\n")
    content = buff.getvalue()
    if(len(content) > 0):
        if cl.getId() == "DataType":
            # Fix: closing tag was "<h4>", producing malformed HTML.
            self.write("<h4>Subclass of:</h4>")
        else:
            self.write("<h4>Available supertypes defined elsewhere</h4>")
        self.write("<ul>")
        self.write(content)
        self.write("</ul>")
    buff.close()
""" def emitClassExtensionProperties (self, cl, layers="core", out=None):
if not out:
out = self
buff = StringIO.StringIO()
for p in self.parentStack:
self._ClassExtensionProperties(buff, p, layers=layers)
content = buff.getvalue()
if(len(content) > 0):
self.write("<h4>Available properties in extensions</h4>")
self.write("<ul>")
self.write(content)
self.write("</ul>")
buff.close()
"""
def _ClassExtensionProperties (self, out, cl, layers="core"):
    """Write out a list of properties not displayed as they are in extensions for a per-type page.

    Groups cl's domain properties by their home extension layer and emits
    one <li> per extension with comma-separated property links.
    """
    di = Unit.GetUnit("schema:domainIncludes")
    targetlayers = self.appropriateLayers(layers)
    #log.info("Appropriate targets %s" % targetlayers)
    exts = {}
    for prop in sorted(GetSources(di, cl, targetlayers), key=lambda u: u.id):
        # Exclude external-vocabulary terms.
        if ":" in prop.id:
            continue
        if (prop.superseded(layers=targetlayers)):
            continue
        if inLayer(layers, prop):  # Already in the correct layer - no need to report
            continue
        if inLayer("meta", prop):  # Suppress mentioning properties from the 'meta' extension.
            continue
        ext = prop.getHomeLayer()
        if not ext in exts.keys():
            exts[ext] = []
        exts[ext].append(prop)
    for e in sorted(exts.keys()):
        count = 0
        first = True
        for p in sorted(exts[e], key=lambda u: u.id):
            sep = ", "
            if first:
                # Open the per-extension list item on the first property.
                out.write("<li>For %s in the <a href=\"%s\">%s</a> extension: " % (self.ml(cl), makeUrl(e, ""), e))
                sep = ""
                first = False
            out.write("%s%s" % (sep, self.ml(p)))
            count += 1
        if(count > 0):
            out.write("</li>\n")
def emitClassIncomingProperties (self, term, out=None, hashorslash="/"):
    """Write out a table of incoming properties for a per-type page.

    Lists every property whose expected values include 'term', with the
    types it is used on and its description. Emits nothing when the term
    is not the target of any property.
    """
    if not out:
        out = self
    headerPrinted = False
    props = term.getTargetOf()
    for prop in props:
        if (prop.superseded()):
            continue
        supersedes = prop.getSupersedes()
        inverseprop = prop.getInverseOf()
        domains = prop.getDomains()
        comment = prop.getComment()
        if (not headerPrinted):
            self.write("<br/><br/><div id=\"incoming\">Instances of %s may appear as values for the following properties</div><br/>" % (self.ml(term)))
            self.write("<table class=\"definition-table\">\n \n \n<thead>\n <tr><th>Property</th><th>On Types</th><th>Description</th> \n </tr>\n</thead>\n\n")
            headerPrinted = True
        self.write("<tr>\n<th class=\"prop-nam\" scope=\"row\">\n <code>%s</code>\n</th>\n " % (self.ml(prop)) + "\n")
        self.write("<td class=\"prop-ect\">\n")
        first_dom = True
        for d in domains:
            if (not first_dom):
                self.write(" or<br/> ")
            first_dom = False
            self.write(self.ml(d))
            self.write(" ")
        self.write("</td>")
        self.write("<td class=\"prop-desc\">%s " % (comment))
        if supersedes:
            self.write(" Supersedes")
            first = True
            for s in supersedes:
                # Fix: the comma separator was emitted before the FIRST
                # item ("Supersedes, a b") instead of between items;
                # now renders "Supersedes a, b".
                if not first:
                    self.write(",")
                first = False
                self.write(" %s" % self.ml(s))
            self.write(". ")
        if inverseprop:
            self.write("<br/> inverse property: %s." % (self.ml(inverseprop)) )
        self.write("</td></tr>")
    if (headerPrinted):
        self.write("</table>\n")
def emitRangeTypesForProperty(self, node, layers="core", out=None, hashorslash="/"):
    """Write out simple HTML summary of this property's expected types."""
    target = out if out else self
    target.write("<ul class='attrrangesummary'>")
    for rng in VTerm.getTerm(node).getRanges():
        rid = rng.getId()
        target.write("<li><a href='%s%s'>%s</a></li>" % (hashorslash, rid, rid))
    target.write("</ul>\n\n")
def emitDomainTypesForProperty(self, node, layers="core", out=None, hashorslash="/"):
    """Write out simple HTML summary of types that expect this property."""
    target = out if out else self
    target.write("<ul class='attrdomainsummary'>")
    for dom in VTerm.getTerm(node).getDomains():
        did = dom.getId()
        target.write("<li><a href='%s%s'>%s</a></li>" % (hashorslash, did, did))
    target.write("</ul>\n\n")
def emitAttributeProperties(self, term, out=None, hashorslash="/"):
    """Write out properties of this property, for a per-property page.

    Emits, in order: the inverse property (if any), expected value
    types, domain types, sub-properties and super-properties, each as a
    small definition table.
    """
    if not out:
        out = self
    ranges = term.getRanges()
    domains = term.getDomains()
    inverseprop = term.getInverseOf()
    subprops = term.getSubs()
    superprops = term.getSupers()
    if (inverseprop != None):
        tt = "This means the same thing, but with the relationship direction reversed."
        out.write("<p>Inverse-property: %s.</p>" % (self.ml(inverseprop, inverseprop.getId(), tt, prop=False, hashorslash=hashorslash)) )
    # Expected value types (rangeIncludes).
    out.write("<table class=\"definition-table\">\n")
    out.write("<thead>\n <tr>\n <th>Values expected to be one of these types</th>\n </tr>\n</thead>\n\n <tr>\n <td>\n ")
    first_range = True
    for r in ranges:
        if (not first_range):
            out.write("<br/>")
        first_range = False
        tt = "The '%s' property has values that include instances of the '%s' type." % (term.getId(), r.getId())
        out.write(" <code>%s</code> " % (self.ml(r, r.getId(), tt, prop="rangeIncludes", hashorslash=hashorslash) + "\n"))
    out.write(" </td>\n </tr>\n</table>\n\n")
    # Types using this property (domainIncludes).
    first_domain = True
    out.write("<table class=\"definition-table\">\n")
    out.write(" <thead>\n <tr>\n <th>Used on these types</th>\n </tr>\n</thead>\n<tr>\n <td>")
    for d in domains:
        if (not first_domain):
            out.write("<br/>")
        first_domain = False
        tt = "The '%s' property is used on the '%s' type." % (term.getId(), d.getId())
        out.write("\n <code>%s</code> " % (self.ml(d, d.getId(), tt, prop="domainIncludes", hashorslash=hashorslash) + "\n" ))
    out.write(" </td>\n </tr>\n</table>\n\n")
    # Sub-properties
    if (subprops != None and len(subprops) > 0):
        out.write("<table class=\"definition-table\">\n")
        out.write(" <thead>\n <tr>\n <th>Sub-properties</th>\n </tr>\n</thead>\n")
        for sp in subprops:
            # Tooltip shows a shortened, tag-free version of the comment.
            c = ShortenOnSentence(StripHtmlTags( sp.getComment() ), 60)
            tt = "%s: ''%s''" % ( sp.getId(), c)
            out.write("\n <tr><td><code>%s</code></td></tr>\n" % (self.ml(sp, sp.getId(), tt, hashorslash=hashorslash)))
        out.write("\n</table>\n\n")
    # Super-properties
    if (superprops != None and len(superprops) > 0):
        out.write("<table class=\"definition-table\">\n")
        out.write(" <thead>\n <tr>\n <th>Super-properties</th>\n </tr>\n</thead>\n")
        for sp in superprops:
            c = ShortenOnSentence(StripHtmlTags( sp.getComment() ), 60)
            tt = "%s: ''%s''" % ( sp.getId(), c)
            out.write("\n <tr><td><code>%s</code></td></tr>\n" % (self.ml(sp, sp.getId(), tt, hashorslash=hashorslash)))
        out.write("\n</table>\n\n")
def emitSupersedes(self, term, out=None, hashorslash="/"):
    """Write out Supersedes and/or Superseded by for this term"""
    if not out:
        out = self
    newerprop = term.getSupersededBy() # None of one. e.g. we're on 'seller'(new) page, we get 'vendor'(old)
    #olderprop = node.supersedes(layers=layers) # None or one
    olderprops = term.getSupersedes()
    # Supersedes: table of the older terms this one replaces.
    if (olderprops != None and len(olderprops) > 0):
        out.write("<table class=\"definition-table\">\n")
        out.write(" <thead>\n <tr>\n <th>Supersedes</th>\n </tr>\n</thead>\n")
        for o in olderprops:
            c = ShortenOnSentence(StripHtmlTags( o.getComment() ), 60)
            tt = "%s: ''%s''" % ( o.getId(), c)
            out.write("\n <tr><td><code>%s</code></td></tr>\n" % (self.ml(o, o.getId(), tt)))
            log.info("Super %s" % o.getId())
        out.write("\n</table>\n\n")
    # supersededBy (at most one direct successor)
    if (newerprop != None):
        out.write("<table class=\"definition-table\">\n")
        out.write(" <thead>\n <tr>\n <th><a href=\"/supersededBy\">supersededBy</a></th>\n </tr>\n</thead>\n")
        c = ShortenOnSentence(StripHtmlTags( newerprop.getComment() ), 60)
        tt = "%s: ''%s''" % ( newerprop.getId(), c)
        out.write("\n <tr><td><code>%s</code></td></tr>\n" % (self.ml(newerprop, newerprop.getId(), tt)))
        out.write("\n</table>\n\n")
def rep(self, markup):
    """Return markup with < and > replaced by HTML escape entities.

    Used so example markup displays as visible text rather than being
    rendered by the browser.
    """
    # Fix: the substitutions replaced '<' with '<' and '>' with '>'
    # (no-ops); escape to the entities the docstring promises.
    m1 = re.sub("<", "&lt;", markup)
    m2 = re.sub(">", "&gt;", m1)
    # TODO: Ampersand? Check usage with examples.
    return m2
def handleHomepage(self, node):
    """Send the homepage, or if no HTML accept header received and JSON-LD was requested, send JSON-LD context file.
    typical browser accept list: ('Accept', 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8')
    # e.g. curl -H "Accept: application/ld+json" http://localhost:8080/
    see also http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html
    https://github.com/rvguha/schemaorg/issues/5
    https://github.com/rvguha/schemaorg/wiki/JsonLd

    Returns False in all paths (the response is never cached by callers).
    """
    accept_header = self.request.headers.get('Accept')
    if accept_header:
        accept_header = accept_header.split(',')
    else:
        accept_header = ""
    # Homepage is content-negotiated. HTML or JSON-LD.
    # Score each requested media type; lower score == more preferred here,
    # present types score 1, absent ones get a high default.
    mimereq = {}
    for ah in accept_header:
        # Strip any quality parameter (";q=0.9") before recording the type.
        ah = re.sub( r";q=\d?\.\d+", '', ah).rstrip()
        mimereq[ah] = 1
    html_score = mimereq.get('text/html', 5)
    xhtml_score = mimereq.get('application/xhtml+xml', 5)
    jsonld_score = mimereq.get('application/ld+json', 10)
    json_score = mimereq.get('application/json', 10)
    log.info( "accept_header: " + str(accept_header) + " mimereq: " + str(mimereq) + "Scores H:{0} XH:{1} JL:{2} J:{3}".format(html_score, xhtml_score, jsonld_score, json_score))
    # JSON(-LD) explicitly preferred over any HTML flavour -> redirect to context file.
    if (ENABLE_JSONLD_CONTEXT and ((jsonld_score < html_score and jsonld_score < xhtml_score) or (json_score < html_score and json_score < xhtml_score))):
        self.response.set_status(302, "Found")
        if jsonld_score < json_score:
            self.response.headers['Location'] = makeUrl("", "docs/jsonldcontext.jsonld")
        else:
            self.response.headers['Location'] = makeUrl("", "docs/jsonldcontext.json")
        self.emitCacheHeaders()
        return False #don't cache this redirect
    else:
        # Serve a homepage from template
        # the .tpl has responsibility for extension homepages
        # TODO: pass in extension, base_domain etc.
        #sitekeyedhomepage = "homepage %s" % getSiteName()
        # Page-store key is "<ext>.index.html" for extensions, "index.html" for core.
        ext = getHostExt()
        if ext == "core":
            ext = ""
        if len(ext):
            ext += "."
        sitekeyedhomepage = "%sindex.html" % ext
        hp = getPageFromStore(sitekeyedhomepage)
        self.response.headers['Content-Type'] = "text/html"
        self.emitCacheHeaders()
        if hp:
            self.response.out.write( hp )
            #log.info("Served datacache homepage.tpl key: %s" % sitekeyedhomepage)
        else:
            template_values = {
                'ext_contents': self.handleExtensionContents(getHostExt()),
                'home_page': "True",
            }
            page = templateRender('homepage.tpl', node, template_values)
            self.response.out.write( page )
            log.debug("Served and cached fresh homepage.tpl key: %s " % sitekeyedhomepage)
            # Tag the stored page with its layer for the cloud store.
            setAppVar(CLOUDEXTRAMETA, {'x-goog-meta-sdotermlayer': getHostExt()})
            PageStore.put(sitekeyedhomepage, page)
            # self.response.out.write( open("static/index.html", 'r').read() )
        return False # - Not caching homepage
    log.info("Warning: got here how?")  # NOTE(review): unreachable — both branches return above.
    return False
def getExtendedSiteName(self, layers):
    """Returns site name (domain name), informed by the list of active layers."""
    # Core-only (or no layers at all) maps to the bare domain.
    if not layers or layers == ["core"] or len(layers) == 0:
        return "schema.org"
    return getHostExt() + ".schema.org"
def emitSchemaorgHeaders(self, node, ext_mappings='', sitemode="default", sitename="schema.org", layers="core"):
    """Build the standard page headers for node and write them to the response."""
    self.response.out.write(self.buildSchemaorgHeaders(node, ext_mappings, sitemode, sitename, layers))
def buildSiteHeaders(self, term, ext_mappings='', sitemode="default", sitename="schema.org"):
    """
    Generates, caches and emits HTML headers for class, property and enumeration pages. Leaves <body> open.
    * entry = name of the class or property

    Returns the rendered genericTermPageHeader.tpl output as a string.
    """
    buff = sdoStringIO()
    rdfs_type = 'rdfs:Class'
    entry = term.id
    if term.isProperty():
        rdfs_type = 'rdfs:Property'
    # Fix: removed dead "desc = entry" assignment that was immediately
    # overwritten by the meta-description below.
    desc = self.getMetaDescription(term, lengthHint=200)
    template_values = {
        'entry': str(entry),
        'desc' : desc,
        'menu_sel': "Schemas",
        'rdfs_type': rdfs_type,
        'ext_mappings': ext_mappings,
        'noindexpage': noindexpages
    }
    out = templateRender('genericTermPageHeader.tpl', term, template_values)
    buff.write(out)
    ret = buff.getvalue()
    buff.close()
    return ret
def buildSchemaorgHeaders(self, node, ext_mappings='', sitemode="default", sitename="schema.org", layers="core"):
    """
    Generates, caches and emits HTML headers for class, property and enumeration pages. Leaves <body> open.
    * entry = name of the class or property

    node may be a plain string (used verbatim as the entry name) or a
    node object whose kind determines the RDFa typeof value.
    """
    buff = sdoStringIO()
    rdfs_type = 'rdfs:Property'
    anode = True
    if isinstance(node, str):
        entry = node
        anode = False
    else:
        entry = node.id
        if node.isEnumeration():
            rdfs_type = 'rdfs:Class'
        elif node.isEnumerationValue():
            # Enumeration values get their explicit rdf:type list instead.
            rdfs_type = ""
            nodeTypes = GetTargets(Unit.GetUnit("rdf:type"), node, layers=layers)
            typecount = 0
            # NOTE(review): loop variable "type" shadows the builtin.
            for type in nodeTypes:
                if typecount > 0:
                    rdfs_type += " "
                rdfs_type += type.id
                typecount += 1
        elif node.isClass():
            rdfs_type = 'rdfs:Class'
        elif node.isAttribute():
            rdfs_type = 'rdfs:Property'
    desc = entry
    if anode:
        desc = self.getMetaDescription(node, layers=layers, lengthHint=200)
    template_values = {
        'entry': str(entry),
        'desc' : desc,
        'menu_sel': "Schemas",
        'rdfs_type': rdfs_type,
        'ext_mappings': ext_mappings,
        'noindexpage': noindexpages
    }
    out = templateRender('genericTermPageHeader.tpl', node, template_values)
    buff.write(out)
    ret = buff.getvalue()
    buff.close()
    return ret
def getMetaDescription(self, term, layers="core", lengthHint=250):
    """Return a short '<meta>'-style description for term.

    Format: "Schema.org <kind>: <id> - <shortened comment>", with the
    comment trimmed so the whole string approaches lengthHint.
    """
    if term.isEnumeration():
        kind = " Enumeration Type"
    elif term.isClass():
        kind = " Type"
    elif term.isProperty():
        kind = " Property"
    elif term.isEnumerationValue():
        kind = " Enumeration Value"
    else:
        kind = ""
    prefix = "Schema.org%s: %s - " % (kind, term.id)
    budget = lengthHint - len(prefix)
    return prefix + ShortenOnSentence(StripHtmlTags(term.getComment()), budget)
def appropriateLayers(self, layers="core"):
    """Return all layers, excluding attic unless attic was requested."""
    return ALL_LAYERS if ATTIC in layers else ALL_LAYERS_NO_ATTIC
def emitExactTermPage(self, term, layers="core"):
    """Emit a Web page that exactly matches this node.

    Serves a cached copy when available; otherwise assembles the page
    from headers, property tables, examples etc., caches it in
    PageStore and writes it to the response.
    """
    log.info("EXACT PAGE: %s" % term.getId())
    self.outputStrings = [] # blank slate
    cached = getPageFromStore(term.getId())
    if (cached != None):
        log.info("GOT CACHED page for %s" % term.getId())
        self.response.write(cached)
        return
    log.info("Building page")
    ext_mappings = GetExtMappingsRDFa(term)
    self.write(self.buildSiteHeaders(term, ext_mappings, sitemode, getSiteName()))
    #log.info("Done buildSiteHeaders")
    #log.info("Stak %s" % term.getTermStack())
    self.emitUnitHeaders(term)  # writes <h1><table>...
    stack = self._removeStackDupes(term.getTermStack())
    setAppVar("tableHdr", False)
    if term.isClass() or term.isDataType() or term.isEnumeration():
        for p in stack:
            # NOTE(review): "p==[0]" compares a term to the list [0] and is
            # always False; probably intended "p == stack[0]". Confirm
            # before changing — it controls the empty-superclass row.
            self.ClassProperties(p, p==[0], out=self, term=term)
        if getAppVar("tableHdr"):
            self.write("\n\n</table>\n\n")
        self.emitClassIncomingProperties(term)
        self.emitClassExtensionSuperclasses(term, layers)
        #self.emitClassExtensionProperties(p,layers) #Not needed since extension defined properties displayed in main listing
    elif term.isProperty():
        self.emitAttributeProperties(term)
    elif term.isDataType():
        # NOTE(review): unreachable — isDataType() terms are caught by the
        # first branch above.
        self.emitClassIncomingProperties(term)
    self.emitSupersedes(term)
    self.emitchildren(term)
    self.emitAcksAndSources(term)
    self.emitTermExamples(term)
    self.write(" <br/>\n\n</div>\n</body>\n<!-- AppEngineVersion %s (%s)-->\n</html>" % (getAppEngineVersion(), appver))
    page = "".join(self.outputStrings)
    setAppVar(CLOUDEXTRAMETA, {'x-goog-meta-sdotermlayer': term.getLayer()})
    PageStore.put(term.getId(), page)
    self.response.write(page)
def emitTermExamples(self, term):
    """Write the tabbed example blocks (plain / microdata / RDFa / JSON-LD) for term."""
    examples = GetExamples(term)
    log.debug("Rendering n=%s examples" % len(examples))
    if (len(examples) > 0):
        # (label, css selector class, initially-selected flag) per tab.
        example_labels = [
            ('Without Markup', 'original_html', 'selected'),
            ('Microdata', 'microdata', ''),
            ('RDFa', 'rdfa', ''),
            ('JSON-LD', 'jsonld', ''),
        ]
        self.write("<b><a %s >Examples</a></b><br/><br/>\n\n" % self.showlink("examples"))
        exNum = 0
        for ex in sorted(examples, key=lambda u: u.keyvalue):
            #if not ex.egmeta["layer"] in layers: #Example defined in extension we are not in
            #continue
            exNum += 1
            # Anchor id: explicit id from example metadata, else "example-N".
            id = "example-%s" % exNum
            if "id" in ex.egmeta:
                id = ex.egmeta["id"]
            self.write("<div><a %s>Example %s</a></div>" % (self.showlink(id), exNum))
            self.write("<div class='ds-selector-tabs ds-selector'>\n")
            self.write(" <div class='selectors'>\n")
            for label, example_type, selected in example_labels:
                self.write(" <a data-selects='%s' class='%s'>%s</a>\n"
                           % (example_type, selected, label))
            self.write("</div>\n\n")
            for label, example_type, selected in example_labels:
                # rep() escapes the markup so it displays as text.
                self.write("<pre class=\"prettyprint lang-html linenums %s %s\">%s</pre>\n\n"
                           % (example_type, selected, self.rep(ex.get(example_type))))
            self.write("</div>\n\n")
def showlink(self, id):
    """Return anchor attributes for a clickable '#id' link ('' for empty id)."""
    if not id:
        return ""
    return " id=\"%s\" title=\"Link: #%s\" href=\"#%s\" class=\"clickableAnchor\" " % (id, id, id)
def _removeStackDupes(self,stack):
cleanstack = []
i = len(stack)
while i:
i -= 1
if not stack[i] in cleanstack:
cleanstack.insert(0,stack[i])
return cleanstack
def emitAcksAndSources(self, term):
    """Write the Source(s) and Acknowledgement(s) sections for term, if any."""
    sources = term.getSources()
    if len(sources):
        plural = "s" if len(sources) > 1 else ""
        self.write("<h4 id=\"acks\">Source%s</h4>\n" % plural)
        for src in sources:
            # Bare URLs become markdown links before rendering.
            if src.startswith("http://") or src.startswith("https://"):
                src = "[%s](%s)" % (src, src)
            self.write(Markdown.parse(src, True))
    acknowledgements = term.getAcknowledgements()
    if len(acknowledgements):
        plural = "s" if len(acknowledgements) > 1 else ""
        self.write("<h4 id=\"acks\">Acknowledgement%s</h4>\n" % plural)
        for ack in sorted(acknowledgements):
            self.write(Markdown.parse(str(ack), True))
def emitchildren(self, term):
    """Write the 'More specific Types' / 'Enumeration members' list for term.

    Builds the <li> entries first; the heading depends on the kind of
    term, and nothing is emitted for properties or when every child is
    superseded/attic-hidden.
    """
    children = term.getSubs()
    log.info("CHILDREN: %s" % VTerm.term2str(children))
    if (len(children) > 0):
        buff = StringIO.StringIO()
        for c in children:
            if c.superseded() or self.hideAtticTerm(c):
                continue
            buff.write("<li> %s </li>" % (self.ml(c)))
        if (len(buff.getvalue()) > 0 and not term.isProperty()):
            if term.isDataType():
                self.write("<br/><b><a %s>More specific DataTypes</a></b><ul>" % self.showlink("subtypes"))
            elif term.isClass() or term.isEnumerationValue():
                self.write("<br/><b><a %s>More specific Types</a></b><ul>" % self.showlink("subtypes"))
            elif term.isEnumeration():
                self.write("<br/><b><a %s>Enumeration members</a></b><ul>" % self.showlink("enumbers"))
            self.write(buff.getvalue())
            self.write("</ul>")
        buff.close()
def emitHTTPHeaders(self, node):
    """Add cross-cutting HTTP response headers (currently just CORS)."""
    if ENABLE_CORS:
        self.response.headers.add_header("Access-Control-Allow-Origin", "*") # entire site is public.
        # see http://en.wikipedia.org/wiki/Cross-origin_resource_sharing
def setupExtensionLayerlist(self, node):
    """Return the de-duplicated list of extension layers for this request.

    Combines 'core' with any ?ext=foo,bar debug parameter and the
    subdomain-derived host extension (e.g. 'bib' from bib.schema.org).
    """
    # 1. get a comma list from ?ext=foo,bar URL notation
    extlist = cleanPath(self.request.get("ext"))  # for debugging
    extlist = re.sub(ext_re, '', extlist).split(',')
    log.debug("?ext= extension list: '%s' " % ", ".join(extlist))
    # 2. Ignore ?ext=, start with 'core' only.
    layerlist = ["core"]
    # 3. Use host_ext if set, e.g. 'bib' from bib.schema.org
    if getHostExt() != None:
        log.debug("Host: %s host_ext: %s" % (self.request.host, getHostExt()))
        extlist.append(getHostExt())
    # Keep everything except core/localhost/empty markers.
    for candidate in extlist:
        if candidate in ["core", "localhost", ""]:
            continue
        layerlist.append("%s" % str(candidate))
    return list(set(layerlist))  # dedup
def handleJSONContext(self, node):
    """Handle JSON-LD Context non-homepage requests (including refuse if not enabled).

    Returns True when the request was handled (even the 404 case),
    False when node is not a recognised context path.
    """
    if not ENABLE_JSONLD_CONTEXT:
        self.error(404)
        self.response.out.write('<title>404 Not Found.</title><a href="/">404 Not Found (JSON-LD Context not enabled.)</a><br/><br/>')
        return True
    # Map the requested path to a page-store label and content type.
    ctype = "text/plain"
    if (node == "docs/jsonldcontext.json.txt"):
        label = "txt:jsonldcontext.json.txt"
        ctype = "text/plain"
    elif (node == "docs/jsonldcontext.json"):
        label = "json:docs/jsonldcontext.json"
        ctype = "application/json"
    elif (node == "docs/jsonldcontext.jsonld"):
        label = "jsonld:docs/jsonldcontext.jsonld"
        ctype = "application/ld+json"
    else:
        return False
    self.response.headers['Content-Type'] = ctype
    jsonldcontext = getPageFromStore(label)
    if not jsonldcontext:
        # Build once and cache for subsequent requests.
        jsonldcontext = GetJsonLdContext(layers=ALL_LAYERS)
        PageStore.put(label, jsonldcontext)
    if jsonldcontext:
        self.emitCacheHeaders()
        self.response.out.write( jsonldcontext )
        return True
    return False
# see also handleHomepage for conneg'd version.
def handleSchemasPage(self, node, layerlist='core'):
    """Serve the /docs/schemas.html overview page (cached in PageStore)."""
    page = getPageFromStore(node)
    if page:
        self.response.out.write( page )
        log.debug("Serving recycled SchemasPage.")
        return True
    else:
        self.response.headers['Content-Type'] = "text/html"
        self.emitCacheHeaders()
        # Build the list of extension-home links (attic listed separately).
        extensions = []
        for ex in sorted(ENABLED_EXTENSIONS):
            if ex != ATTIC:
                t = SdoConfig.getDescriptor(ex, "disambiguatingDescription")
                extensions.append("<a title=\"%s\" href=\"%s\">%s.schema.org</a>" % (t, makeUrl(ex, "", full=True), ex))
        page = templateRender('schemas.tpl', node, {'counts': self.getCounts(),
                                                    'extensions': extensions,
                                                    'attic': "<a href=\"%s\">%s.schema.org</a>" % (makeUrl(ATTIC, ""), ATTIC),
                                                    'menu_sel': "Schemas"})
        self.response.out.write( page )
        log.debug("Serving fresh SchemasPage.")
        PageStore.put(node, page)
        return True
def handleDumpsPage(self, node, layerlist='core'):
    """Serve the data-downloads ('developers') page (cached in PageStore)."""
    self.response.headers['Content-Type'] = "text/html"
    self.emitCacheHeaders()
    page = getPageFromStore(node)
    if page:
        self.response.out.write( page)
        log.debug("Serving recycled DumpsPage.")
        return True
    else:
        extensions = sorted(ENABLED_EXTENSIONS)
        page = templateRender('developers.tpl', node, {'extensions': extensions,
                                                       'version': SCHEMA_VERSION,
                                                       'menu_sel': "Schemas"})
        self.response.out.write( page )
        log.debug("Serving fresh DumpsPage.")
        PageStore.put(node, page)
        return True
def getCounts(self):
    """Return the sentence summarising core type/property/enum counts."""
    log.info("counts")
    types = str(countTypes(extension="core"))
    log.info("TYPES %s" % types)
    props = str(countProperties(extension="core"))
    log.info("PROPS %s" % props)
    enums = str(countEnums(extension="core"))
    log.info("ENUMS %s" % enums)
    # Assembled from the same three fragments as before (note the double
    # space before the Properties count, kept for byte-identical output).
    return ("The core vocabulary currently consists of %s Types, "
            " %s Properties, "
            "and %s Enumeration values." % (types, props, enums))
def handleFullHierarchyPage(self, node, layerlist='core'):
    """Serve the full type-hierarchy tree page (cached in PageStore).

    Walks every top-level term, rendering DataTypes into their own tree
    and everything else into the main Thing tree.
    """
    #label = 'FullTreePage - %s' % getHostExt()
    #label = 'FullTreePage'
    urlprefix = ''
    label = node
    # Pages under /docs/ need relative links one level up.
    if label.startswith('docs/'):
        urlprefix = '..'
    if getPageFromStore(label):
        self.response.out.write( getPageFromStore(label) )
        log.debug("Serving recycled %s." % label)
        return True
    else:
        self.response.headers['Content-Type'] = "text/html"
        self.emitCacheHeaders()
        template = JINJA_ENVIRONMENT.get_template('full.tpl')
        # Build a quoted, comma-separated list of non-core layers (the
        # extlist/extonlylist results are currently only used by the
        # commented-out extension-tree code below).
        extlist = ""
        extonlylist = []
        count = 0
        for i in layerlist:
            if i != "core":
                sep = ""
                if count > 0:
                    sep = ", "
                extlist += "'%s'%s" % (i, sep)
                extonlylist.append(i)
                count += 1
        local_button = ""
        #local_label = "<h3>Core vocabulary</h3>"
        local_label = ""
        ext_button = ""
        tops = self.gettops()
        full_thing_tree = ""
        thing_tree = ""
        datatype_tree = ""
        first = True
        dtcount = 0
        tcount = 0
        mainroot = TypeHierarchyTree(local_label)
        dtroot = TypeHierarchyTree("<h4>Data Types</h4>")
        # Route each top-level term into the DataType tree or the main tree.
        for t in tops:
            if not first:
                local_label = ""
            first = False
            top = VTerm.getTerm(t)
            if top.isDataType() or top.getUri() == "http://schema.org/DataType":
                dtcount += 1
                dtroot.traverseForHTML(top, layers=layerlist, idprefix="D.", urlprefix=urlprefix)
            else:
                tcount += 1
                mainroot.traverseForHTML(top, layers=layerlist, idprefix="C.", urlprefix=urlprefix, traverseAllLayers=True)
        if dtcount:
            datatype_tree += dtroot.toHTML()
        if tcount:
            full_thing_tree += mainroot.toHTML()
        #fullmainroot = TypeHierarchyTree("<h3>Core plus all extension vocabularies</h3>")
        #fullmainroot.traverseForHTML(uThing, layers=ALL_LAYERS_NO_ATTIC, idprefix="CE.", urlprefix=urlprefix)
        #full_thing_tree = fullmainroot.toHTML()
        ext_thing_tree = ""
        #if len(extonlylist) > 0:
        #extroot = TypeHierarchyTree("<h3>Extension: %s</h3>" % extlist)
        #extroot.traverseForHTML(uThing, layers=extonlylist, traverseAllLayers=True, idprefix="E.", urlprefix=urlprefix)
        #ext_thing_tree = extroot.toHTML()
        #dtroot = TypeHierarchyTree("<h4>Data Types</h4>")
        #dtroot.traverseForHTML(uDataType, layers=layerlist, idprefix="D.", urlprefix=urlprefix)
        #datatype_tree = dtroot.toHTML()
        full_button = "Core plus all extension vocabularies"
        page = templateRender('full.tpl', node, { 'full_thing_tree': full_thing_tree,
                                                  'datatype_tree': datatype_tree,
                                                  'menu_sel': "Schemas"})
        self.response.out.write( page )
        log.debug("Serving fresh %s." % label)
        PageStore.put(label, page)
        return True
def gettops(self):
    """Return the top-level terms of the hierarchy (delegates to rdfgettops)."""
    return rdfgettops()
def handleJSONSchemaTree(self, node, layerlist='core'):
    """Handle a request for a JSON-LD tree representation of the schemas (RDFS-based)."""
    if isinstance(node, Unit):
        node = node.id
    self.response.headers['Content-Type'] = "application/ld+json"
    self.emitCacheHeaders()
    page = getPageFromStore(node)
    if page:
        self.response.out.write( page )
        log.debug("Serving recycled JSONLDThingTree.")
        return True
    else:
        # Build the tree rooted at Thing, cache and serve it.
        mainroot = TypeHierarchyTree()
        mainroot.traverseForJSONLD(VTerm.getTerm("Thing"), layers=layerlist)
        thing_tree = mainroot.toJSON()
        self.response.out.write( thing_tree )
        log.debug("Serving fresh JSONLDThingTree.")
        PageStore.put(node, thing_tree)
        return True
    return False  # NOTE(review): unreachable — both branches return above.
def checkConneg(self, node):
    """Content-negotiate a term page to a data format via the Accept header.

    If the first acceptable non-HTML media type maps to a known data
    serialization, respond 303 See Other to "<node><ext>" and return
    True; return False when HTML is acceptable or nothing matched.
    """
    accept_header = self.request.headers.get('Accept')
    if accept_header:
        accept_header = accept_header.split(',')
    else:
        accept_header = ""
    target = None
    for ah in accept_header:
        if target:
            break
        # Strip any quality parameter (";q=0.9") before matching.
        ah = re.sub( r";q=\d?\.\d+", '', ah).rstrip()
        log.debug("ACCEPT %s" % ah)
        if ah == "text/html":
            return False
        elif ah == "application/ld+json":
            target = ".jsonld"
        elif ah == "application/x-turtle":
            target = ".ttl"
        elif ah == "application/rdf+xml":
            target = ".rdf"
        elif ah == "text/plain":
            target = ".nt"
        elif ah == "text/csv":
            target = ".csv"
    if target:
        self.response.set_status(303, "See Other")
        self.response.headers['Location'] = makeUrl("", "%s%s" % (node, target))
        self.emitCacheHeaders()
        return True
    return False
def handleExactTermPage(self, node, layers='core'):
    """Handle with requests for specific terms like /Person, /fooBar.

    Strips a full-URI prefix, dispatches data-format suffixes
    (.jsonld/.ttl/...) and Accept-header negotiation, then renders the
    term page. Returns True when the request was handled.
    """
    # Fix: the docstring above was previously a stray string statement
    # placed after the first lines of code (a no-op); moved to the top.
    baseuri = SdoConfig.baseUri()
    if node.startswith(baseuri): #Special case will map full schema URI to the term name
        node = node[len(baseuri):]
    # A data-format extension (e.g. Person.jsonld) short-circuits to raw output.
    dataext = os.path.splitext(node)
    if dataext[1] in OUTPUTDATATYPES:
        ret = self.handleExactTermDataOutput(dataext[0], dataext[1])
        if ret == True:
            return True
    # Accept-header negotiation may redirect to a data format instead.
    if self.checkConneg(node):
        return True
    log.info("GETTING TERM: %s" % node)
    term = VTerm.getTerm(node)
    if not term:
        return False
    if not self.checkNodeExt(term):
        return False
    if not SUBDOMAINS or term.inLayers(layers):
        self.emitExactTermPage(term, layers=layers)
        return True
def checkNodeExt(self,term):
    """Check that *term* is being served from its home (sub)domain.

    Returns True when the request may proceed on the current host;
    otherwise issues a 301 redirect to the term's proper host (via
    redirectToBase/redirectToExt) and returns False.
    """
    # Escape hatch: serve from whatever subdomain was requested.
    if os.environ.get('STAYINEXTENTION',"False").lower() == "true":
        return True
    home = term.getLayer()  # layer (extension) the term is defined in
    ext = getHostExt()      # subdomain of the current request
    log.info("term: '%s' home: '%s' ext: '%s'" % (term,home,ext))
    # Core terms on the bare host need no redirect.
    if home == CORE and ext == '':
        return True
    if SUBDOMAINS:
        log.info("Checking for correct subdomain")
        if home == ext:
            return True
        # Wrong subdomain: bounce to the canonical location for the term.
        if home == CORE:
            log.info("Redirecting to core entity")
            self.redirectToBase(term.getId(),full=True)
        else:
            log.info("Redirecting to '%s' entity" % home)
            self.redirectToExt(term.getId(),ext=home, full=True)
        return False
    else: #SUBDOMAINS == False
        # Subdomains disabled: everything is served from the base host.
        if ext == '':
            return True
        else:
            log.info("SUBDOMAINS dissabled - Redirecting to core entity")
            self.redirectToBase(term.getId(),full=True)
            return False
def handleExactTermDataOutput(self, node=None, outputtype=None):
    """Emit term *node* serialized as *outputtype*.

    Supported output types: ".csv", ".jsonld", ".json", ".ttl",
    ".rdf"/".xml", ".nt". Sets the matching Content-Type, serves from
    the page store when possible and caches fresh output. Returns True
    when *node* named a known term, False otherwise.
    """
    log.info("handleExactTermDataOutput Node: '%s' Outputtype: '%s'" % (node, outputtype))
    ret = False
    if node and outputtype:
        term = VTerm.getTerm(node)
        if term:
            ret = True
            index = "%s:%s%s" % (outputtype,node,outputtype)
            data = getPageFromStore(index)
            # Attic-layer terms are excluded unless this request is being
            # served on the attic host itself.
            # NOTE(review): the cache key does not include excludeAttic,
            # so attic and non-attic hosts share cached entries — confirm
            # this is intended.
            excludeAttic=True
            if getHostExt()== ATTIC:
                excludeAttic=False
            if outputtype == ".csv":
                self.response.headers['Content-Type'] = "text/csv; charset=utf-8"
                if not data:
                    data = self.emitcsvTerm(term,excludeAttic)
                    PageStore.put(index,data)
            else:
                format = None
                if outputtype == ".jsonld":
                    self.response.headers['Content-Type'] = "application/ld+json; charset=utf-8"
                    format = "json-ld"
                elif outputtype == ".json":
                    self.response.headers['Content-Type'] = "application/json; charset=utf-8"
                    format = "json"
                elif outputtype == ".ttl":
                    self.response.headers['Content-Type'] = "application/x-turtle; charset=utf-8"
                    format = "turtle"
                elif outputtype == ".rdf" or outputtype == ".xml" :
                    self.response.headers['Content-Type'] = "application/rdf+xml; charset=utf-8"
                    format = "pretty-xml"
                elif outputtype == ".nt":
                    self.response.headers['Content-Type'] = "text/plain; charset=utf-8"
                    format = "nt"
                if format:
                    if not data:
                        # Bug fix: pass the computed excludeAttic flag
                        # instead of a hard-coded True, so the attic host
                        # actually includes attic terms (matching the
                        # .csv path above).
                        data = serializeSingleTermGrapth(node=node, format=format, excludeAttic=excludeAttic)
                        PageStore.put(index,data)
            if data:
                self.emitCacheHeaders()
                self.response.out.write( data )
                ret = True
    return ret
def emitcsvTerm(self,term,excludeAttic=True):
    """Render *term* as CSV: a header row plus the term's own row.

    Classes and enumeration values use the type layout; properties use
    the property layout. Unknown term kinds yield an empty string.
    """
    writer = sdordf2csv(queryGraph=getQueryGraph(),fullGraph=getQueryGraph(),markdownComments=True,excludeAttic=excludeAttic)
    sink = StringIO.StringIO()
    uri = term.getUri()
    # Pick the row emitter appropriate for this kind of term.
    emit = None
    if term.isClass() or term.isEnumerationValue():
        emit = writer.type2CSV
    elif term.isProperty():
        emit = writer.prop2CSV
    if emit is not None:
        emit(header=True,out=sink)
        emit(term=uri,header=False,out=sink)
    result = sink.getvalue()
    sink.close()
    return result
def handle404Failure(self, node, layers="core", extrainfo=None, suggest=True):
    """Emit the 404 page, optionally suggesting near-miss terms.

    extrainfo: extra HTML appended near the end of the body.
    suggest: when True, offer the node with its trailing '/'-segment or
    Action '-input'/'-output' suffix removed, if that names a real term.
    Always returns True (the 404 page was written).
    """
    self.error(404)
    self.emitSchemaorgHeaders("404 Not Found")
    #404 could be called from any path, so output all potential locations of schemaorg.css
    self.response.out.write('<link rel="stylesheet" type="text/css" href="../docs/schemaorg.css" />')
    self.response.out.write('<link rel="stylesheet" type="text/css" href="docs/schemaorg.css" />')
    self.response.out.write('<link rel="stylesheet" type="text/css" href="/docs/schemaorg.css" />')
    self.response.out.write('<h3>404 Not Found.</h3><p><br/>Page not found. Please <a href="/">try the homepage.</a><br/><br/></p>')
    if suggest:
        # NOTE(review): clean_node is computed but the suggestion lookups
        # below split the raw node instead — confirm which was intended.
        clean_node = cleanPath(node)
        log.debug("404: clean_node: clean_node: %s node: %s" % (clean_node, node))
        # Suggestion 1: node up to the first '/' may be a real term.
        base_term = VTerm.getTerm( node.rsplit('/')[0] )
        if base_term != None :
            self.response.out.write('<div>Perhaps you meant: <a href="/%s">%s</a></div> <br/><br/> ' % ( base_term.getId(), base_term.getId() ))
        # Suggestion 2: xyz-input / xyz-output style Action properties.
        base_actionprop = VTerm.getTerm( node.rsplit('-')[0] )
        if base_actionprop != None :
            self.response.out.write('<div>Looking for an <a href="/Action">Action</a>-related property? Note that xyz-input and xyz-output have <a href="/docs/actions.html">special meaning</a>. See also: <a href="/%s">%s</a></div> <br/><br/> ' % ( base_actionprop.getId(), base_actionprop.getId() ))
    if extrainfo:
        self.response.out.write("<div>%s</div>" % extrainfo)
    self.response.out.write("</div>\n</body>\n<!--AppEngineVersion %s -->\n</html>\n" % getAppEngineVersion())
    return True
def handleFullReleasePage(self, node, layerlist='core'):
    """Deal with a request for a full release summary page. Lists all terms and their descriptions inline in one long page.
    version/latest/ is from current schemas, others will need to be loaded and emitted from stored HTML snapshots (for now)."""
    # http://jinja.pocoo.org/docs/dev/templates/
    global releaselog
    clean_node = cleanPath(node)
    self.response.headers['Content-Type'] = "text/html"
    self.emitCacheHeaders()
    # Path shape: version/<version>/<format>; a two-segment path carries no format.
    requested_version = clean_node.rsplit('/')[1]
    requested_format = clean_node.rsplit('/')[-1]
    if len( clean_node.rsplit('/') ) == 2:
        requested_format=""
    log.info("Full release page for: node: '%s' cleannode: '%s' requested_version: '%s' requested_format: '%s' l: %s" % (node, clean_node, requested_version, requested_format, len(clean_node.rsplit('/')) ) )
    # Full release page for: node: 'version/' cleannode: 'version/' requested_version: '' requested_format: '' l: 2
    # /version/
    log.debug("clean_node: %s requested_version: %s " % (clean_node, requested_version))
    # Bare /version/ request: serve the table-of-contents page instead.
    if (clean_node=="version/" or clean_node=="version") and requested_version=="" and requested_format=="":
        log.info("Table of contents should be sent instead, then succeed.")
        # NOTE(review): getPageFromStore is called twice in the cached
        # branch, and the two log.debug messages below look swapped
        # ("from cache" sits on the freshly-rendered branch).
        if getPageFromStore('tocVersionPage'):
            self.response.out.write( getPageFromStore('tocVersionPage'))
            return True
        else:
            log.debug("Serving tocversionPage from cache.")
            # NOTE(review): iterkeys() is Python 2 only.
            page = templateRender('tocVersionPage.tpl', node,
                    {"releases": sorted(releaselog.iterkeys()),
                     "menu_sel": "Schemas"})
            self.response.out.write( page )
            log.debug("Serving fresh tocVersionPage.")
            PageStore.put("tocVersionPage",page)
            return True
    if requested_version in releaselog:
        # Released versions are served from on-disk snapshots.
        log.info("Version '%s' was released on %s. Serving from filesystem." % ( node, releaselog[requested_version] ))
        version_rdfa = "data/releases/%s/schema.rdfa" % requested_version
        version_allhtml = "data/releases/%s/schema-all.html" % requested_version
        version_nt = "data/releases/%s/schema.nt" % requested_version
        if requested_format=="":
            self.response.out.write( open(version_allhtml, 'r').read() )
            return True
        # log.info("Skipping filesystem for now.")
        if requested_format=="schema.rdfa":
            self.response.headers['Content-Type'] = "application/octet-stream" # It is HTML but ... not really.
            self.response.headers['Content-Disposition']= "attachment; filename=schemaorg_%s.rdfa.html" % requested_version
            self.response.out.write( open(version_rdfa, 'r').read() )
            return True
        if requested_format=="schema.nt":
            self.response.headers['Content-Type'] = "application/n-triples" # It is HTML but ... not really.
            self.response.headers['Content-Disposition']= "attachment; filename=schemaorg_%s.rdfa.nt" % requested_version
            self.response.out.write( open(version_nt, 'r').read() )
            return True
        if requested_format != "":
            return False # Turtle, csv etc.
    else:
        log.info("Unreleased version requested. We only understand requests for latest if unreleased.")
        if requested_version != "build-latest":
            return False
            # NOTE(review): unreachable (follows an unconditional return).
            log.info("giving up to 404.")
        else: # build-latest
            requested_version = SCHEMA_VERSION
            log.info("generating a live view of this latest release (with SCHEMA_VERSION set as: %s)." % SCHEMA_VERSION)
    # Live generation path: only reached for build-latest.
    if getPageFromStore('FullReleasePage.html'):
        self.response.out.write( getPageFromStore('FullReleasePage.html') )
        log.debug("Serving recycled FullReleasePage.")
        return True
    else:
        mainroot = TypeHierarchyTree()
        mainroot.traverseForHTML(VTerm.getTerm("Thing"), hashorslash="#term_", layers=layerlist)
        thing_tree = mainroot.toHTML()
        base_href = "/version/%s/" % requested_version
        az_types = GetAllTypes()
        az_types.sort()
        az_type_meta = {}
        az_props = GetAllProperties()
        az_props.sort()
        az_prop_meta = {}
        # TYPES: per-type comment plus applicable/incoming property lists.
        for t in az_types:
            props4type = HTMLOutput() # properties applicable for a type
            props2type = HTMLOutput() # properties that go into a type
            self.emitSimplePropertiesPerType(t, out=props4type, hashorslash="#term_" )
            self.emitSimplePropertiesIntoType(t, out=props2type, hashorslash="#term_" )
            tcmt = Markup(VTerm.getTerm(t).getComment())
            az_type_meta[t]={}
            az_type_meta[t]['comment'] = tcmt
            az_type_meta[t]['props4type'] = props4type.toHTML()
            az_type_meta[t]['props2type'] = props2type.toHTML()
        # PROPERTIES: per-property comment plus range/domain type lists.
        for pt in az_props:
            attrInfo = HTMLOutput()
            rangeList = HTMLOutput()
            domainList = HTMLOutput()
            # self.emitAttributeProperties(pt, out=attrInfo, hashorslash="#term_" )
            # self.emitSimpleAttributeProperties(pt, out=rangedomainInfo, hashorslash="#term_" )
            self.emitRangeTypesForProperty(pt, out=rangeList, hashorslash="#term_" )
            self.emitDomainTypesForProperty(pt, out=domainList, hashorslash="#term_" )
            cmt = Markup(VTerm.getTerm(pt).getComment())
            az_prop_meta[pt] = {}
            az_prop_meta[pt]['comment'] = cmt
            az_prop_meta[pt]['attrinfo'] = attrInfo.toHTML()
            az_prop_meta[pt]['rangelist'] = rangeList.toHTML()
            az_prop_meta[pt]['domainlist'] = domainList.toHTML()
        # NOTE(review): requested_version was already replaced by
        # SCHEMA_VERSION in the build-latest branch above, so this test
        # can never be true here; also the else arm looks up
        # releaselog[str(SCHEMA_VERSION)] rather than requested_version —
        # confirm intent.
        if requested_version == "build-latest":
            requested_version = SCHEMA_VERSION
            releasedate = "XXXX-XX-XX (UNRELEASED PREVIEW VERSION)"
        else:
            releasedate = releaselog[str(SCHEMA_VERSION)]
        page = templateRender('fullReleasePage.tpl', node,
                {"base_href": base_href,
                 'thing_tree': thing_tree,
                 'liveversion': SCHEMA_VERSION,
                 'requested_version': requested_version,
                 'releasedate': releasedate,
                 'az_props': az_props, 'az_types': az_types,
                 'az_prop_meta': az_prop_meta, 'az_type_meta': az_type_meta,
                 'menu_sel': "Documentation"})
        self.response.out.write( page )
        log.debug("Serving fresh FullReleasePage.")
        PageStore.put("FullReleasePage.html",page)
        return True
def handleExtensionContents(self,ext):
    """Build the HTML listing of every term defined by extension *ext*.

    Terms are grouped by category, each group sorted by id and rendered
    as three sub-lists (types, properties, enumeration values). Returns
    "" for unknown extensions or extensions with no terms.
    """
    if ext not in ENABLED_EXTENSIONS:
        return ""
    out = StringIO.StringIO()
    terms = VTerm.getAllTerms(layer=ext) #Returns sorted by id results.
    # groupby needs its input ordered by the grouping key.
    terms.sort(key = lambda t: t.category)
    if len(terms) > 0:
        out.write("<br/><div style=\"text-align: left; margin: 2em\"><h3>Terms defined in the '%s' section.</h3>" % ext)
        grouped = [(cat, sorted(members, key=lambda t: t.id))
                   for cat, members in itertools.groupby(terms, key=lambda t: t.category)]
        for pos, (cat, members) in enumerate(grouped):
            if pos > 0:
                out.write("<br/>")
            # One sub-list per term kind, each with its own count header.
            for select, label in (("type", "Types"),
                                  ("prop", "Properties"),
                                  ("enum", "Enumeration values")):
                out.write(self.listTerms(members, "<br/>%s %s (%s)<br/>" %
                          (cat, label, self.countTypes(members, select=select, layers=ext)), select=select, layers=ext))
        out.write("</div>")
    ret = out.getvalue()
    out.close()
    return ret
def countTypes(self,interms,select="",layers='core'):
    """Count the terms in *interms* matching *select*.

    select: "type" counts classes, "prop" properties, "enum"
    enumeration values, "" everything; any other value counts nothing.
    *layers* is unused, kept for signature compatibility.
    """
    predicates = {
        "type": lambda t: t.isClass(),
        "prop": lambda t: t.isProperty(),
        "enum": lambda t: t.isEnumerationValue(),
        "": lambda t: True,
    }
    match = predicates.get(select)
    if match is None:
        return 0
    return sum(1 for t in interms if match(t))
def listTerms(self,interms,prefix="",select=None,layers='core'):
    """Return *prefix* followed by a comma-separated list of term links.

    select filters to classes ("type"), properties ("prop") or
    enumeration values ("enum"); None/"" keeps every term. Returns ""
    when nothing matches. *layers* is unused, kept for signature
    compatibility. Links are produced by self.ml().
    """
    accessor = {"type": "isClass",
                "prop": "isProperty",
                "enum": "isEnumerationValue"}.get(select)
    if select:
        # Unknown select values match nothing, as before.
        matched = [t for t in interms if accessor and getattr(t, accessor)()]
    else:
        matched = interms
    if not matched:
        return ""
    return prefix + ", ".join(self.ml(t) for t in matched)
def setupHostinfo(self, node, test=""):
    """Derive and publish per-request host state from the request (or *test*).

    Parses scheme, host extension (subdomain), base host, port and
    query arguments; may issue a 301 redirect for disabled/unknown
    subdomains (returning False). On success selects the per-host
    data/page/header caches and returns True.

    test: when non-empty, used instead of self.request.host (unit tests).
    """
    global noindexpages
    node = str(node)
    hostString = test
    host_ext = ""
    args = []
    if test == "":
        hostString = self.request.host
        args = self.request.arguments()
    # Strip an appengine version-name prefix ("<ver>.host") if present.
    ver=None
    if not getInTestHarness():
        from google.appengine.api.modules.modules import get_current_version_name
        ver = get_current_version_name()
    if hostString.startswith("%s." % ver):
        log.info("Removing version prefix '%s' from hoststring" % ver)
        hostString = hostString[len(ver) + 1:]
    scheme = "http" #Default for tests
    if not getInTestHarness(): #Get the actual scheme from the request
        scheme = self.request.scheme
    setHttpScheme(scheme)
    # First dotted (or colon-delimited) component is the candidate subdomain.
    match = re.match( r'([\w\-_]+)[\.:]?', hostString)
    host_ext = str(match.group(1))
    match0 = str(match.group(0))
    if host_ext + ":" == match0: #Special case for URLs with no subdomains - eg. localhost
        host_ext = ""
    split = hostString.rsplit(':')
    myhost = split[0]
    mybasehost = myhost
    myport = "80"
    if len(split) > 1:
        myport = split[1]
    setHostPort(myport)
    log.info("setupHostinfo: data: scheme='%s' hoststring='%s' initial host_ext='%s'" % (scheme, hostString, str(host_ext) ))
    # NOTE(review): this second version-name fetch is dead code — ver is
    # never used again below. Confirm it can be removed.
    ver=None
    if not getInTestHarness():
        from google.appengine.api.modules.modules import get_current_version_name
        ver = get_current_version_name()
    if host_ext != "":
        if host_ext in ENABLED_EXTENSIONS:
            # Known extension subdomain: strip it off the base host.
            mybasehost = mybasehost[len(host_ext) + 1:]
        elif host_ext == "www":
            # www is never served directly; redirect to the bare host.
            mybasehost = mybasehost[4:]
            setBaseHost(mybasehost)
            log.info("Host extention '%s' - redirecting to '%s'" % (host_ext,mybasehost))
            return self.redirectToBase(node,True)
        else:
            tempbase = mybasehost[len(host_ext)+1:]
            if tempbase in WORKINGHOSTS: #Known hosts so can control extention values
                mybasehost = tempbase
                setHostExt("")
                setBaseHost(mybasehost)
                log.info("Host extention '%s' not enabled - redirecting to '%s'" % (host_ext,mybasehost))
                return self.redirectToBase(node,True)
            else: #Unknown host so host_ext may be just part of the host string
                host_ext = ""
    log.info("setupHostinfo: calculated: basehost='%s' host_ext='%s'" % (mybasehost, host_ext ))
    setHostExt(host_ext)
    setBaseHost(mybasehost)
    # Only the production host is indexable, unless forced by env var.
    if mybasehost == "schema.org":
        noindexpages = False
    if "FORCEINDEXPAGES" in os.environ:
        if os.environ["FORCEINDEXPAGES"] == "True":
            noindexpages = False
    log.info("[%s] noindexpages: %s" % (getInstanceId(short=True),noindexpages))
    # NOTE(review): setHostExt/setBaseHost/setHostPort were already
    # called above — these repeats look redundant.
    setHostExt(host_ext)
    setBaseHost(mybasehost)
    setHostPort(myport)
    setArguments(args)
    # Select the cache namespace for this request.
    dcn = host_ext
    if dcn == None or dcn == "" or dcn =="core":
        dcn = "core"
    if scheme != "http":
        dcn = "%s-%s" % (dcn,scheme)
    dcn = "single" #Forcing single cache
    #log.info("Forcing single cache. !!!!!!!!!!!!!!!!")
    #log.info("sdoapp.py setting current datacache to: %s " % dcn)
    DataCache.setCurrent(dcn)
    PageStore.setCurrent(dcn)
    HeaderStore.setCurrent(dcn)
    debugging = False
    if "localhost" in hostString or "sdo-phobos.appspot.com" in hostString or FORCEDEBUGGING:
        debugging = True
    setAppVar('debugging',debugging)
    return True
def redirectToBase(self,node="",full=False):
    """301-redirect *node* to the bare (core) host; always returns False.

    full: build an absolute URL rather than a relative one.
    """
    destination = makeUrl("",node,full)
    log.info("Redirecting [301] to: %s" % destination)
    # In the test harness there is no real response object to replace.
    if not getInTestHarness():
        self.response = webapp2.redirect(destination, True, 301)
    return False
def redirectToExt(self,node="",ext="",full=False):
    """301-redirect *node* to extension host *ext*; always returns False.

    full: build an absolute URL rather than a relative one.
    """
    destination = makeUrl(ext,node,full)
    log.info("Redirecting [301] to: %s" % destination)
    # In the test harness there is no real response object to replace.
    if not getInTestHarness():
        self.response = webapp2.redirect(destination, True, 301)
    return False
def head(self, node):
    """Serve a HEAD request: run the full GET, then discard the body
    while preserving the response headers and status."""
    self.get(node) #Get the page
    kept_headers = self.response.headers.copy()
    kept_status = self.response.status
    self.response.clear()
    self.response.headers = kept_headers
    self.response.status = kept_status
    return
def get(self, node):
    """Top-level GET handler.

    Establishes host state, validates the node name, answers
    conditional requests (If-None-Match / If-Unmodified-Since) with 304
    from the header cache, otherwise generates the page via _get() and
    caches the resulting headers for future conditional requests.
    """
    if not self.setupHostinfo(node):
        return
    log.info("NODE: '%s'" % node)
    if not node or node == "":
        node = "/"
    # Reject node names that cannot be terms or files (junk URLs).
    if not validNode_re.search(str(node)) or os.path.basename(str(node)).count('.') > 2: #invalid node name
        log.warning("Invalid node name '%s'" % str(node))
        self.handle404Failure(node,suggest=False)
        return
    NotModified = False
    matchTag = self.request.headers.get("If-None-Match",None)
    unMod = self.request.headers.get("If-Unmodified-Since",None)
    #log.info("matchTag '%s' unMod '%s'" % (matchTag,unMod))
    # Response headers are cached per "<host-ext>:<node>".
    hdrIndex = getHostExt()
    if len(hdrIndex):
        hdrIndex += ":"
    hdrIndex += node
    hdrs = HeaderStore.get(hdrIndex)
    mod = None
    if hdrs:
        etag = hdrs.get("ETag",None)
        mod = hdrs.get("Last-Modified",None)
        log.info("stored etag '%s' mod '%s'" % (etag,mod))
        if matchTag == etag:
            NotModified = True
        elif unMod:
            unModt = datetime.datetime.strptime(unMod,"%a, %d %b %Y %H:%M:%S %Z")
            modt = datetime.datetime.strptime(mod,"%a, %d %b %Y %H:%M:%S %Z")
            if modt <= unModt:
                log.info("Last mod '%s' not modified since '%s' " % (mod,unMod))
                NotModified = True
    # A ?_pageFlush argument forces regeneration by dropping cached headers.
    if hdrs and "_pageFlush" in getArguments():
        log.info("Reloading header for %s" % hdrIndex)
        HeaderStore.remove(hdrIndex)
        hdrs = None
        NotModified = False
    if NotModified:
        self.response.clear()
        self.response.headers = hdrs
        self.response.set_status(304,"Not Modified")
    else:
        enableCaching = self._get(node) #Go get the page
        if enableCaching:
            if self.response.status.startswith("200"):
                # ETag/Last-Modified come from cloud storage metadata when
                # available, otherwise from a slug hash + site mod time.
                stat = getAppVar(CLOUDSTAT)
                log.info("CLOUDSTAT %s" % stat)
                if stat: #Use values from cloud storage
                    self.response.headers.add_header("ETag", stat.etag)
                    self.response.headers['Last-Modified'] = time.strftime("%a, %d %b %Y %H:%M:%S GMT",time.gmtime(stat.st_ctime))
                    self.response.headers['Content-Type'] = stat.content_type
                else:
                    if not self.response.headers.get('Content-Type',None):
                        # NOTE(review): guess_type returns (type, encoding);
                        # the second value is the encoding, not a content type.
                        mimetype, contentType = mimetypes.guess_type(node)
                        self.response.headers['Content-Type'] = mimetype
                    self.response.headers.add_header("ETag", getslug() + str(hash(hdrIndex)))
                    self.response.headers['Last-Modified'] = getmodiftime().strftime("%a, %d %b %Y %H:%M:%S GMT")
                store = True
                if mod: #Previous hdrs cached for this node
                    new = self.response.headers.get('Last-Modified',None)
                    if new and new == mod: #previous cached hdrs has same time as new one
                        store = False #No point storing it again
                if store:
                    retHdrs = self.response.headers.copy()
                    try:
                        HeaderStore.put(hdrIndex,retHdrs) #Cache these headers for a future 304 return
                    except Exception as e:
                        # Best effort: failure to cache headers is non-fatal.
                        log.warning("HeaderStore.put(%s) returned exception: %s" % (hdrIndex,e))
                        log.info("Abandoning caching of response headers for '%s'" % node)
                        pass
    #self.response.set_cookie('GOOGAPPUID', getAppEngineVersion())
    log.info("Responding:\n%s\nstatus: %s\n%s" % (node,self.response.status,self.response.headers ))
def _get(self, node, doWarm=True):
    """Get a schema.org site page generated for this node/term.
    Web content is written directly via self.response.
    CORS enabled all URLs - we assume site entirely public.
    See http://en.wikipedia.org/wiki/Cross-origin_resource_sharing
    These should give a JSON version of schema.org:
        curl --verbose -H "Accept: application/ld+json" http://localhost:8080/docs/jsonldcontext.json
        curl --verbose -H "Accept: application/ld+json" http://localhost:8080/docs/jsonldcontext.json.txt
        curl --verbose -H "Accept: application/ld+json" http://localhost:8080/
    Per-term pages vary for type, property and enumeration.
    Last resort is a 404 error if we do not exactly match a term's id.
    See also https://webapp-improved.appspot.com/guide/request.html#guide-request
    Return True to enable browser caching ETag/Last-Modified - False for no cache
    """
    # (Docstring moved above the global statement so it is a real docstring.)
    global LOADEDSOURCES
    global_vars.time_start = datetime.datetime.now()
    tick() #keep system fresh
    log.info("[%s] _get(%s)" % (getInstanceId(short=True),node))
    self.callCount()
    if (node in silent_skip_list):
        return False
    if ENABLE_HOSTED_EXTENSIONS:
        layerlist = self.setupExtensionLayerlist(node) # e.g. ['core', 'bib']
    else:
        layerlist = ["core"]
    setSiteName(self.getExtendedSiteName(layerlist)) # e.g. 'bib.schema.org', 'schema.org'
    log.debug("EXT: set sitename to %s " % getSiteName())
    # Lazily load schema/example sources on the first request this
    # instance serves.
    if not LOADEDSOURCES:
        log.info("Instance[%s] received request for not stored page: %s" % (getInstanceId(short=True), node) )
        log.info("Instance[%s] needs to load sources to create it" % (getInstanceId(short=True)) )
        load_sources() #Get Examples files and schema definitions
    self.emitHTTPHeaders(node) #Ensure we have the right basic header values
    # Documentation pages have their own router.
    if node.startswith("docs/"):
        return self._getDocs(node,layerlist=layerlist)
    # App Engine lifecycle hook: warmup.
    if(node == "_ah/warmup"):
        if "localhost" in os.environ['SERVER_NAME'] and WarmupState.lower() == "auto":
            log.info("[%s] Warmup dissabled for localhost instance" % getInstanceId(short=True))
            if DISABLE_NDB_FOR_LOCALHOST:
                log.info("[%s] NDB dissabled for localhost instance" % getInstanceId(short=True))
                enablePageStore("INMEM")
        else:
            # memcache flag ensures only one instance performs the warm.
            if not memcache.get("warmedup"):
                memcache.set("warmedup", value=True)
                self.warmup()
            else:
                log.info("Warmup already actioned")
        return False
    #elif doWarm: #Do a bit of warming on each call
        #global WarmedUp
        #global Warmer
        #if not WarmedUp:
            #Warmer.stepWarm(self)
    # Admin endpoint: flush and rebuild all caches.
    if(node == "admin/refresh"):
        log.info("Processing refesh request")
        load_start = datetime.datetime.now()
        memcache.flush_all()
        memcache.set(key="app_initialising", value=True, time=300) #Give the system 5 mins - auto remove flag in case of crash
        cleanmsg = CacheControl.clean()
        log.info("Clean count(s): %s" % cleanmsg)
        log.info(("[%s] Cache clean took %s " % (getInstanceId(short=True),(datetime.datetime.now() - load_start))))
        memcache.set(key="app_initialising", value=False)
        storeInitialisedTimestamp()
        self.emitSchemaorgHeaders("Refresh")
        #404 could be called from any path, so output all potential locations of schemaorg.css
        self.response.out.write('<link rel="stylesheet" type="text/css" href="../docs/schemaorg.css" />')
        self.response.out.write('<link rel="stylesheet" type="text/css" href="docs/schemaorg.css" />')
        self.response.out.write('<link rel="stylesheet" type="text/css" href="/docs/schemaorg.css" />')
        self.response.out.write('<h3>Refresh Completed</h3><p>Took: %s</p>' % (datetime.datetime.now() - load_start))
        return False
    # App Engine lifecycle hook: instance start (mirrors warmup).
    if(node == "_ah/start"):
        log.info("Instance[%s] received Start request at %s" % (modules.get_current_instance_id(), global_vars.time_start) )
        if "localhost" in os.environ['SERVER_NAME'] and WarmupState.lower() == "auto":
            log.info("[%s] Warmup dissabled for localhost instance" % getInstanceId(short=True))
            if DISABLE_NDB_FOR_LOCALHOST:
                log.info("[%s] NDB dissabled for localhost instance" % getInstanceId(short=True))
                enablePageStore("INMEM")
        else:
            if not memcache.get("warmedup"):
                memcache.set("warmedup", value=True)
                self.warmup()
            else:
                log.info("Warmup already actioned")
        return False
    # App Engine lifecycle hook: instance stop.
    if(node == "_ah/stop"):
        log.info("Instance[%s] received Stop request at %s" % (modules.get_current_instance_id(), global_vars.time_start) )
        log.info("Flushing memcache")
        memcache.flush_all()
        return False
    if (node in ["", "/"]):
        return self.handleHomepage(node)
    # version/latest is a 302 to the current numbered release path.
    currentVerPath = "version/%s" % SCHEMA_VERSION
    if(node.startswith("version/latest")):
        newurl = "%s%s" % (currentVerPath,node[14:])
        log.info("REDIRECTING TO: %s" % newurl)
        self.response.set_status(302,"Found")
        self.response.headers['Location'] = makeUrl("",newurl)
        self.emitCacheHeaders()
        return False #don't cache this redirect
    #Match nodes of pattern 'version/*' 'version/*/' or 'version/'
    if (re.match(r'^version/[^/]*$', str(node)) or re.match(r'^version/[^/]*/$', str(node)) or node == "version/") :
        if self.handleFullReleasePage(node, layerlist=layerlist):
            return True
        else:
            log.info("Error handling full release page: %s " % node)
            if self.handle404Failure(node):
                return False
            else:
                log.info("Error handling 404 under /version/")
                return False
    # Diagnostics page (blocked on production unless explicitly enabled).
    if(node == "_siteDebug"):
        if(getBaseHost() != "schema.org" or os.environ['PRODSITEDEBUG'] == "True"):
            self.siteDebug()
            return False #Treat as a dynamic page - suppress Etags etc.
    # Manual page-cache flush; responds with a 404 page plus clean counts.
    if(node == "_cacheFlush"):
        setmodiftime(datetime.datetime.utcnow()) #Resets etags and modtime
        counts = CacheControl.clean(pagesonly=True)
        inf = "<div style=\"clear: both; float: left; text-align: left; font-size: xx-small; color: #888 ; margin: 1em; line-height: 100%;\">"
        inf += str(counts)
        inf += "</div>"
        self.handle404Failure(node,extrainfo=inf)
        return False
    # Pages based on request path matching a Unit in the term graph:
    if self.handleExactTermPage(node, layers=layerlist):
        return True
    else:
        log.info("Error handling exact term page. Assuming a 404: %s" % node)
        # Drop through to 404 as default exit.
        if self.handle404Failure(node):
            return False
        else:
            log.info("Error handling 404.")
            return False
def _getDocs(self, node, layerlist=""):
    """Route requests under docs/ to their specific handlers.

    Documentation is always served from the core host; any extension
    host is first redirected. Returns True when the page was emitted,
    False otherwise.
    """
    current_layer = getHostExt()
    if current_layer == "":
        current_layer = "core"
    if (node.startswith("docs/") and current_layer != "core"): #All docs should operate in core
        return self.redirectToBase(node,True)
    if node in ["docs/jsonldcontext.json.txt", "docs/jsonldcontext.json", "docs/jsonldcontext.jsonld"]:
        if self.handleJSONContext(node):
            return True
        log.info("Error handling JSON-LD context: %s" % node)
        return False
    if node == "docs/full.html":
        if self.handleFullHierarchyPage(node, layerlist=layerlist):
            return True
        log.info("Error handling full.html : %s " % node)
        return False
    if node == "docs/schemas.html":
        if self.handleSchemasPage(node, layerlist=layerlist):
            return True
        log.info("Error handling schemas.html : %s " % node)
        return False
    if node == "docs/developers.html":
        if self.handleDumpsPage(node, layerlist=layerlist):
            return True
        log.info("Error handling developers.html : %s " % node)
        return False
    if node in ("docs/tree.jsonld", "docs/tree.json"):
        # The schema tree always spans every layer.
        if self.handleJSONSchemaTree(node, layerlist=ALL_LAYERS):
            return True
        log.info("Error handling JSON-LD schema tree: %s " % node)
        return False
    # Anything else under docs/ is treated as a static file.
    return self.handleStaticDoc(node)
def handleStaticDoc(self,node):
    """Serve a static docs/ page from the cloud page store.

    Returns True when the page was written; emits a 404 and returns
    False when it is missing; returns False when not in CLOUDSTORE mode.
    """
    if PAGESTOREMODE != "CLOUDSTORE":
        return False
    log.info("Asking for: %s" % node)
    page = getPageFromStore(node,enableFlush=False)
    if not page:
        self.handle404Failure(node)
        return False
    self.response.out.write( page )
    log.debug("Serving static page: %s" % node)
    return True
def siteDebug(self):
    """Render the _siteDebug diagnostics page: host settings, shared
    (memcache) statistics when SHAREDSITEDEBUG, and per-instance
    call/memory/cache details."""
    global STATS
    page = templateRender('siteDebug.tpl', "_siteDebug" )
    self.response.out.write( page )
    ext = getHostExt()
    if ext == "":
        ext = "core"
    self.response.out.write("<div style=\"display: none;\">\nLAYER:%s\n</div>" % ext)
    self.response.out.write("<table style=\"width: 70%; border: solid 1px #CCCCCC; border-collapse: collapse;\"><tbody>\n")
    self.writeDebugRow("Setting","Value",True)
    # Shared (cross-instance) statistics kept in memcache.
    if SHAREDSITEDEBUG:
        self.writeDebugRow("System start",memcache.get("SysStart"))
        inst = memcache.get("Instances")
        extinst = memcache.get("ExitInstances")
        self.writeDebugRow("Running instances(%s)" % len(memcache.get("Instances")),inst.keys())
        self.writeDebugRow("Instance exits(%s)" % len(memcache.get("ExitInstances")),extinst.keys())
    # Current request/host configuration.
    self.writeDebugRow("httpScheme",getHttpScheme())
    self.writeDebugRow("host_ext",getHostExt())
    self.writeDebugRow("basehost",getBaseHost())
    self.writeDebugRow("hostport",getHostPort())
    self.writeDebugRow("sitename",getSiteName())
    self.writeDebugRow("debugging",getAppVar('debugging'))
    self.writeDebugRow("intestharness",getInTestHarness())
    if SHAREDSITEDEBUG:
        self.writeDebugRow("total calls",memcache.get("total"))
        for s in ALL_LAYERS:
            self.writeDebugRow("%s calls" % s, memcache.get(s))
        for s in ["http","https"]:
            self.writeDebugRow("%s calls" % s, memcache.get(s))
    # Per-instance statistics.
    self.writeDebugRow("This Instance ID",os.environ["INSTANCE_ID"],True)
    self.writeDebugRow("Instance Calls", callCount)
    self.writeDebugRow("Instance Memory Usage [Mb]", str(runtime.memory_usage()).replace("\n","<br/>"))
    self.writeDebugRow("Instance Current DataCache", DataCache.getCurrent())
    self.writeDebugRow("Instance DataCaches", len(DataCache.keys()))
    for c in DataCache.keys():
        self.writeDebugRow("Instance DataCache[%s] size" % c, len(DataCache.getCache(c) ))
    # NOTE(review): "<table>" below looks like it was meant to be the
    # closing "</table>" tag — confirm.
    self.response.out.write("</tbody><table><br/>\n")
    self.response.out.write("</div>\n</body>\n<!--AppEngineVersion %s -->\n</html>\n" % getAppEngineVersion())
def writeDebugRow(self,term,value,head=False):
    """Write one two-cell HTML table row to the response.

    head: when True, use <th> cells with the header colour scheme.
    The left cell holds *term*, the right a scrollable div with *value*.
    """
    tag = "th" if head else "td"
    cellStyle = "border: solid 1px #CCCCCC; vertical-align: top; border-collapse: collapse;"
    if head:
        cellStyle += " color: #FFFFFF; background: #888888;"
    leftcellStyle = cellStyle + " width: 35%"
    divstyle = "width: 100%; max-height: 100px; overflow: auto"
    row = "<tr><%s style=\"%s\">%s</%s><%s style=\"%s\"><div style=\"%s\">%s</div></%s></tr>\n" % (tag,leftcellStyle,term,tag,tag,cellStyle,divstyle,value,tag)
    self.response.out.write(row)
def callCount(self):
    """Update per-instance call counters and, when SHAREDSITEDEBUG is
    set, the shared memcache statistics shown on _siteDebug."""
    global instance_first
    global instance_num
    # NOTE: module-level integer counter; it shares its name with this
    # method but lives in the module namespace.
    global callCount
    callCount += 1
    # First call on this instance: register it in shared stats.
    if(instance_first):
        instance_first = False
        instance_num += 1
        if SHAREDSITEDEBUG:
            # memcache.add only succeeds for the first writer, so the
            # shared counters are seeded exactly once site-wide.
            if(memcache.add(key="Instances",value={})):
                memcache.add(key="ExitInstances",value={})
                memcache.add(key="http",value=0)
                memcache.add(key="https",value=0)
                memcache.add(key="total",value=0)
                for i in ALL_LAYERS:
                    memcache.add(key=i,value=0)
            Insts = memcache.get("Instances")
            Insts[os.environ["INSTANCE_ID"]] = 1
            memcache.replace("Instances",Insts)
    if SHAREDSITEDEBUG:
        # Per-call shared counters: total, scheme, and layer buckets.
        memcache.incr("total")
        memcache.incr(getHttpScheme())
        if getHostExt() != "":
            memcache.incr(getHostExt())
        else:
            memcache.incr("core")
def warmup(self):
    """Warm the page caches once; no-op when already warm or another
    instance is currently warming (tracked via memcache)."""
    global WarmedUp
    global Warmer
    if WarmedUp:
        return
    started = datetime.datetime.now()
    log.debug("Instance[%s] received Warmup request at %s" % (modules.get_current_instance_id(), datetime.datetime.utcnow()) )
    if memcache.get("Warming"):
        log.debug("Instance[%s] detected system already warming" % (modules.get_current_instance_id()) )
        return
    # Claim the warming flag (auto-expires in case of a crash), do the
    # work, then release it.
    memcache.set("Warming",True,time=300)
    Warmer.warmAll(self)
    log.debug("Instance[%s] completed Warmup request at %s elapsed: %s" % (modules.get_current_instance_id(), datetime.datetime.utcnow(),datetime.datetime.now() - started ) )
    memcache.set("Warming",False)
class WarmupTool():
    """Incrementally pre-generates ('warms') key pages in every layer.

    warmAll() drives stepWarm() across all layers until checkAll()
    declares the whole site warm (module global WarmedUp).
    """
    def __init__(self):
        #self.pageList = ["docs/schemas.html"]
        self.pageList = ["/","docs/schemas.html","docs/full.html","docs/tree.jsonld","docs/developers.html","docs/jsonldcontext.json"]
        self.extPageList = ["/"] #Pages warmed in all extentions
        self.warmPages = {}  # layer -> pages already warmed in that layer
        for l in ALL_LAYERS:
            self.warmPages[l] = []
        self.warmedLayers = []  # layers completely warmed
        # Bug fix: the lock must be a single shared instance attribute.
        # The previous code created a brand-new threading.Lock() inside
        # every stepWarm() call, so concurrent callers never contended
        # on the same lock and the critical section was unprotected.
        self.lock = threading.Lock()
    def stepWarm(self, unit=None, layer=None):
        """Warm one pending page for *layer*, switching the host
        extension for the duration and restoring it afterwards."""
        with self.lock:
            realHostExt = getHostExt()
            if layer:
                setHostExt(layer)
            self._stepWarm(unit=unit, layer=layer)
            setHostExt(realHostExt)
    def _stepWarm(self, unit=None, layer=None):
        """Warm the next unwarmed page of *layer* via unit._get().
        Callers must hold self.lock."""
        global WarmedUp
        if not layer:
            layer = getHostExt()
            if layer == "":
                layer = "core"
        if not unit or WarmedUp:
            return
        if layer in self.warmedLayers: #Done all for this layer
            return
        warmedPages = False
        for p in self.pageList:
            if p not in self.warmPages[layer]:
                self.warmPages[layer].append(p)
                if layer == "core" or p in self.extPageList: #Only warm selected pages in extensions
                    log.info("Warming page %s in layer %s" % (p,layer))
                    unit._get(p,doWarm=False)
                    unit.response.clear()
                if len(self.warmPages[layer]) == len(self.pageList):
                    warmedPages = True
                break
        if warmedPages: #Must be all warmed for this layer
            log.info("All warmed in layer %s" % layer)
            self.warmedLayers.append(layer)
            self.checkAll()
    def checkAll(self):
        """Set the module-global WarmedUp flag once every layer in
        ALL_LAYERS has been fully warmed."""
        global WarmedUp
        allDone = True
        for l in ALL_LAYERS:
            if l != "" and l not in self.warmedLayers:
                allDone = False
                break
        if allDone:
            WarmedUp = True
            log.info("All layers warmed!")
    def warmAll(self,unit):
        """Loop stepWarm over all layers until the site is fully warm."""
        global WarmedUp
        while not WarmedUp:
            for l in ALL_LAYERS:
                self.stepWarm(layer=l,unit=unit)
# Module-level singleton driving cache warming (used by warmup()).
Warmer = WarmupTool()
def getExtenstionDescriptions():
    """Look up descriptive metadata for the current host extension.

    Returns a 6-tuple (name, briefDescription, version, linktext,
    comment, disambiguatingDescription); every element is "" when there
    is no current extension or it has no descriptor. (The typo in this
    function's name is preserved — callers depend on it.)
    """
    name = ""
    brief = ""
    version = ""
    linktext = ""
    comment = ""
    disambiguation = ""
    ext = getHostExt()
    if ext and len(ext):
        descs = api.SdoConfig.descriptor(ext)
        if descs and len(descs):
            first = descs[0]
            # All fields except the name are markdown-rendered.
            name = first.get("name")
            brief = Markdown.parse(first.get("brief"))
            version = Markdown.parse(first.get("version"))
            linktext = Markdown.parse(first.get("linktext"))
            comment = Markdown.parse(first.get("comment"))
            disambiguation = Markdown.parse(first.get("extDisambiguatingDescription"))
    return name, brief, version, linktext, comment, disambiguation
def templateRender(templateName, node, values=None):
    """Render Jinja template *templateName* for *node*.

    Builds the standard template variable set (site/extension metadata
    and relative docs/home paths chosen by where *node* lives), overlays
    any caller-supplied *values*, and returns the rendered string.
    """
    global sitemode #,sitename
    #log.info("templateRender(%s,%s,%s)" % (templateName, node, values))
    #log.info("getHostExt %s" % getHostExt())
    # Accept Unit or VTerm objects as well as plain path strings.
    if isinstance(node, Unit):
        node = node.id
    if isinstance(node, VTerm):
        node = node.getId()
    extName, extDD, extVers, extlinktext, extComment, extDisambiguatingDescription = getExtenstionDescriptions()
    # Relative paths depend on the directory the page is served from.
    if node.startswith("docs/"):
        docsdir = "./"
        homedir = ".."
    elif node.startswith("version/"):
        docsdir = "/docs/"
        homedir = ""
    else:
        docsdir = "docs/"
        homedir = "."
    defvars = {
        'ENABLE_HOSTED_EXTENSIONS': ENABLE_HOSTED_EXTENSIONS,
        'SCHEMA_VERSION': SCHEMA_VERSION,
        'appengineVersion': getAppEngineVersion(),
        'debugging': getAppVar('debugging'),
        'docsdir': docsdir,
        'extlinktext': extlinktext,
        'extDisambiguatingDescription':extDisambiguatingDescription,
        'extComment': extComment,
        'extDD': extDD,
        'extName': extName,
        'extVers': extVers,
        'extensionPath': makeUrl(getHostExt(),"",full=True),
        'homedir': homedir,
        'host_ext': getHostExt(),
        'mybasehost': getBaseHost(),
        'myhost': getHost(),
        'myport': getHostPort(),
        'sitemode': sitemode,
        'sitename': SdoConfig.getname(),
        'staticPath': homedir,
        'targethost': makeUrl("","",full=True),
        'vocabUri': SdoConfig.vocabUri()
    }
    # Caller-supplied values override the defaults.
    if values:
        defvars.update(values)
    template = JINJA_ENVIRONMENT.get_template(templateName)
    return template.render(defvars)
def oldtemplateRender(templateName, node, values=None):
    """Legacy template renderer: derives extension metadata from the term
    graph (Unit/GetTargets) instead of SdoConfig descriptors.

    `node` may be a Unit or a path string; `values` overrides the defaults.
    """
    global sitemode #,sitename
    log.info("templateRender(%s,%s,%s)" % (templateName, node, values))
    log.info("getHostExt %s" % getHostExt())
    if isinstance(node, Unit):
        node = node.id

    extDef = Unit.GetUnit(getNss(getHostExt()),True)
    extComment = ""
    extVers = ""
    extName = ""
    # Fix: extDD was only assigned inside the `if extDef:` branch but is
    # always referenced in defvars below, raising NameError when no
    # extension definition exists. Initialize it with the other defaults.
    extDD = ""
    #log.info("EXDEF '%s'" % extDef)
    if extDef:
        extComment = GetComment(extDef,ALL_LAYERS)
        if extComment == "-":
            extComment = ""
        extDDs = GetTargets(Unit.GetUnit("schema:disambiguatingDescription", True), extDef, layers=ALL_LAYERS )
        if len(extDDs) > 0:
            extDD = Markdown.parse(extDDs[0])
        else:
            extDD = ""
        # Build "<em>(Extension version: a, b, ...)</em>" from the versions.
        first = True
        for ver in GetsoftwareVersions(extDef, ALL_LAYERS):
            if first:
                first = False
                extVers = "<em>(Extension version: "
            else:
                extVers += ", "
            extVers += Markdown.parse(ver)
        if len(extVers) :
            extVers += ")</em>"
        nms = GetTargets(Unit.GetUnit("schema:name", True), extDef, layers=ALL_LAYERS )
        if len(nms) > 0:
            extName = nms[0]

    # Relative paths depend on how deep the rendered page lives.
    if node.startswith("docs/"):
        docsdir = "./"
        homedir = ".."
    else:
        docsdir = "docs/"
        homedir = "."

    defvars = {
        'ENABLE_HOSTED_EXTENSIONS': ENABLE_HOSTED_EXTENSIONS,
        'SCHEMA_VERSION': SCHEMA_VERSION,
        'SUBDOMAINS': SUBDOMAINS,
        'sitemode': sitemode,
        'sitename': SdoConfig.getname(),
        'staticPath': homedir,
        'extensionPath': makeUrl(getHostExt(),"",full=True),
        'myhost': getHost(),
        'myport': getHostPort(),
        'mybasehost': getBaseHost(),
        'host_ext': getHostExt(),
        'extComment': extComment,
        'docsdir': docsdir,
        'homedir': homedir,
        'extDD': extDD,
        'extVers': extVers,
        'extName': extName,
        'targethost': makeUrl("","",full=True),
        'debugging': getAppVar('debugging'),
        'appengineVersion': getAppEngineVersion()
    }
    if values:
        defvars.update(values)

    template = JINJA_ENVIRONMENT.get_template(templateName)
    return template.render(defvars)
def my_shutdown_hook():
    """App Engine shutdown hook: record exit statistics in memcache (when
    shared-site debugging is enabled) and log the instance shutdown."""
    global instance_num
    if SHAREDSITEDEBUG:
        Insts = memcache.get("ExitInstances")
        if Insts:
            Insts[os.environ["INSTANCE_ID"]] = 1
            memcache.replace("ExitInstances",Insts)
        memcache.add("Exits",0)  # no-op if the counter already exists
        memcache.incr("Exits")
    log.info("Instance[%s] shutting down" % modules.get_current_instance_id())

runtime.set_shutdown_hook(my_shutdown_hook)
# Thin accessors around the per-request application variables
# (setAppVar/getAppVar), one get/set pair per variable.

def setHttpScheme(val):
    setAppVar('httpScheme',val)

def getHttpScheme():
    return getAppVar('httpScheme')

def setHostExt(val):
    setAppVar('host_ext',val)

def getHostExt():
    return getAppVar('host_ext')

def setSiteName(val):
    setAppVar('sitename',val)

def getSiteName():
    return getAppVar('sitename')

def setHost(val):
    setAppVar('myhost',val)

def getHost():
    return getAppVar('myhost')

def setBaseHost(val):
    setAppVar('mybasehost',val)

def getBaseHost():
    return getAppVar('mybasehost')

def setHostPort(val):
    setAppVar('myport',val)

def getHostPort():
    return getAppVar('myport')

def setArguments(val):
    setAppVar('myarguments',val)

def getArguments():
    return getAppVar('myarguments')
def makeUrl(ext="",path="",full=False,scheme=None):
    """Compose a site URL for extension `ext` and `path`.

    With `full`, returns a scheme-qualified absolute URL (scheme defaults to
    the current request's); otherwise just the leading-slash path.
    """
    port = "" if getHostPort() == "80" else ":%s" % getHostPort()
    sub = "" if ext in ("", "core") else "%s." % ext
    if not path:
        p = ""
    elif path.startswith("/"):
        p = path
    else:
        p = "/%s" % path
    if not full:
        return "%s" % (p)
    if not scheme:
        scheme = getHttpScheme()
    # TARGETSITE overrides the base host when set in the environment.
    targethost = os.environ.get("TARGETSITE",getBaseHost())
    return "%s://%s%s%s%s" % (scheme,sub,targethost,port,p)
def getPageFromStore(id,ext=None,enableFlush=True):
    """Fetch a cached page, honouring the `_pageFlush` URL argument which
    drops the cached copy (forcing regeneration) when present."""
    cached = PageStore.get(id,ext)
    if enableFlush and cached and "_pageFlush" in getArguments():
        log.info("Reloading page for %s" % id)
        PageStore.remove(id,ext)
        cached = None
    return cached
# One-shot guard so term definitions are only loaded once per process.
schemasInitialized = False

def load_schema_definitions(refresh=False):
    """Load term definitions (and usage counts) unless already loaded.

    :param refresh: force a reload even if definitions were loaded before.
    """
    global schemasInitialized
    if not schemasInitialized or refresh:
        log.info("STARTING UP... reading schemas.")
        #load_graph(loadExtensions=ENABLE_HOSTED_EXTENSIONS)
        if SdoConfig.isValid():
            read_schemas(SdoConfig.termFiles())
            load_usage_data(SdoConfig.countsFiles())
        else:
            # Fall back to locally bundled schemas (plus hosted extensions).
            read_local_schemas(loadExtensions=ENABLE_HOSTED_EXTENSIONS)
            if ENABLE_HOSTED_EXTENSIONS:
                read_extensions(ENABLED_EXTENSIONS)
        schemasInitialized = True
# Timestamp while some thread is loading sources, else None.
LOADINGSOURCE = None
# Maximum seconds to wait for another thread's in-progress load.
WAITSECS = 360

def load_sources():
    """Load term definitions and examples exactly once across threads.

    If another thread is already loading, poll until it finishes or until
    WAITSECS elapses (at which point a stale lock is cleared - e.g. the
    loading thread crashed without resetting LOADINGSOURCE).
    """
    global LOADINGSOURCE, LOADEDSOURCES,WAITSECS
    if LOADEDSOURCES:
        return
    if LOADINGSOURCE: #Another thread may already be here
        elapsedSecs = 0
        while LOADINGSOURCE and elapsedSecs < WAITSECS:
            time.sleep(0.1)
            if LOADINGSOURCE: #If still loading, check timing and go around again
                elapsed = datetime.datetime.now() - LOADINGSOURCE
                elapsedSecs = elapsed.total_seconds()
                if elapsedSecs >= WAITSECS: # Clear potential thread block caused by another thread crashing out leaving flags set
                    log.info("LOADINGSOURCE Thread blocked for over %s seconds - clearing lock" % WAITSECS)
                    LOADINGSOURCE = None
    if not LOADEDSOURCES and not LOADINGSOURCE: # Check again in case things have changed in above loop
        LOADINGSOURCE = datetime.datetime.now()
        load_start = datetime.datetime.now()
        load_schema_definitions()
        log.info(("[%s] Term definitions load took %s " % (getInstanceId(short=True),(datetime.datetime.now() - load_start))))
        load_start = datetime.datetime.now()
        load_examples_data(ENABLED_EXTENSIONS)
        log.info(("[%s] Examples load took %s " % (getInstanceId(short=True),(datetime.datetime.now() - load_start))))
        LOADEDSOURCES=True
        LOADINGSOURCE=None
# In the test harness, data is loaded eagerly at import time; otherwise the
# WSGI application is created and data loads on demand per request.
if getInTestHarness():
    load_sources()
else:
    app = ndb.toplevel(webapp2.WSGIApplication([("/(.*)", ShowUnit)]))
|
#!/usr/bin/env python3
import logging_util
class Fibonacci:
    """
    https://en.wikipedia.org/wiki/Fibonacci_number
    Uses memoization to store previous results in a dictionary and reduce execution time
    https://en.wikipedia.org/wiki/Memoization
    """

    def __init__(self):
        self.logger = logging_util.get_logger('fib')
        # Memo table mapping n -> fib(n). A dict is used instead of a list so
        # lookups via .get(n) never risk an index-out-of-bounds.
        # Seeded with the base cases fib(0) and fib(1).
        self.results = {0: 0, 1: 1}

    def fibonacci(self, n: int) -> int:
        """Recursively compute the nth Fibonacci number with memoization.

        :param n: an integer >= 0 (negative input yields None)
        :return: Fibonacci number

        Memoization reduces fibonacci(36) from ~10s to ~0.001s on a
        Macbook Pro compared with the naive recursive version.
        """
        self.logger.info(f'fibonacci({n})')
        if n < 0:
            return None
        cached = self.results.get(n)
        if cached is not None:
            return cached
        # Not memoized yet: recurse on the two predecessors and remember it.
        value = self.fibonacci(n - 2) + self.fibonacci(n - 1)
        self.results[n] = value
        return value

    def fibonacci_iterative(self, n: int) -> int:
        """Iteratively compute the nth Fibonacci number (less stack use).

        :param n: an integer >= 0 (negative input yields None)
        :return: Fibonacci number
        """
        self.logger.info(f'fibonacci_iterative({n})')
        if n < 0:
            return None
        cached = self.results.get(n)
        if cached is not None:
            return cached
        # Walking i upward from 2 guarantees results[i-2] and results[i-1]
        # are already filled in; range excludes the upper bound, hence n + 1.
        for i in range(2, n + 1):
            self.results[i] = self.results[i - 2] + self.results[i - 1]
        return self.results[n]
if __name__ == "__main__":
    # Quick smoke test: compute fib(5) (expected 5) and log the result.
    fib = Fibonacci()
    result = fib.fibonacci(5)
    fib.logger.info(f'{result}')
Add log statements
#!/usr/bin/env python3
import logging_util
class Fibonacci:
    """
    https://en.wikipedia.org/wiki/Fibonacci_number
    Uses memoization to store previous results in a dictionary and reduce execution time
    https://en.wikipedia.org/wiki/Memoization
    """

    def __init__(self):
        self.logger = logging_util.get_logger('fib')
        # memoized results of numbers in fibonacci sequence
        # Could use an array. This could save some memory use index instead of key.
        # Use dictionary, more general memoization solution and probably a little more convenient.
        # e.g. dictionary.get(n) is safer and easier than avoiding index out of bounds
        # key: value == n: fib(n)
        # seed with fib(0), fib(1)
        self.results = {0: 0, 1: 1}

    def fibonacci(self, n: int) -> int:
        """
        :param n: an integer >= 0
        :return: Fibonacci number (None when n is negative)

        Execution times on Macbook Pro
        recursive fibonacci with no storage of previous results
        fibonacci(36) requires ~ 10 seconds
        recursive fibonacci with memoization of previous results
        fibonacci(36) requires ~ 0.001 seconds
        """
        self.logger.info(f'fibonacci({n})')
        if n < 0:
            return None
        elif self.results.get(n) is not None:
            # return memoized result
            self.logger.debug(f'returning memoized results.get({n}) == {self.results.get(n)}')
            return self.results.get(n)
        else:
            # recurse
            self.logger.debug(f'fibonacci({n - 2}) + fibonacci({n - 1})')
            # calculating index-2 before index-1 may help optimize, I didn't test that.
            result = self.fibonacci(n - 2) + self.fibonacci(n - 1)
            self.results[n] = result
            return result

    def fibonacci_iterative(self, n: int) -> int:
        """ Iterative solution can require less stack space than recursive solution

        :param n: an integer >= 0
        :return: Fibonacci number (None when n is negative)
        """
        self.logger.info(f'fibonacci_iterative({n})')
        if n < 0:
            return None
        elif self.results.get(n) is not None:
            # return memoized result
            return self.results.get(n)
        # by incrementing index from 2 to n, loop guarantees
        # self.results[index - 2] and self.results[index - 1]
        # already contain values
        # range excludes upper value so use n + 1
        for index in range(2, n + 1):
            # calculating index-2 before index-1 may help optimize, I didn't test that.
            result = self.results[index - 2] + self.results[index - 1]
            self.results[index] = result
        return self.results[n]
if __name__ == "__main__":
    # Demo entry point: compute fib(10) and log the result.
    fib = Fibonacci()
    result = fib.fibonacci(10)
    # Fix: `result` was computed but never used; log it as the v1 demo did.
    fib.logger.info(f'{result}')
|
"""
Copyright (c) 2017 Jet Propulsion Laboratory,
California Institute of Technology. All rights reserved
"""
import argparse
import json
import logging
import uuid
from random import sample
import cassandra.concurrent
from cassandra.cluster import Cluster
from cassandra.policies import RoundRobinPolicy, TokenAwarePolicy
from solrcloudpy import SolrConnection, SearchOptions
from six.moves import input
solr_connection = None
solr_collection = None
cassandra_cluster = None
cassandra_session = None
cassandra_table = None
logging.basicConfig()
logging.getLogger().setLevel(logging.INFO)
logging.getLogger().handlers[0].setFormatter(
logging.Formatter(fmt="%(asctime)s %(levelname)s:%(name)s: %(message)s", datefmt="%Y-%m-%dT%H:%M:%S"))
def init(args):
    """Initialise the module-level Solr and Cassandra connections from CLI args."""
    global solr_connection
    solr_connection = SolrConnection(args.solr)
    global solr_collection
    solr_collection = solr_connection[args.collection]

    # Token-aware routing layered over round-robin across the contact points.
    dc_policy = RoundRobinPolicy()
    token_policy = TokenAwarePolicy(dc_policy)

    global cassandra_cluster
    cassandra_cluster = Cluster(contact_points=args.cassandra, port=args.cassandraPort,
                                protocol_version=int(args.cassandraProtocolVersion),
                                load_balancing_policy=token_policy)
    global cassandra_session
    cassandra_session = cassandra_cluster.connect(keyspace=args.cassandraKeyspace)
    global cassandra_table
    cassandra_table = args.cassandraTable
def delete_by_query(args):
    """Build a Solr query from CLI args, confirm with the user, then delete
    all matching tiles from both Cassandra and Solr.

    :raises RuntimeError: when neither --query nor --jsonparams was given.
    """
    if args.query:
        se = SearchOptions()
        se.commonparams.q(args.query) \
            .fl('id')
        for fq in args.filterquery if args.filterquery is not None else []:
            se.commonparams.fq(fq)
        query = se
    elif args.jsonparams:
        se = SearchOptions(**json.loads(args.jsonparams))
        se.commonparams.fl('id')  # only ids are needed for deletion
        query = se
    else:
        raise RuntimeError("either query or jsonparams is required")

    if check_query(query):
        logging.info("Collecting tiles ....")
        solr_docs = do_solr_query(query)
        if confirm_delete(len(solr_docs)):
            deleted_ids = do_delete(solr_docs)
            logging.info("Deleted tile IDs %s" % json.dumps([str(doc_id) for doc_id in deleted_ids], indent=2))
        else:
            logging.info("Exiting")
            return
    else:
        logging.info("Exiting")
        return
def confirm_delete(num_found):
    """Prompt until the user answers 'y' or 'n'; return True only for 'y'."""
    prompt = ("This action will delete %s record(s) from SOLR and Cassandra. "
              "Are you sure you want to Continue? y/n: " % num_found)
    answer = input(prompt)
    while answer not in ('y', 'n'):
        answer = input(prompt)
    return answer == 'y'
def check_query(query):
    """Interactively sanity-check `query`: report the hit count, optionally
    print one randomly sampled matching document, and return True to proceed."""
    solr_response = solr_collection.search(query)
    num_found = solr_response.result.response.numFound
    if num_found == 0:
        logging.info("Query returned 0 results")
        return False

    do_continue = input("Query found %s matching documents. Continue? [y]/n/(s)ample: " % num_found)
    while do_continue not in ['y', 'n', 's', '']:
        do_continue = input("Query found %s matching documents. Continue? [y]/n/(s)ample: " % num_found)

    if do_continue == 'y' or do_continue == '':
        return True
    elif do_continue == 'n':
        return False
    else:
        # 's': show one sampled document in full, then re-prompt.
        se = SearchOptions()
        se.commonparams.q('id:%s' % sample(solr_response.result.response.docs, 1)[0]['id'])
        logging.info(json.dumps(solr_collection.search(se).result.response.docs[0], indent=2))
        return check_query(query)
def do_solr_query(query):
    """Page through every matching Solr document using cursorMark pagination
    and return the tile ids as a list of uuid.UUID values."""
    doc_ids = []

    next_cursor_mark = "*"
    query.commonparams.sort('id asc')  # cursor pagination requires a stable sort
    while True:
        query.commonparams.remove_param('cursorMark')
        query.commonparams.add_params(cursorMark=next_cursor_mark)
        solr_response = solr_collection.search(query)
        try:
            result_next_cursor_mark = solr_response.result.nextCursorMark
        except AttributeError:
            # No Results
            return []
        if result_next_cursor_mark == next_cursor_mark:
            # Cursor did not advance: every page has been consumed.
            break
        else:
            next_cursor_mark = solr_response.result.nextCursorMark
        doc_ids.extend([uuid.UUID(doc['id']) for doc in solr_response.result.response.docs])

    return doc_ids
def do_delete(doc_ids):
    """Delete the given tile IDs from Cassandra, then from Solr.

    :param doc_ids: list of uuid.UUID tile identifiers
    :return: the same list of ids, for reporting by the caller
    """
    logging.info("Executing Cassandra delete...")
    delete_from_cassandra(doc_ids)
    logging.info("Executing Solr delete...")
    delete_from_solr(doc_ids)
    return doc_ids

def delete_from_cassandra(doc_ids):
    """Concurrently delete the given tiles by primary key from Cassandra."""
    # NOTE: CQL cannot bind identifiers, so the table name is interpolated.
    # It comes from a trusted CLI argument, not external input.
    statement = cassandra_session.prepare("DELETE FROM %s WHERE tile_id=?" % cassandra_table)
    results = cassandra.concurrent.execute_concurrent_with_args(cassandra_session, statement,
                                                                [(doc_id,) for doc_id in doc_ids])
    for (success, result) in results:
        if not success:
            # Fix: logging.warn is a deprecated alias of logging.warning.
            logging.warning("Could not delete tile %s" % result)

def delete_from_solr(doc_ids):
    """Delete each document by id from Solr, committing once at the end."""
    for doc_id in doc_ids:
        solr_collection.delete({'q': "id:%s" % doc_id}, commit=False)
    solr_collection.commit()
def parse_args():
    """Define and parse the command-line arguments for the deletion tool."""
    parser = argparse.ArgumentParser(description='Delete data from NEXUS using a Solr Query',
                                     formatter_class=argparse.ArgumentDefaultsHelpFormatter)

    parser.add_argument('--solr',
                        help='The url of the SOLR server.',
                        required=True,
                        metavar='127.0.0.1:8983')

    parser.add_argument('--collection',
                        help='The name of the SOLR collection.',
                        required=True,
                        metavar='nexustiles')

    parser.add_argument('--cassandra',
                        help='The hostname(s) or IP(s) of the Cassandra server(s).',
                        required=True,
                        nargs='+',
                        metavar=('127.0.0.100', '127.0.0.101'))

    parser.add_argument('-k', '--cassandraKeyspace',
                        help='The Cassandra keyspace.',
                        required=True,
                        metavar='nexustiles')

    # Exactly one of --query / --jsonparams must be supplied.
    group = parser.add_mutually_exclusive_group(required=True)
    group.add_argument('-q', '--query',
                       # Fix: adjacent '' '' strings concatenated away the intended quotes.
                       help="The 'q' parameter passed to SOLR Search",
                       metavar='*:*')

    group.add_argument('--jsonparams',
                       # Fix: "prameters" typo.
                       help='Full query parameters formatted as JSON')

    parser.add_argument('-fq', '--filterquery',
                        help="The 'fq' parameter passed to SOLR Search. Only used if --jsonparams is not provided",
                        required=False,
                        nargs='+')

    parser.add_argument('-t', '--cassandraTable',
                        help='The name of the cassandra table.',
                        required=False,
                        default='sea_surface_temp')

    parser.add_argument('-p', '--cassandraPort',
                        # Fix: help text was copy-pasted from --cassandraProtocolVersion.
                        help='The port used to connect to Cassandra.',
                        required=False,
                        default='9042')

    parser.add_argument('-pv', '--cassandraProtocolVersion',
                        help='The version of the Cassandra protocol the driver should use.',
                        required=False,
                        choices=['1', '2', '3', '4', '5'],
                        default='3')

    return parser.parse_args()
if __name__ == "__main__":
    # Parse CLI arguments, connect to Solr/Cassandra, then run the delete.
    the_args = parse_args()
    init(the_args)
    delete_by_query(the_args)
Updated help message.
"""
Copyright (c) 2017 Jet Propulsion Laboratory,
California Institute of Technology. All rights reserved
"""
import argparse
import json
import logging
import uuid
from random import sample
import cassandra.concurrent
from cassandra.cluster import Cluster
from cassandra.policies import RoundRobinPolicy, TokenAwarePolicy
from solrcloudpy import SolrConnection, SearchOptions
from six.moves import input
solr_connection = None
solr_collection = None
cassandra_cluster = None
cassandra_session = None
cassandra_table = None
logging.basicConfig()
logging.getLogger().setLevel(logging.INFO)
logging.getLogger().handlers[0].setFormatter(
logging.Formatter(fmt="%(asctime)s %(levelname)s:%(name)s: %(message)s", datefmt="%Y-%m-%dT%H:%M:%S"))
def init(args):
    """Initialise the module-level Solr and Cassandra connections from CLI args."""
    global solr_connection
    solr_connection = SolrConnection(args.solr)
    global solr_collection
    solr_collection = solr_connection[args.collection]

    # Token-aware routing layered over round-robin across the contact points.
    dc_policy = RoundRobinPolicy()
    token_policy = TokenAwarePolicy(dc_policy)

    global cassandra_cluster
    cassandra_cluster = Cluster(contact_points=args.cassandra, port=args.cassandraPort,
                                protocol_version=int(args.cassandraProtocolVersion),
                                load_balancing_policy=token_policy)
    global cassandra_session
    cassandra_session = cassandra_cluster.connect(keyspace=args.cassandraKeyspace)
    global cassandra_table
    cassandra_table = args.cassandraTable
def delete_by_query(args):
    """Build a Solr query from CLI args, confirm with the user, then delete
    all matching tiles from both Cassandra and Solr.

    :raises RuntimeError: when neither --query nor --jsonparams was given.
    """
    if args.query:
        se = SearchOptions()
        se.commonparams.q(args.query) \
            .fl('id')
        for fq in args.filterquery if args.filterquery is not None else []:
            se.commonparams.fq(fq)
        query = se
    elif args.jsonparams:
        se = SearchOptions(**json.loads(args.jsonparams))
        se.commonparams.fl('id')  # only ids are needed for deletion
        query = se
    else:
        raise RuntimeError("either query or jsonparams is required")

    if check_query(query):
        logging.info("Collecting tiles ....")
        solr_docs = do_solr_query(query)
        if confirm_delete(len(solr_docs)):
            deleted_ids = do_delete(solr_docs)
            logging.info("Deleted tile IDs %s" % json.dumps([str(doc_id) for doc_id in deleted_ids], indent=2))
        else:
            logging.info("Exiting")
            return
    else:
        logging.info("Exiting")
        return

def confirm_delete(num_found):
    """Prompt until the user answers 'y' or 'n'; return True only for 'y'."""
    do_continue = input(
        "This action will delete %s record(s) from SOLR and Cassandra. Are you sure you want to Continue? y/n: " % num_found)
    while do_continue not in ['y', 'n']:
        do_continue = input(
            "This action will delete %s record(s) from SOLR and Cassandra. Are you sure you want to Continue? y/n: " % num_found)

    return do_continue == 'y'

def check_query(query):
    """Interactively sanity-check `query`: report the hit count, optionally
    print one randomly sampled matching document, and return True to proceed."""
    solr_response = solr_collection.search(query)
    num_found = solr_response.result.response.numFound
    if num_found == 0:
        logging.info("Query returned 0 results")
        return False

    do_continue = input("Query found %s matching documents. Continue? [y]/n/(s)ample: " % num_found)
    while do_continue not in ['y', 'n', 's', '']:
        do_continue = input("Query found %s matching documents. Continue? [y]/n/(s)ample: " % num_found)

    if do_continue == 'y' or do_continue == '':
        return True
    elif do_continue == 'n':
        return False
    else:
        # 's': show one sampled document in full, then re-prompt.
        se = SearchOptions()
        se.commonparams.q('id:%s' % sample(solr_response.result.response.docs, 1)[0]['id'])
        logging.info(json.dumps(solr_collection.search(se).result.response.docs[0], indent=2))
        return check_query(query)
def do_solr_query(query):
    """Collect the ids of every Solr document matching `query`.

    Pages through the results with Solr cursorMark pagination; returns the
    ids as a list of uuid.UUID values (empty when nothing matched).
    """
    collected = []
    cursor = "*"
    query.commonparams.sort('id asc')  # cursor pagination requires a stable sort
    while True:
        query.commonparams.remove_param('cursorMark')
        query.commonparams.add_params(cursorMark=cursor)
        response = solr_collection.search(query)
        if not hasattr(response.result, 'nextCursorMark'):
            # No results at all.
            return []
        if response.result.nextCursorMark == cursor:
            # Cursor did not advance: every page has been consumed.
            break
        cursor = response.result.nextCursorMark
        collected.extend(uuid.UUID(doc['id']) for doc in response.result.response.docs)
    return collected
def do_delete(doc_ids):
    """Delete the given tile IDs from Cassandra, then from Solr.

    :param doc_ids: list of uuid.UUID tile identifiers
    :return: the same list of ids, for reporting by the caller
    """
    logging.info("Executing Cassandra delete...")
    delete_from_cassandra(doc_ids)
    logging.info("Executing Solr delete...")
    delete_from_solr(doc_ids)
    return doc_ids

def delete_from_cassandra(doc_ids):
    """Concurrently delete the given tiles by primary key from Cassandra."""
    # NOTE: CQL cannot bind identifiers, so the table name is interpolated.
    # It comes from a trusted CLI argument, not external input.
    statement = cassandra_session.prepare("DELETE FROM %s WHERE tile_id=?" % cassandra_table)
    results = cassandra.concurrent.execute_concurrent_with_args(cassandra_session, statement,
                                                                [(doc_id,) for doc_id in doc_ids])
    for (success, result) in results:
        if not success:
            # Fix: logging.warn is a deprecated alias of logging.warning.
            logging.warning("Could not delete tile %s" % result)

def delete_from_solr(doc_ids):
    """Delete each document by id from Solr, committing once at the end."""
    for doc_id in doc_ids:
        solr_collection.delete({'q': "id:%s" % doc_id}, commit=False)
    solr_collection.commit()
def parse_args():
    """Define and parse the command-line arguments for the deletion tool."""
    parser = argparse.ArgumentParser(description='Delete data from NEXUS using a Solr Query',
                                     formatter_class=argparse.ArgumentDefaultsHelpFormatter)

    parser.add_argument('--solr',
                        help='The url of the SOLR server.',
                        required=True,
                        metavar='127.0.0.1:8983')

    parser.add_argument('--collection',
                        help='The name of the SOLR collection.',
                        required=True,
                        metavar='nexustiles')

    parser.add_argument('--cassandra',
                        help='The hostname(s) or IP(s) of the Cassandra server(s).',
                        required=True,
                        nargs='+',
                        metavar=('127.0.0.100', '127.0.0.101'))

    parser.add_argument('-k', '--cassandraKeyspace',
                        help='The Cassandra keyspace.',
                        required=True,
                        metavar='nexustiles')

    # Exactly one of --query / --jsonparams must be supplied.
    group = parser.add_mutually_exclusive_group(required=True)
    group.add_argument('-q', '--query',
                       # Fix: adjacent '' '' strings concatenated away the intended quotes.
                       help="The 'q' parameter passed to SOLR Search",
                       metavar='*:*')

    group.add_argument('--jsonparams',
                       # Fix: "prameters" typo.
                       help='Full query parameters formatted as JSON')

    parser.add_argument('-fq', '--filterquery',
                        help="The 'fq' parameter passed to SOLR Search. Only used if --jsonparams is not provided",
                        required=False,
                        nargs='+')

    parser.add_argument('-t', '--cassandraTable',
                        help='The name of the cassandra table.',
                        required=False,
                        default='sea_surface_temp')

    parser.add_argument('-p', '--cassandraPort',
                        help='The port used to connect to Cassandra.',
                        required=False,
                        default='9042')

    parser.add_argument('-pv', '--cassandraProtocolVersion',
                        help='The version of the Cassandra protocol the driver should use.',
                        required=False,
                        choices=['1', '2', '3', '4', '5'],
                        default='3')

    return parser.parse_args()
if __name__ == "__main__":
    # Parse CLI arguments, connect to Solr/Cassandra, then run the delete.
    the_args = parse_args()
    init(the_args)
    delete_by_query(the_args)
|
# -*- coding: utf-8 -*-
import time as _time
import platform as _platform
if _platform.system() == 'Windows':
from. import _winmouse as _os_mouse
elif _platform.system() == 'Linux':
from. import _nixmouse as _os_mouse
elif _platform.system() == 'Darwin':
from. import _darwinmouse as _os_mouse
else:
raise OSError("Unsupported platform '{}'".format(_platform.system()))
from ._mouse_event import ButtonEvent, MoveEvent, WheelEvent, LEFT, RIGHT, MIDDLE, X, X2, UP, DOWN, DOUBLE
from ._generic import GenericListener as _GenericListener
_pressed_events = set()
class _MouseListener(_GenericListener):
    """Feeds OS-level mouse events to registered handlers while keeping the
    module-wide set of currently pressed buttons up to date."""

    def init(self):
        _os_mouse.init()

    def pre_process_event(self, event):
        # Button events update the pressed-buttons bookkeeping; every event
        # is forwarded to the handlers (hence the unconditional True).
        if isinstance(event, ButtonEvent):
            released = event.event_type in (UP, DOUBLE)
            if released:
                _pressed_events.discard(event.button)
            else:
                _pressed_events.add(event.button)
        return True

    def listen(self):
        _os_mouse.listen(self.queue)

_listener = _MouseListener()
def is_pressed(button=LEFT):
    """ Returns True if the given button is currently pressed. """
    # Only reflects button events observed since the listener started.
    _listener.start_if_necessary()
    return button in _pressed_events

def press(button=LEFT):
    """ Presses the given button (but doesn't release). """
    _os_mouse.press(button)

def release(button=LEFT):
    """ Releases the given button. """
    _os_mouse.release(button)

def click(button=LEFT):
    """ Sends a click with the given button. """
    _os_mouse.press(button)
    _os_mouse.release(button)

def double_click(button=LEFT):
    """ Sends a double click with the given button. """
    click(button)
    click(button)

def right_click():
    """ Sends a right click with the given button. """
    click(RIGHT)

def wheel(delta=1):
    """ Scrolls the wheel `delta` clicks. Sign indicates direction. """
    _os_mouse.wheel(delta)
def move(x, y, absolute=True, duration=0):
    """
    Moves the mouse. If `absolute`, to position (x, y), otherwise move relative
    to the current position. If `duration` is non-zero, animates the movement.
    """
    x = int(x)
    y = int(y)

    # Requires an extra system call on Linux, but `move_relative` is measured
    # in millimiters so we would lose precision.
    position_x, position_y = get_position()

    if not absolute:
        x = position_x + x
        y = position_y + y

    if duration:
        start_x = position_x
        start_y = position_y
        dx = x - start_x
        dy = y - start_y

        if dx == 0 and dy == 0:
            _time.sleep(duration)
        else:
            # 120 movements per second.
            # Round and keep float to ensure float division in Python 2
            steps = max(1.0, float(int(duration * 120.0)))
            for i in range(int(steps)+1):
                # Recursive call with duration=0 performs each step instantly.
                move(start_x + dx*i/steps, start_y + dy*i/steps)
                _time.sleep(duration/steps)
    else:
        _os_mouse.move_to(x, y)
def drag(start_x, start_y, end_x, end_y, absolute=True, duration=0):
    """
    Holds the left mouse button, moving from start to end position, then
    releases. `absolute` and `duration` are parameters regarding the mouse
    movement.
    """
    # Make sure the button starts unpressed so the drag begins cleanly.
    if is_pressed():
        release()
    move(start_x, start_y, absolute, 0)
    press()
    move(end_x, end_y, absolute, duration)
    release()
def on_button(callback, args=(), buttons=(LEFT, MIDDLE, RIGHT, X, X2), types=(UP, DOWN, DOUBLE)):
    """ Invokes `callback` with `args` when the specified event happens. """
    # Accept a single button/type as well as a sequence of them.
    button_set = buttons if isinstance(buttons, (tuple, list)) else (buttons,)
    type_set = types if isinstance(types, (tuple, list)) else (types,)

    def handler(event):
        if (isinstance(event, ButtonEvent)
                and event.event_type in type_set
                and event.button in button_set):
            callback(*args)

    _listener.add_handler(handler)
    return handler
def on_click(callback, args=()):
    """ Invokes `callback` with `args` when the left button is clicked. """
    return on_button(callback, args, [LEFT], [UP])

def on_double_click(callback, args=()):
    """
    Invokes `callback` with `args` when the left button is double clicked.
    """
    return on_button(callback, args, [LEFT], [DOUBLE])

def on_right_click(callback, args=()):
    """ Invokes `callback` with `args` when the right button is clicked. """
    return on_button(callback, args, [RIGHT], [UP])

def on_middle_click(callback, args=()):
    """ Invokes `callback` with `args` when the middle button is clicked. """
    return on_button(callback, args, [MIDDLE], [UP])

def wait(button=LEFT, target_types=(UP, DOWN, DOUBLE)):
    """
    Blocks program execution until the given button performs an event.
    """
    from threading import Lock
    lock = Lock()
    lock.acquire()
    # The handler releases the lock from the listener thread, so the second
    # acquire below blocks until the requested event arrives.
    handler = on_button(lock.release, (), [button], target_types)
    lock.acquire()
    _listener.remove_handler(handler)
def get_position():
    """ Returns the (x, y) mouse position. """
    return _os_mouse.get_position()

def hook(callback):
    """
    Installs a global listener on all available mouses, invoking `callback`
    each time it is moved, a key status changes or the wheel is spun. A mouse
    event is passed as argument, with type either `mouse.ButtonEvent`,
    `mouse.WheelEvent` or `mouse.MoveEvent`.

    Returns the given callback for easier development.
    """
    _listener.add_handler(callback)
    return callback

def unhook(callback):
    """
    Removes a previously installed hook.
    """
    _listener.remove_handler(callback)

def unhook_all():
    """
    Removes all hooks registered by this application. Note this may include
    hooks installed by high level functions, such as `record`.
    """
    # Clears the handler list in place so the listener keeps the same object.
    del _listener.handlers[:]
def record(button=RIGHT, target_types=(DOWN,)):
    """
    Records all mouse events until the user presses the given button.
    Then returns the list of events recorded. Pairs well with `play(events)`.

    Note: this is a blocking function.
    Note: for more details on the mouse hook and events see `hook`.
    """
    recorded = []
    hook(recorded.append)
    wait(button=button, target_types=target_types)
    unhook(recorded.append)
    return recorded

def play(events, speed_factor=1.0, include_clicks=True, include_moves=True, include_wheel=True):
    """
    Plays a sequence of recorded events, maintaining the relative time
    intervals. If speed_factor is <= 0 then the actions are replayed as fast
    as the OS allows. Pairs well with `record()`.

    The parameters `include_*` define if events of that type should be included
    in the replay or ignored.
    """
    last_time = None
    for event in events:
        if speed_factor > 0 and last_time is not None:
            _time.sleep((event.time - last_time) / speed_factor)
        last_time = event.time

        if isinstance(event, ButtonEvent) and include_clicks:
            if event.event_type == UP:
                _os_mouse.release(event.button)
            else:
                _os_mouse.press(event.button)
        elif isinstance(event, MoveEvent) and include_moves:
            _os_mouse.move_to(event.x, event.y)
        elif isinstance(event, WheelEvent) and include_wheel:
            _os_mouse.wheel(event.delta)

# Backwards-compatible alias.
replay = play

if __name__ == '__main__':
    # Manual demo: record mouse activity, then play it back.
    # NOTE(review): record() stops on a right-button DOWN by default, while
    # the prompt mentions a double click - confirm which is intended.
    print('Recording... Double click to stop and replay.')
    play(record())
Updated mouse.py
Added alias `hold` for `press`
# -*- coding: utf-8 -*-
import time as _time
import platform as _platform
if _platform.system() == 'Windows':
from. import _winmouse as _os_mouse
elif _platform.system() == 'Linux':
from. import _nixmouse as _os_mouse
elif _platform.system() == 'Darwin':
from. import _darwinmouse as _os_mouse
else:
raise OSError("Unsupported platform '{}'".format(_platform.system()))
from ._mouse_event import ButtonEvent, MoveEvent, WheelEvent, LEFT, RIGHT, MIDDLE, X, X2, UP, DOWN, DOUBLE
from ._generic import GenericListener as _GenericListener
_pressed_events = set()
class _MouseListener(_GenericListener):
    """Feeds OS-level mouse events to registered handlers while keeping the
    module-wide set of currently pressed buttons up to date."""

    def init(self):
        _os_mouse.init()

    def pre_process_event(self, event):
        # Button events update the pressed-buttons bookkeeping; every event
        # is forwarded to the handlers (hence the unconditional True).
        if isinstance(event, ButtonEvent):
            if event.event_type in (UP, DOUBLE):
                _pressed_events.discard(event.button)
            else:
                _pressed_events.add(event.button)
        return True

    def listen(self):
        _os_mouse.listen(self.queue)

_listener = _MouseListener()

def is_pressed(button=LEFT):
    """ Returns True if the given button is currently pressed. """
    # Only reflects button events observed since the listener started.
    _listener.start_if_necessary()
    return button in _pressed_events

def press(button=LEFT):
    """ Presses the given button (but doesn't release). """
    _os_mouse.press(button)

def release(button=LEFT):
    """ Releases the given button. """
    _os_mouse.release(button)

def click(button=LEFT):
    """ Sends a click with the given button. """
    _os_mouse.press(button)
    _os_mouse.release(button)

def double_click(button=LEFT):
    """ Sends a double click with the given button. """
    click(button)
    click(button)

def right_click():
    """ Sends a right click with the given button. """
    click(RIGHT)

def wheel(delta=1):
    """ Scrolls the wheel `delta` clicks. Sign indicates direction. """
    _os_mouse.wheel(delta)
def move(x, y, absolute=True, duration=0):
"""
Moves the mouse. If `absolute`, to position (x, y), otherwise move relative
to the current position. If `duration` is non-zero, animates the movement.
"""
x = int(x)
y = int(y)
# Requires an extra system call on Linux, but `move_relative` is measured
# in millimiters so we would lose precision.
position_x, position_y = get_position()
if not absolute:
x = position_x + x
y = position_y + y
if duration:
start_x = position_x
start_y = position_y
dx = x - start_x
dy = y - start_y
if dx == 0 and dy == 0:
_time.sleep(duration)
else:
# 120 movements per second.
# Round and keep float to ensure float division in Python 2
steps = max(1.0, float(int(duration * 120.0)))
for i in range(int(steps)+1):
move(start_x + dx*i/steps, start_y + dy*i/steps)
_time.sleep(duration/steps)
else:
_os_mouse.move_to(x, y)
def drag(start_x, start_y, end_x, end_y, absolute=True, duration=0):
    """
    Drags with the left button held: jumps to the start position, presses,
    moves to the end position (animated over `duration`), then releases.
    `absolute` and `duration` have the same meaning as in `move`.
    """
    # Make sure we start from a clean (unpressed) state.
    if is_pressed():
        release()
    move(start_x, start_y, absolute, 0)
    press()
    move(end_x, end_y, absolute, duration)
    release()
def on_button(callback, args=(), buttons=(LEFT, MIDDLE, RIGHT, X, X2), types=(UP, DOWN, DOUBLE)):
    """ Invokes `callback` with `args` when the specified event happens. """
    # Accept a single button/type as a convenience and normalize to tuples.
    buttons = buttons if isinstance(buttons, (tuple, list)) else (buttons,)
    types = types if isinstance(types, (tuple, list)) else (types,)
    def handler(event):
        matches = (isinstance(event, ButtonEvent)
                   and event.event_type in types
                   and event.button in buttons)
        if matches:
            callback(*args)
    _listener.add_handler(handler)
    return handler
def on_click(callback, args=()):
    """Registers `callback(*args)` for left-button release events."""
    return on_button(callback, args, [LEFT], [UP])
def on_double_click(callback, args=()):
    """Registers `callback(*args)` for left-button double-click events."""
    return on_button(callback, args, [LEFT], [DOUBLE])
def on_right_click(callback, args=()):
    """Registers `callback(*args)` for right-button release events."""
    return on_button(callback, args, [RIGHT], [UP])
def on_middle_click(callback, args=()):
    """Registers `callback(*args)` for middle-button release events."""
    return on_button(callback, args, [MIDDLE], [UP])
def wait(button=LEFT, target_types=(UP, DOWN, DOUBLE)):
    """
    Blocks program execution until the given button performs an event.
    """
    from threading import Lock
    lock = Lock()
    lock.acquire()  # take the lock so the next acquire() blocks
    handler = on_button(lock.release, (), [button], target_types)
    lock.acquire()  # released by the handler once the event fires
    _listener.remove_handler(handler)
def get_position():
    """Returns the current (x, y) mouse position."""
    return _os_mouse.get_position()
def hook(callback):
    """
    Installs a global listener on all available mouses, invoking `callback`
    each time it is moved, a key status changes or the wheel is spun. A mouse
    event is passed as argument, with type either `mouse.ButtonEvent`,
    `mouse.WheelEvent` or `mouse.MoveEvent`.
    Returns the given callback for easier development.
    """
    _listener.add_handler(callback)
    return callback
def unhook(callback):
    """Removes a hook previously installed with `hook`."""
    _listener.remove_handler(callback)
def unhook_all():
    """
    Removes all hooks registered by this application. Note this may include
    hooks installed by high level functions, such as `record`.
    """
    # Clear in place (Python 2 compatible; list.clear() is 3.3+ only).
    _listener.handlers[:] = []
def record(button=RIGHT, target_types=(DOWN,)):
    """
    Records all mouse events until the user presses the given button.
    Then returns the list of events recorded. Pairs well with `play(events)`.
    Note: this is a blocking function.
    Note: for more details on the mouse hook and events see `hook`.
    """
    recorded = []
    # Bind the handler once so hook() and unhook() see the same object.
    collector = recorded.append
    hook(collector)
    wait(button=button, target_types=target_types)
    unhook(collector)
    return recorded
def play(events, speed_factor=1.0, include_clicks=True, include_moves=True, include_wheel=True):
    """
    Plays a sequence of recorded events, maintaining the relative time
    intervals. If speed_factor is <= 0 then the actions are replayed as fast
    as the OS allows. Pairs well with `record()`.
    The parameters `include_*` define if events of that type should be included
    in the replay or ignored.
    """
    last_time = None
    for event in events:
        # Reproduce the original inter-event delay, scaled by speed_factor.
        if last_time is not None and speed_factor > 0:
            _time.sleep((event.time - last_time) / speed_factor)
        last_time = event.time
        if isinstance(event, ButtonEvent):
            if include_clicks:
                action = _os_mouse.release if event.event_type == UP else _os_mouse.press
                action(event.button)
        elif isinstance(event, MoveEvent):
            if include_moves:
                _os_mouse.move_to(event.x, event.y)
        elif isinstance(event, WheelEvent):
            if include_wheel:
                _os_mouse.wheel(event.delta)
replay = play  # alias for `play`
hold = press  # alias for `press` (press without release)
# Manual smoke test: record mouse activity, then replay it.
if __name__ == '__main__':
    print('Recording... Double click to stop and replay.')
    play(record())
|
import json
import sys
from pprint import pformat
import click
import click_utils
import xml2json
import xml_utils
from reflection_utils import varsdict
def process(**kwargs):
    """Shared subcommand pre-processing: inherit params and dump debug info."""
    ctx = click.get_current_context()
    click_utils.inherit_parent_params(ctx, ('debug',))
    debug_ = ctx.params['debug']
    if debug_:
        click.echo('Debug mode: %s' % ('enabled' if debug_ else 'disabled'))
        click_utils.echo_context(ctx)
        click_utils.echo_kwargs(kwargs)
    if ctx.invoked_subcommand == 'info':
        pass  # no info-specific handling yet
@click.group(context_settings=click_utils.CONTEXT_SETTINGS, invoke_without_command=True)
@click.version_option(version='1.0.0')
@click.option('--debug/--silent', '-d/-s', 'debug', default=False)
def cli(debug):
    # NOTE: deliberately no docstring -- click would surface it as group help.
    ctx = click.get_current_context()
    if debug:
        click_utils.echo_context(ctx)
    sub = ctx.invoked_subcommand
    if sub is None:
        click.echo('I was invoked without a subcommand...')
    elif debug:
        click.echo('I am about to invoke subcommand: %s.' % sub)
@cli.command()
@click.option('--input', '-i', type=click.Path(exists=True, dir_okay=False, allow_dash=True),
              help="the path to the file containing the input. Or '-' to use stdin (e.g. piped input).")
def echo(input, **kwargs):
    """
    Echo the (unparsed) input.
    """
    source = input or '-'  # fall back to stdin when no path was given
    with click.open_file(source, mode='rb') as f:
        click.echo(f.read())
@cli.command()
@click.option('--input', '-i', type=click.Path(exists=True, dir_okay=False, allow_dash=True),
              help="the path to the file containing the input. Or '-' to use stdin (e.g. piped input).")
@click.option('--silent', '-s', is_flag=True, type=click.BOOL,
              help='disables the normal console output.')
def validate(input, silent, **kwargs):
    """
    Validates whether the input is syntactically valid and well-formed.
    """
    source = input or '-'  # fall back to stdin when no path was given
    with click.open_file(source, mode='rb') as f:
        ok = xml_utils.contains_valid_xml(f)
    if not silent:
        click.echo(ok)
    if not ok:
        sys.exit(1)  # exit with a failure code of 1
@cli.command()
@click.option('--input', '-i', type=click.Path(exists=True, dir_okay=False, allow_dash=True),
              help="the path to the file containing the input. Or '-' to use stdin (e.g. piped input).")
@click.option('--verbose', '-v', is_flag=True, type=click.BOOL,
              help='enables more detailed output.')
def info(input, verbose, **kwargs):
    """
    Provides info about the input.
    """
    source = input or '-'  # fall back to stdin when no path was given
    with click.open_file(source, mode='rb') as f:
        tree = xml_utils.load(f)
    details = {
        'length': len(tree),
        'tag': tree.tag
    }
    if verbose:
        details['_object'] = {
            'type': type(tree),
            'members': sorted(varsdict(tree).keys())
        }
        # pformat handles non-JSON-serializable values such as type objects.
        rendered = pformat(details)
    else:
        rendered = json.dumps(details, indent=2, sort_keys=True)
    click.echo(rendered)
# @cli.command()
# @click.option('--input', '-i', type=click.Path(exists=True, dir_okay=False, allow_dash=True),
# help="the path to the file containing the input. Or '-' to use stdin (e.g. piped input).")
# def dominfo(input, **kwargs):
# """
# Provides info about the input.
# """
# if not input:
# input = '-'
# with click.open_file(input, mode='rb') as f:
# # process(**kwargs)
# # from xml.dom.minidom import parse, parseString
# # dom1 = parse('c:\\temp\\mydata.xml') # parse an XML file by name
# # datasource = open('c:\\temp\\mydata.xml')
# # dom2 = parse(datasource) # parse an open file
# # dom3 = parseString('<myxml>Some data<empty/> some more data</myxml>')
# from xml.dom.minidom import parse
# dom = parse(f)
# click.echo('\nINFO about:\n{}\n'.format(input))
# obj = vars(dom)
# obj = pformat(obj)
# click.echo('\nvars(dom):\n{obj}\n'.format(obj=obj))
# click.echo('\ndom content:\n{content}\n'.format(content=dom))
# @cli.command()
# @click.option('--input', '-i', type=click.Path(exists=True, dir_okay=False, allow_dash=True),
# help="the path to the file containing the input. Or '-' to use stdin (e.g. piped input).")
# @click.option('--output', '-o', type=click.Path(exists=False), help='the path to the output file')
# @click.option('--encoding', '-e', type=click.Choice(['json', 'xml']), default='json',
# help='the encoding format to use for the output')
# @click.option('--verbose', '-v', is_flag=True, help='verbose mode')
# @click.option('--ini', '-i', type=click.Path(exists=True), help='the path to the INI file')
# def convert(**kwargs):
# """
# Reformat the input.
# """
# process(**kwargs)
@cli.command()
@click.argument('input', type=click.Path(exists=True, dir_okay=False))
@click.option('--pretty', '-p', is_flag=True, default=False, help='pretty format')
@click.option('--echo', '-e', is_flag=True, default=False, help='echo input')
@click.option('--stripwhitespace', '-sws', is_flag=True, default=False)
@click.option('--stripnamespace', '-sns', is_flag=True, default=False)
def tojson(input, pretty, echo, stripwhitespace, stripnamespace, **kwargs):
    """
    Converts the XML input to JSON output.
    """
    from collections import namedtuple
    # BUG FIX: namedtuple()'s `verbose` keyword was removed in Python 3.7
    # (passing it raises TypeError there); it only controlled debug output
    # and was never needed here.
    Xml2JsonOptions = namedtuple('Xml2JsonOptions', ['pretty'])
    options = Xml2JsonOptions(pretty=pretty)
    with open(input, mode='rb') as f:
        xmlstring = f.read()
    if echo:
        click.echo('\nXML:')
        click.echo(xmlstring)
        click.echo('\nJSON:')
    output = xml2json.xml2json(xmlstring, options=options, strip_ns=stripnamespace, strip=stripwhitespace)
    click.echo(output)
def main():
    # Entry point: dispatch to the click command group.
    cli()
if __name__ == '__main__':
    # Example of pre-seeding defaults for a subcommand:
    # main(default_map={
    #     'info': {
    #         'input': 'rfxml_parse.input.01.xml'
    #     }
    # })
    main()
xmltool: tojson command: added piped input support.
import json
import sys
from pprint import pformat
import click
import click_utils
import xml2json
import xml_utils
from reflection_utils import varsdict
def process(**kwargs):
    """Shared subcommand pre-processing: inherit params and dump debug info."""
    ctx = click.get_current_context()
    click_utils.inherit_parent_params(ctx, ('debug',))
    debug_ = ctx.params['debug']
    if debug_:
        click.echo('Debug mode: %s' % ('enabled' if debug_ else 'disabled'))
        click_utils.echo_context(ctx)
        click_utils.echo_kwargs(kwargs)
    if ctx.invoked_subcommand == 'info':
        pass  # no info-specific handling yet
@click.group(context_settings=click_utils.CONTEXT_SETTINGS, invoke_without_command=True)
@click.version_option(version='1.0.0')
@click.option('--debug/--silent', '-d/-s', 'debug', default=False)
def cli(debug):
    # NOTE: deliberately no docstring -- click would surface it as group help.
    ctx = click.get_current_context()
    if debug:
        click_utils.echo_context(ctx)
    sub = ctx.invoked_subcommand
    if sub is None:
        click.echo('I was invoked without a subcommand...')
    elif debug:
        click.echo('I am about to invoke subcommand: %s.' % sub)
@cli.command()
@click.option('--input', '-i', type=click.Path(exists=True, dir_okay=False, allow_dash=True),
              help="the path to the file containing the input. Or '-' to use stdin (e.g. piped input).")
def echo(input, **kwargs):
    """
    Echo the (unparsed) input.
    """
    source = input or '-'  # fall back to stdin when no path was given
    with click.open_file(source, mode='rb') as f:
        click.echo(f.read())
@cli.command()
@click.option('--input', '-i', type=click.Path(exists=True, dir_okay=False, allow_dash=True),
              help="the path to the file containing the input. Or '-' to use stdin (e.g. piped input).")
@click.option('--silent', '-s', is_flag=True, type=click.BOOL,
              help='disables the normal console output.')
def validate(input, silent, **kwargs):
    """
    Validates whether the input is syntactically valid and well-formed.
    """
    source = input or '-'  # fall back to stdin when no path was given
    with click.open_file(source, mode='rb') as f:
        ok = xml_utils.contains_valid_xml(f)
    if not silent:
        click.echo(ok)
    if not ok:
        sys.exit(1)  # exit with a failure code of 1
@cli.command()
@click.option('--input', '-i', type=click.Path(exists=True, dir_okay=False, allow_dash=True),
              help="the path to the file containing the input. Or '-' to use stdin (e.g. piped input).")
@click.option('--verbose', '-v', is_flag=True, type=click.BOOL,
              help='enables more detailed output.')
def info(input, verbose, **kwargs):
    """
    Provides info about the input.
    """
    source = input or '-'  # fall back to stdin when no path was given
    with click.open_file(source, mode='rb') as f:
        tree = xml_utils.load(f)
    details = {
        'length': len(tree),
        'tag': tree.tag
    }
    if verbose:
        details['_object'] = {
            'type': type(tree),
            'members': sorted(varsdict(tree).keys())
        }
        # pformat handles non-JSON-serializable values such as type objects.
        rendered = pformat(details)
    else:
        rendered = json.dumps(details, indent=2, sort_keys=True)
    click.echo(rendered)
# @cli.command()
# @click.option('--input', '-i', type=click.Path(exists=True, dir_okay=False, allow_dash=True),
# help="the path to the file containing the input. Or '-' to use stdin (e.g. piped input).")
# def dominfo(input, **kwargs):
# """
# Provides info about the input.
# """
# if not input:
# input = '-'
# with click.open_file(input, mode='rb') as f:
# # process(**kwargs)
# # from xml.dom.minidom import parse, parseString
# # dom1 = parse('c:\\temp\\mydata.xml') # parse an XML file by name
# # datasource = open('c:\\temp\\mydata.xml')
# # dom2 = parse(datasource) # parse an open file
# # dom3 = parseString('<myxml>Some data<empty/> some more data</myxml>')
# from xml.dom.minidom import parse
# dom = parse(f)
# click.echo('\nINFO about:\n{}\n'.format(input))
# obj = vars(dom)
# obj = pformat(obj)
# click.echo('\nvars(dom):\n{obj}\n'.format(obj=obj))
# click.echo('\ndom content:\n{content}\n'.format(content=dom))
# @cli.command()
# @click.option('--input', '-i', type=click.Path(exists=True, dir_okay=False, allow_dash=True),
# help="the path to the file containing the input. Or '-' to use stdin (e.g. piped input).")
# @click.option('--output', '-o', type=click.Path(exists=False), help='the path to the output file')
# @click.option('--encoding', '-e', type=click.Choice(['json', 'xml']), default='json',
# help='the encoding format to use for the output')
# @click.option('--verbose', '-v', is_flag=True, help='verbose mode')
# @click.option('--ini', '-i', type=click.Path(exists=True), help='the path to the INI file')
# def convert(**kwargs):
# """
# Reformat the input.
# """
# process(**kwargs)
@cli.command()
@click.option('--input', '-i', type=click.Path(exists=True, dir_okay=False, allow_dash=True),
              help="the path to the file containing the input. Or '-' to use stdin (e.g. piped input).")
@click.option('--pretty', '-p', is_flag=True, default=False, help='pretty format')
@click.option('--echo', '-e', is_flag=True, default=False, help='echo input')
@click.option('--stripwhitespace', '-sws', is_flag=True, default=False)
@click.option('--stripnamespace', '-sns', is_flag=True, default=False)
def tojson(input, pretty, echo, stripwhitespace, stripnamespace, **kwargs):
    """
    Converts the XML input to JSON output.
    """
    from collections import namedtuple
    # BUG FIX: namedtuple()'s `verbose` keyword was removed in Python 3.7
    # (passing it raises TypeError there); it only controlled debug output
    # and was never needed here.
    Xml2JsonOptions = namedtuple('Xml2JsonOptions', ['pretty'])
    options = Xml2JsonOptions(pretty=pretty)
    if not input:
        input = '-'  # fall back to stdin when no path was given
    with click.open_file(input, mode='rb') as f:
        xmlstring = f.read()
    if echo:
        click.echo('\nXML:')
        click.echo(xmlstring)
        click.echo('\nJSON:')
    output = xml2json.xml2json(xmlstring, options=options, strip_ns=stripnamespace, strip=stripwhitespace)
    click.echo(output)
def main():
    # Entry point: dispatch to the click command group.
    cli()
if __name__ == '__main__':
    # Example of pre-seeding defaults for a subcommand:
    # main(default_map={
    #     'info': {
    #         'input': 'rfxml_parse.input.01.xml'
    #     }
    # })
    main()
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2004-2008 Edgewall Software
# Copyright (C) 2004-2005 Christopher Lenz <cmlenz@gmx.de>
# Copyright (C) 2005-2007 Christian Boos <cboos@neuf.fr>
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/log/.
#
# Author: Christopher Lenz <cmlenz@gmx.de>
import csv
from math import ceil
from datetime import datetime, timedelta
import re
from StringIO import StringIO
from genshi.builder import tag
from trac.config import Option, IntOption
from trac.core import *
from trac.db import get_column_names
from trac.mimeview.api import Mimeview, IContentConverter, Context
from trac.resource import Resource
from trac.ticket.api import TicketSystem
from trac.util import Ranges
from trac.util.compat import groupby
from trac.util.datefmt import to_timestamp, utc
from trac.util.presentation import Paginator
from trac.util.text import shorten_line
from trac.util.translation import _, tag_
from trac.web import IRequestHandler
from trac.web.href import Href
from trac.web.chrome import add_ctxtnav, add_link, add_script, add_stylesheet, \
INavigationContributor, Chrome
from trac.wiki.api import IWikiSyntaxProvider, parse_args
from trac.wiki.macros import WikiMacroBase # TODO: should be moved in .api
class QuerySyntaxError(Exception):
    """Raised when a ticket query cannot be parsed from a string."""
class Query(object):
def __init__(self, env, report=None, constraints=None, cols=None,
order=None, desc=0, group=None, groupdesc=0, verbose=0,
rows=None, page=None, max=None):
self.env = env
self.id = report # if not None, it's the corresponding saved query
self.constraints = constraints or {}
self.order = order
self.desc = desc
self.group = group
self.groupdesc = groupdesc
self.default_page = 1
self.items_per_page = QueryModule(self.env).items_per_page
self.substitutions = ['$USER']
# getting page number (default_page if unspecified)
if not page:
page = self.default_page
try:
self.page = int(page)
if self.page < 1:
raise ValueError()
except ValueError:
raise TracError(_('Query page %(page)s is invalid.', page=page))
# max=0 signifies showing all items on one page
# max=n will show precisely n items on all pages except the last
# max<0 is invalid
if max in ('none', ''):
max = 0
if max is None: # meaning unspecified
max = self.items_per_page
try:
self.max = int(max)
if self.max < 0:
raise ValueError()
except ValueError:
raise TracError(_('Query max %(max)s is invalid.', max=max))
if self.max == 0:
self.has_more_pages = False
self.offset = 0
else:
self.has_more_pages = True
self.offset = self.max * (self.page - 1)
if rows == None:
rows = []
if verbose and 'description' not in rows: # 0.10 compatibility
rows.append('description')
self.fields = TicketSystem(self.env).get_ticket_fields()
field_names = [f['name'] for f in self.fields]
self.cols = [c for c in cols or [] if c in field_names or
c in ('id', 'time', 'changetime')]
self.rows = [c for c in rows if c in field_names]
if self.order != 'id' and self.order not in field_names:
# TODO: fix after adding time/changetime to the api.py
if order == 'created':
order = 'time'
elif order == 'modified':
order = 'changetime'
if order in ('time', 'changetime'):
self.order = order
else:
self.order = 'priority'
if self.group not in field_names:
self.group = None
def from_string(cls, env, string, **kw):
filters = string.split('&')
kw_strs = ['order', 'group', 'page', 'max']
kw_arys = ['rows']
kw_bools = ['desc', 'groupdesc', 'verbose']
constraints = {}
cols = []
for filter_ in filters:
filter_ = filter_.split('=')
if len(filter_) != 2:
raise QuerySyntaxError(_('Query filter requires field and '
'constraints separated by a "="'))
field,values = filter_
if not field:
raise QuerySyntaxError(_('Query filter requires field name'))
# from last char of `field`, get the mode of comparison
mode, neg = '', ''
if field[-1] in ('~', '^', '$') \
and not field in self.substitutions:
mode = field[-1]
field = field[:-1]
if field[-1] == '!':
neg = '!'
field = field[:-1]
processed_values = []
for val in values.split('|'):
val = neg + mode + val # add mode of comparison
processed_values.append(val)
try:
field = str(field)
if field in kw_strs:
kw[field] = processed_values[0]
elif field in kw_arys:
kw[field] = processed_values
elif field in kw_bools:
kw[field] = True
elif field == 'col':
cols.extend(processed_values)
else:
constraints[field] = processed_values
except UnicodeError:
pass # field must be a str, see `get_href()`
report = constraints.pop('report', None)
report = kw.pop('report', report)
return cls(env, report, constraints=constraints, cols=cols, **kw)
from_string = classmethod(from_string)
def get_columns(self):
if not self.cols:
self.cols = self.get_default_columns()
if not 'id' in self.cols:
# make sure 'id' is always present (needed for permission checks)
self.cols.insert(0, 'id')
return self.cols
def get_all_textareas(self):
return [f['name'] for f in self.fields if f['type'] == 'textarea']
    def get_all_columns(self):
        """Return every displayable column, ordered for presentation.

        'id' comes first, then non-textarea fields (reporter/keywords/cc
        pushed to the back), then 'time'/'changetime'. Columns fixed to a
        single value by an exact-match constraint are dropped, as is the
        group-by column.
        """
        # Prepare the default list of columns
        cols = ['id']
        cols += [f['name'] for f in self.fields if f['type'] != 'textarea']
        for col in ('reporter', 'keywords', 'cc'):
            if col in cols:
                cols.remove(col)
                cols.append(col)
        # TODO: fix after adding time/changetime to the api.py
        cols += ['time', 'changetime']
        # Semi-intelligently remove columns that are restricted to a single
        # value by a query constraint.
        for col in [k for k in self.constraints.keys()
                    if k != 'id' and k in cols]:
            constraint = self.constraints[col]
            if len(constraint) == 1 and constraint[0] \
                    and not constraint[0][0] in ('!', '~', '^', '$'):
                if col in cols:
                    cols.remove(col)
            if col == 'status' and not 'closed' in constraint \
                    and 'resolution' in cols:
                cols.remove('resolution')
        if self.group in cols:
            cols.remove(self.group)
        def sort_columns(col1, col2):
            # cmp-style comparator (Python 2 list.sort signature).
            constrained_fields = self.constraints.keys()
            if 'id' in (col1, col2):
                # Ticket ID is always the first column
                return col1 == 'id' and -1 or 1
            elif 'summary' in (col1, col2):
                # Ticket summary is always the second column
                return col1 == 'summary' and -1 or 1
            elif col1 in constrained_fields or col2 in constrained_fields:
                # Constrained columns appear before other columns
                return col1 in constrained_fields and -1 or 1
            return 0
        cols.sort(sort_columns)
        return cols
def get_default_columns(self):
all_cols = self.get_all_columns()
# Only display the first seven columns by default
cols = all_cols[:7]
# Make sure the column we order by is visible, if it isn't also
# the column we group by
if not self.order in cols and not self.order == self.group:
cols[-1] = self.order
return cols
def count(self, req, db=None, cached_ids=None):
sql, args = self.get_sql(req, cached_ids)
return self._count(sql, args)
def _count(self, sql, args, db=None):
if not db:
db = self.env.get_db_cnx()
cursor = db.cursor()
count_sql = 'SELECT COUNT(*) FROM (' + sql + ') AS foo'
# self.env.log.debug("Count results in Query SQL: " + count_sql %
# tuple([repr(a) for a in args]))
cnt = 0
cursor.execute(count_sql, args);
for cnt, in cursor:
break
self.env.log.debug("Count results in Query: %d" % cnt)
return cnt
def execute(self, req, db=None, cached_ids=None):
if not db:
db = self.env.get_db_cnx()
cursor = db.cursor()
sql, args = self.get_sql(req, cached_ids)
self.num_items = self._count(sql, args, db)
if self.num_items <= self.max:
self.has_more_pages = False
if self.has_more_pages:
max = self.max
if self.group:
max += 1
sql = sql + " LIMIT %d OFFSET %d" % (max, self.offset)
if (self.page > int(ceil(float(self.num_items) / self.max)) and
self.num_items != 0):
raise TracError(_('Page %(page)s is beyond the number of '
'pages in the query', page=self.page))
self.env.log.debug("Query SQL: " + sql % tuple([repr(a) for a in args]))
cursor.execute(sql, args)
columns = get_column_names(cursor)
fields = []
for column in columns:
fields += [f for f in self.fields if f['name'] == column] or [None]
results = []
column_indices = range(len(columns))
for row in cursor:
result = {}
for i in column_indices:
name, field, val = columns[i], fields[i], row[i]
if name == self.group:
val = val or 'None'
elif name == 'reporter':
val = val or 'anonymous'
elif name == 'id':
val = int(val)
result['href'] = req.href.ticket(val)
elif val is None:
val = '--'
elif name in ('changetime', 'time'):
val = datetime.fromtimestamp(int(val or 0), utc)
elif field and field['type'] == 'checkbox':
try:
val = bool(int(val))
except TypeError, ValueError:
val = False
result[name] = val
results.append(result)
cursor.close()
return results
    def get_href(self, href, id=None, order=None, desc=None, format=None,
                 max=None, page=None):
        """Create a link corresponding to this query.
        :param href: the `Href` object used to build the URL
        :param id: optionally set or override the report `id`
        :param order: optionally override the order parameter of the query
        :param desc: optionally override the desc parameter
        :param format: optionally override the format of the query
        :param max: optionally override the max items per page
        :param page: optionally specify which page of results (defaults to
        the first)
        Note: `get_resource_url` of a 'query' resource?
        """
        if not isinstance(href, Href):
            href = href.href # compatibility with the `req` of the 0.10 API
        # RSS always uses the configured page size and the first page;
        # this must happen *before* the None-defaulting below so it wins
        # over the query's current pagination state.
        if format == 'rss':
            max = self.items_per_page
            page = self.default_page
        # Fill in any unspecified parameter from the query's own state.
        if id is None:
            id = self.id
        if desc is None:
            desc = self.desc
        if order is None:
            order = self.order
        if max is None:
            max = self.max
        if page is None:
            page = self.page
        cols = self.get_columns()
        # don't specify the columns in the href if they correspond to
        # the default columns, page and max in the same order. That keeps the
        # query url shorter in the common case where we just want the default
        # columns.
        if cols == self.get_default_columns():
            cols = None
        if page == self.default_page:
            page = None
        if max == self.items_per_page:
            max = None
        return href.query(report=id,
                          order=order, desc=desc and 1 or None,
                          group=self.group or None,
                          groupdesc=self.groupdesc and 1 or None,
                          col=cols,
                          row=self.rows,
                          max=max,
                          page=page,
                          format=format, **self.constraints)
def to_string(self):
"""Return a user readable and editable representation of the query.
Note: for now, this is an "exploded" query href, but ideally should be
expressed in TracQuery language.
"""
query_string = self.get_href(Href(''))
if query_string and '?' in query_string:
query_string = query_string.split('?', 1)[1]
return 'query:?' + query_string.replace('&', '\n&\n')
    def get_sql(self, req=None, cached_ids=None):
        """Return a (sql, params) tuple for the query.

        Builds a SELECT over the ticket table, LEFT OUTER JOINed with
        ticket_custom (one alias per custom field used), enum (for sortable
        enum fields), and milestone/version (for date-aware ordering); a
        WHERE clause derived from `self.constraints` (extended by
        `cached_ids`); and an ORDER BY honoring group/order/desc settings.

        :param req: if given, '$USER' in constraint values is replaced by
            the authenticated user name
        :param cached_ids: ticket ids that must be matched regardless of
            the other constraints
        """
        self.get_columns()
        enum_columns = ('resolution', 'priority', 'severity')
        # Build the list of actual columns to query
        cols = self.cols[:]
        def add_cols(*args):
            # Append each column only once, preserving order.
            for col in args:
                if not col in cols:
                    cols.append(col)
        if self.group and not self.group in cols:
            add_cols(self.group)
        if self.rows:
            add_cols('reporter', *self.rows)
        add_cols('priority', 'time', 'changetime', self.order)
        cols.extend([c for c in self.constraints.keys() if not c in cols])
        custom_fields = [f['name'] for f in self.fields if 'custom' in f]
        sql = []
        sql.append("SELECT " + ",".join(['t.%s AS %s' % (c, c) for c in cols
                                         if c not in custom_fields]))
        sql.append(",priority.value AS priority_value")
        for k in [k for k in cols if k in custom_fields]:
            sql.append(",%s.value AS %s" % (k, k))
        sql.append("\nFROM ticket AS t")
        # Join with ticket_custom table as necessary
        for k in [k for k in cols if k in custom_fields]:
            sql.append("\n LEFT OUTER JOIN ticket_custom AS %s ON " \
                       "(id=%s.ticket AND %s.name='%s')" % (k, k, k, k))
        # Join with the enum table for proper sorting
        for col in [c for c in enum_columns
                    if c == self.order or c == self.group or c == 'priority']:
            sql.append("\n LEFT OUTER JOIN enum AS %s ON "
                       "(%s.type='%s' AND %s.name=%s)"
                       % (col, col, col, col, col))
        # Join with the version/milestone tables for proper sorting
        for col in [c for c in ['milestone', 'version']
                    if c == self.order or c == self.group]:
            sql.append("\n LEFT OUTER JOIN %s ON (%s.name=%s)"
                       % (col, col, col))
        def get_constraint_sql(name, value, mode, neg):
            # Translate one (field, value) constraint into a
            # (sql_fragment, parameter) pair, or None if it is a no-op.
            # `mode` is '', '~', '^' or '$'; `neg` is truthy for negation.
            if name not in custom_fields:
                name = 't.' + name
            else:
                name = name + '.value'
            value = value[len(mode) + neg:]
            if mode == '':
                return ("COALESCE(%s,'')%s=%%s" % (name, neg and '!' or ''),
                        value)
            if not value:
                return None
            db = self.env.get_db_cnx()
            value = db.like_escape(value)
            if mode == '~':
                value = '%' + value + '%'
            elif mode == '^':
                value = value + '%'
            elif mode == '$':
                value = '%' + value
            return ("COALESCE(%s,'') %s%s" % (name, neg and 'NOT ' or '',
                                              db.like()),
                    value)
        clauses = []
        args = []
        for k, v in self.constraints.items():
            if req:
                v = [val.replace('$USER', req.authname) for val in v]
            # Determine the match mode of the constraint (contains,
            # starts-with, negation, etc.)
            neg = v[0].startswith('!')
            mode = ''
            if len(v[0]) > neg and v[0][neg] in ('~', '^', '$'):
                mode = v[0][neg]
            # Special case id ranges
            if k == 'id':
                ranges = Ranges()
                for r in v:
                    r = r.replace('!', '')
                    ranges.appendrange(r)
                ids = []
                id_clauses = []
                for a,b in ranges.pairs:
                    if a == b:
                        ids.append(str(a))
                    else:
                        id_clauses.append('id BETWEEN %s AND %s')
                        args.append(a)
                        args.append(b)
                if ids:
                    id_clauses.append('id IN (%s)' % (','.join(ids)))
                if id_clauses:
                    clauses.append('%s(%s)' % (neg and 'NOT ' or '',
                                               ' OR '.join(id_clauses)))
            # Special case for exact matches on multiple values
            elif not mode and len(v) > 1:
                if k not in custom_fields:
                    col = 't.' + k
                else:
                    col = k + '.value'
                clauses.append("COALESCE(%s,'') %sIN (%s)"
                               % (col, neg and 'NOT ' or '',
                                  ','.join(['%s' for val in v])))
                args += [val[neg:] for val in v]
            elif len(v) > 1:
                # Multiple values with a match mode: AND them when negated
                # (none may match), OR them otherwise (any may match).
                constraint_sql = filter(None,
                                        [get_constraint_sql(k, val, mode, neg)
                                         for val in v])
                if not constraint_sql:
                    continue
                if neg:
                    clauses.append("(" + " AND ".join(
                        [item[0] for item in constraint_sql]) + ")")
                else:
                    clauses.append("(" + " OR ".join(
                        [item[0] for item in constraint_sql]) + ")")
                args += [item[1] for item in constraint_sql]
            elif len(v) == 1:
                constraint_sql = get_constraint_sql(k, v[0], mode, neg)
                if constraint_sql:
                    clauses.append(constraint_sql[0])
                    args.append(constraint_sql[1])
        clauses = filter(None, clauses)
        if clauses or cached_ids:
            sql.append("\nWHERE ")
            if clauses:
                sql.append(" AND ".join(clauses))
            if cached_ids:
                # Cached ids are always included, regardless of constraints.
                if clauses:
                    sql.append(" OR ")
                sql.append("id in (%s)" % (','.join(
                    [str(id) for id in cached_ids])))
        sql.append("\nORDER BY ")
        order_cols = [(self.order, self.desc)]
        if self.group and self.group != self.order:
            # Group-by column sorts first so rows of a group stay together.
            order_cols.insert(0, (self.group, self.groupdesc))
        for name, desc in order_cols:
            if name in custom_fields or name in enum_columns:
                col = name + '.value'
            else:
                col = 't.' + name
            # FIXME: This is a somewhat ugly hack. Can we also have the
            # column type for this? If it's an integer, we do first
            # one, if text, we do 'else'
            if name in ('id', 'time', 'changetime'):
                if desc:
                    sql.append("COALESCE(%s,0)=0 DESC," % col)
                else:
                    sql.append("COALESCE(%s,0)=0," % col)
            else:
                if desc:
                    sql.append("COALESCE(%s,'')='' DESC," % col)
                else:
                    sql.append("COALESCE(%s,'')=''," % col)
            if name in enum_columns:
                # These values must be compared as ints, not as strings
                db = self.env.get_db_cnx()
                if desc:
                    sql.append(db.cast(col, 'int') + ' DESC')
                else:
                    sql.append(db.cast(col, 'int'))
            elif name in ('milestone', 'version'):
                if name == 'milestone':
                    time_col = 'milestone.due'
                else:
                    time_col = 'version.time'
                if desc:
                    sql.append("COALESCE(%s,0)=0 DESC,%s DESC,%s DESC"
                               % (time_col, time_col, col))
                else:
                    sql.append("COALESCE(%s,0)=0,%s,%s"
                               % (time_col, time_col, col))
            else:
                if desc:
                    sql.append("%s DESC" % col)
                else:
                    sql.append("%s" % col)
            if name == self.group and not name == self.order:
                sql.append(",")
        if self.order != 'id':
            # Tie-break on ticket id for a stable ordering.
            sql.append(",t.id")
        return "".join(sql), args
    def template_data(self, context, tickets, orig_list=None, orig_time=None,
                      req=None):
        """Prepare the data dict used to render the query results.

        :param context: rendering context, provides `context.href`
        :param tickets: list of ticket dicts produced by `execute()`
        :param orig_list: ids from a previously executed query, used to
                          flag tickets as 'added'/'changed' since then
        :param orig_time: timestamp of that previous execution
        :param req: optional request; when given, prev/next page links
                    are added to it
        """
        # Decompose each raw constraint value back into (mode, value) pairs
        # for the filter form, e.g. '!~foo' -> mode '!~', value 'foo'.
        constraints = {}
        for k, v in self.constraints.items():
            constraint = {'values': [], 'mode': ''}
            for val in v:
                neg = val.startswith('!')
                if neg:
                    val = val[1:]
                mode = ''
                if val[:1] in ('~', '^', '$') \
                        and not val in self.substitutions:
                    mode, val = val[:1], val[1:]
                constraint['mode'] = (neg and '!' or '') + mode
                constraint['values'].append(val)
            constraints[k] = constraint
        cols = self.get_columns()
        labels = dict([(f['name'], f['label']) for f in self.fields])
        # TODO: remove after adding time/changetime to the api.py
        labels['changetime'] = _('Modified')
        labels['time'] = _('Created')
        # Column headers double as sort links; clicking the current sort
        # column toggles the direction.
        headers = [{
            'name': col, 'label': labels.get(col, _('Ticket')),
            'href': self.get_href(context.href, order=col,
                                  desc=(col == self.order and not self.desc))
        } for col in cols]
        # Field metadata for the filter form; textareas are not filterable
        # and the name key is redundant with the dict key.
        fields = {}
        for field in self.fields:
            if field['type'] == 'textarea':
                continue
            field_data = {}
            field_data.update(field)
            del field_data['name']
            fields[field['name']] = field_data
        modes = {}
        modes['text'] = [
            {'name': _("contains"), 'value': "~"},
            {'name': _("doesn't contain"), 'value': "!~"},
            {'name': _("begins with"), 'value': "^"},
            {'name': _("ends with"), 'value': "$"},
            {'name': _("is"), 'value': ""},
            {'name': _("is not"), 'value': "!"}
        ]
        modes['select'] = [
            {'name': _("is"), 'value': ""},
            {'name': _("is not"), 'value': "!"}
        ]
        groups = {}
        groupsequence = []
        for ticket in tickets:
            if orig_list:
                # Mark tickets added or changed since the query was first
                # executed
                if ticket['time'] > orig_time:
                    ticket['added'] = True
                elif ticket['changetime'] > orig_time:
                    ticket['changed'] = True
            if self.group:
                group_key = ticket[self.group]
                groups.setdefault(group_key, []).append(ticket)
                if not groupsequence or group_key not in groupsequence:
                    groupsequence.append(group_key)
        # Preserve first-seen order of the groups (tickets arrive sorted)
        groupsequence = [(value, groups[value]) for value in groupsequence]
        # detect whether the last group continues on the next page,
        # by checking if the extra (max+1)th ticket is in the last group
        last_group_is_partial = False
        if groupsequence and self.max and len(tickets) == self.max + 1:
            del tickets[-1]
            if len(groupsequence[-1][1]) == 1:
                # additional ticket started a new group
                del groupsequence[-1] # remove that additional group
            else:
                # additional ticket stayed in the group
                last_group_is_partial = True
                del groupsequence[-1][1][-1] # remove the additional ticket
        results = Paginator(tickets,
                            self.page - 1,
                            self.max,
                            self.num_items)
        if req:
            if results.has_next_page:
                next_href = self.get_href(req.href, max=self.max,
                                          page=self.page + 1)
                add_link(req, 'next', next_href, _('Next Page'))
            if results.has_previous_page:
                prev_href = self.get_href(req.href, max=self.max,
                                          page=self.page - 1)
                add_link(req, 'prev', prev_href, _('Previous Page'))
        else:
            results.show_index = False
        pagedata = []
        shown_pages = results.get_shown_pages(21)
        for page in shown_pages:
            pagedata.append([self.get_href(context.href, page=page), None,
                            str(page), _('Page %(num)d', num=page)])
        results.shown_pages = [dict(zip(['href', 'class', 'string', 'title'],
                                        p)) for p in pagedata]
        results.current_page = {'href': None, 'class': 'current',
                                'string': str(results.page + 1),
                                'title':None}
        return {'query': self,
                'context': context,
                'col': cols,
                'row': self.rows,
                'constraints': constraints,
                'labels': labels,
                'headers': headers,
                'fields': fields,
                'modes': modes,
                'tickets': tickets,
                'groups': groupsequence or [(None, tickets)],
                'last_group_is_partial': last_group_is_partial,
                'paginator': results}
class QueryModule(Component):
implements(IRequestHandler, INavigationContributor, IWikiSyntaxProvider,
IContentConverter)
default_query = Option('query', 'default_query',
default='status!=closed&owner=$USER',
doc='The default query for authenticated users.')
default_anonymous_query = Option('query', 'default_anonymous_query',
default='status!=closed&cc~=$USER',
doc='The default query for anonymous users.')
items_per_page = IntOption('query', 'items_per_page', 100,
"""Number of tickets displayed per page in ticket queries,
by default (''since 0.11'')""")
# IContentConverter methods
def get_supported_conversions(self):
yield ('rss', _('RSS Feed'), 'xml',
'trac.ticket.Query', 'application/rss+xml', 8)
yield ('csv', _('Comma-delimited Text'), 'csv',
'trac.ticket.Query', 'text/csv', 8)
yield ('tab', _('Tab-delimited Text'), 'tsv',
'trac.ticket.Query', 'text/tab-separated-values', 8)
def convert_content(self, req, mimetype, query, key):
if key == 'rss':
return self.export_rss(req, query)
elif key == 'csv':
return self.export_csv(req, query, mimetype='text/csv')
elif key == 'tab':
return self.export_csv(req, query, '\t',
mimetype='text/tab-separated-values')
# INavigationContributor methods
def get_active_navigation_item(self, req):
return 'tickets'
def get_navigation_items(self, req):
from trac.ticket.report import ReportModule
if 'TICKET_VIEW' in req.perm and \
not self.env.is_component_enabled(ReportModule):
yield ('mainnav', 'tickets',
tag.a(_('View Tickets'), href=req.href.query()))
# IRequestHandler methods
def match_request(self, req):
return req.path_info == '/query'
def process_request(self, req):
req.perm.assert_permission('TICKET_VIEW')
constraints = self._get_constraints(req)
if not constraints and not 'order' in req.args:
# If no constraints are given in the URL, use the default ones.
if req.authname and req.authname != 'anonymous':
qstring = self.default_query
user = req.authname
else:
email = req.session.get('email')
name = req.session.get('name')
qstring = self.default_anonymous_query
user = email or name or None
if user:
qstring = qstring.replace('$USER', user)
self.log.debug('QueryModule: Using default query: %s', str(qstring))
constraints = Query.from_string(self.env, qstring).constraints
# Ensure no field constraints that depend on $USER are used
# if we have no username.
for field, vals in constraints.items():
for val in vals:
if val.endswith('$USER'):
del constraints[field]
cols = req.args.get('col')
if isinstance(cols, basestring):
cols = [cols]
# Since we don't show 'id' as an option to the user,
# we need to re-insert it here.
if cols and 'id' not in cols:
cols.insert(0, 'id')
rows = req.args.get('row', [])
if isinstance(rows, basestring):
rows = [rows]
format = req.args.get('format')
max = req.args.get('max')
if max is None and format in ('csv', 'tab'):
max = 0 # unlimited unless specified explicitly
query = Query(self.env, req.args.get('report'),
constraints, cols, req.args.get('order'),
'desc' in req.args, req.args.get('group'),
'groupdesc' in req.args, 'verbose' in req.args,
rows,
req.args.get('page'),
max)
if 'update' in req.args:
# Reset session vars
for var in ('query_constraints', 'query_time', 'query_tickets'):
if var in req.session:
del req.session[var]
req.redirect(query.get_href(req.href))
# Add registered converters
for conversion in Mimeview(self.env).get_supported_conversions(
'trac.ticket.Query'):
add_link(req, 'alternate',
query.get_href(req.href, format=conversion[0]),
conversion[1], conversion[4], conversion[0])
if format:
Mimeview(self.env).send_converted(req, 'trac.ticket.Query', query,
format, 'query')
return self.display_html(req, query)
# Internal methods
def _get_constraints(self, req):
constraints = {}
ticket_fields = [f['name'] for f in
TicketSystem(self.env).get_ticket_fields()]
ticket_fields.append('id')
# For clients without JavaScript, we remove constraints here if
# requested
remove_constraints = {}
to_remove = [k[10:] for k in req.args.keys()
if k.startswith('rm_filter_')]
if to_remove: # either empty or containing a single element
match = re.match(r'(\w+?)_(\d+)$', to_remove[0])
if match:
remove_constraints[match.group(1)] = int(match.group(2))
else:
remove_constraints[to_remove[0]] = -1
for field in [k for k in req.args.keys() if k in ticket_fields]:
vals = req.args[field]
if not isinstance(vals, (list, tuple)):
vals = [vals]
if vals:
mode = req.args.get(field + '_mode')
if mode:
vals = [mode + x for x in vals]
if field in remove_constraints:
idx = remove_constraints[field]
if idx >= 0:
del vals[idx]
if not vals:
continue
else:
continue
constraints[field] = vals
return constraints
def display_html(self, req, query):
db = self.env.get_db_cnx()
# The most recent query is stored in the user session;
orig_list = None
orig_time = datetime.now(utc)
query_time = int(req.session.get('query_time', 0))
query_time = datetime.fromtimestamp(query_time, utc)
query_constraints = unicode(query.constraints)
if query_constraints != req.session.get('query_constraints') \
or query_time < orig_time - timedelta(hours=1):
tickets = query.execute(req, db)
# New or outdated query, (re-)initialize session vars
req.session['query_constraints'] = query_constraints
req.session['query_tickets'] = ' '.join([str(t['id'])
for t in tickets])
else:
orig_list = [int(id) for id
in req.session.get('query_tickets', '').split()]
tickets = query.execute(req, db, orig_list)
orig_time = query_time
context = Context.from_request(req, 'query')
data = query.template_data(context, tickets, orig_list, orig_time, req)
# For clients without JavaScript, we add a new constraint here if
# requested
constraints = data['constraints']
if 'add' in req.args:
field = req.args.get('add_filter')
if field:
constraint = constraints.setdefault(field, {})
constraint.setdefault('values', []).append('')
# FIXME: '' not always correct (e.g. checkboxes)
req.session['query_href'] = query.get_href(context.href)
req.session['query_time'] = to_timestamp(orig_time)
req.session['query_tickets'] = ' '.join([str(t['id'])
for t in tickets])
title = _('Custom Query')
# Only interact with the report module if it is actually enabled.
#
# Note that with saved custom queries, there will be some convergence
# between the report module and the query module.
from trac.ticket.report import ReportModule
if 'REPORT_VIEW' in req.perm and \
self.env.is_component_enabled(ReportModule):
data['report_href'] = req.href.report()
add_ctxtnav(req, _('Available Reports'), req.href.report())
add_ctxtnav(req, _('Custom Query'))
if query.id:
cursor = db.cursor()
cursor.execute("SELECT title,description FROM report "
"WHERE id=%s", (query.id,))
for title, description in cursor:
data['report_resource'] = Resource('report', query.id)
data['description'] = description
else:
data['report_href'] = None
data.setdefault('report', None)
data.setdefault('description', None)
data['title'] = title
data['all_columns'] = query.get_all_columns()
# Don't allow the user to remove the id column
data['all_columns'].remove('id')
data['all_textareas'] = query.get_all_textareas()
add_stylesheet(req, 'common/css/report.css')
add_script(req, 'common/js/query.js')
return 'query.html', data, None
def export_csv(self, req, query, sep=',', mimetype='text/plain'):
content = StringIO()
cols = query.get_columns()
writer = csv.writer(content, delimiter=sep)
writer = csv.writer(content, delimiter=sep, quoting=csv.QUOTE_MINIMAL)
writer.writerow([unicode(c).encode('utf-8') for c in cols])
context = Context.from_request(req)
results = query.execute(req, self.env.get_db_cnx())
for result in results:
ticket = Resource('ticket', result['id'])
if 'TICKET_VIEW' in req.perm(ticket):
values = []
for col in cols:
value = result[col]
if col in ('cc', 'reporter'):
value = Chrome(self.env).format_emails(context(ticket),
value)
values.append(unicode(value).encode('utf-8'))
writer.writerow(values)
return (content.getvalue(), '%s;charset=utf-8' % mimetype)
def export_rss(self, req, query):
if 'description' not in query.rows:
query.rows.append('description')
db = self.env.get_db_cnx()
results = query.execute(req, db)
query_href = req.abs_href.query(group=query.group,
groupdesc=(query.groupdesc and 1
or None),
row=query.rows,
page=req.args.get('page'),
max=req.args.get('max'),
**query.constraints)
data = {
'context': Context.from_request(req, 'query', absurls=True),
'results': results,
'query_href': query_href
}
output = Chrome(self.env).render_template(req, 'query.rss', data,
'application/rss+xml')
return output, 'application/rss+xml'
# IWikiSyntaxProvider methods
def get_wiki_syntax(self):
return []
def get_link_resolvers(self):
yield ('query', self._format_link)
def _format_link(self, formatter, ns, query, label):
if query.startswith('?'):
return tag.a(label, class_='query',
href=formatter.href.query() + query.replace(' ', '+'))
else:
try:
query = Query.from_string(self.env, query)
return tag.a(label,
href=query.get_href(formatter.context.href),
class_='query')
except QuerySyntaxError, e:
return tag.em(_('[Error: %(error)s]', error=e), class_='error')
class TicketQueryMacro(WikiMacroBase):
    """Macro that lists tickets that match certain criteria.
    This macro accepts a comma-separated list of keyed parameters,
    in the form "key=value".
    If the key is the name of a field, the value must use the syntax
    of a filter specifier as defined in TracQuery#QueryLanguage.
    Note that this is ''not'' the same as the simplified URL syntax
    used for `query:` links starting with a `?` character.
    In addition to filters, several other named parameters can be used
    to control how the results are presented. All of them are optional.
    The `format` parameter determines how the list of tickets is
    presented:
     - '''list''' -- the default presentation is to list the ticket ID next
       to the summary, with each ticket on a separate line.
     - '''compact''' -- the tickets are presented as a comma-separated
       list of ticket IDs.
     - '''count''' -- only the count of matching tickets is displayed
     - '''table'''  -- a view similar to the custom query view (but without
       the controls)
    The `max` parameter can be used to limit the number of tickets shown
    (defaults to '''0''', i.e. no maximum).
    The `order` parameter sets the field used for ordering tickets
    (defaults to '''id''').
    The `desc` parameter indicates whether the order of the tickets
    should be reversed (defaults to '''false''').
    The `group` parameter sets the field used for grouping tickets
    (defaults to not being set).
    The `groupdesc` parameter indicates whether the natural display
    order of the groups should be reversed (defaults to '''false''').
    The `verbose` parameter can be set to a true value in order to
    get the description for the listed tickets. For '''table''' format only.
    ''deprecated in favor of the `rows` parameter''
    The `rows` parameter can be used to specify which field(s) should
    be viewed as a row, e.g. `rows=description|summary`
    For compatibility with Trac 0.10, if there's a second positional parameter
    given to the macro, it will be used to specify the `format`.
    Also, using "&" as a field separator still works but is deprecated.
    """

    def expand_macro(self, formatter, name, content):
        # Render the macro: parse the arguments into a Query, run it, and
        # format the result according to the `format` parameter.
        req = formatter.req
        query_string = ''
        argv, kwargs = parse_args(content, strict=False)
        if len(argv) > 0 and not 'format' in kwargs: # 0.10 compatibility hack
            kwargs['format'] = argv[0]

        # Defaults: order by id, no limit
        if 'order' not in kwargs:
            kwargs['order'] = 'id'
        if 'max' not in kwargs:
            kwargs['max'] = '0' # unlimited by default

        format = kwargs.pop('format', 'list').strip().lower()
        if format in ('list', 'compact'): # we need 'status' and 'summary'
            kwargs['col'] = '|'.join(['status', 'summary',
                                      kwargs.get('col', '')])

        # Rebuild a TracQuery string from the remaining parameters
        query_string = '&'.join(['%s=%s' % item
                                 for item in kwargs.iteritems()])
        query = Query.from_string(self.env, query_string)

        if format == 'count':
            cnt = query.count(req)
            return tag.span(cnt, title='%d tickets for which %s' %
                            (cnt, query_string), class_='query_count')

        tickets = query.execute(req)

        if format == 'table':
            data = query.template_data(formatter.context, tickets)

            add_stylesheet(req, 'common/css/report.css')

            return Chrome(self.env).render_template(
                req, 'query_results.html', data, None, fragment=True)

        # 'table' format had its own permission checks, here we need to
        # do it explicitly:
        tickets = [t for t in tickets
                   if 'TICKET_VIEW' in req.perm('ticket', t['id'])]

        if not tickets:
            return tag.span(_("No results"), class_='query_no_results')

        def ticket_anchor(ticket):
            # Link to a single ticket, styled by its status
            return tag.a('#%s' % ticket['id'],
                         class_=ticket['status'],
                         href=req.href.ticket(int(ticket['id'])),
                         title=shorten_line(ticket['summary']))

        def ticket_groups():
            # Yield (group value, tickets, group href, hint title) tuples;
            # `tickets` arrive sorted by group, as required by `groupby`.
            groups = []
            for v, g in groupby(tickets, lambda t: t[query.group]):
                q = Query.from_string(self.env, query_string)
                # produce the hint for the group
                q.group = q.groupdesc = None
                order = q.order
                q.order = None
                title = "%s %s tickets matching %s" % (v, query.group,
                                                       q.to_string())
                # produce the href for the query corresponding to the group
                q.constraints[str(query.group)] = v
                q.order = order
                href = q.get_href(formatter.context)
                groups.append((v, [t for t in g], href, title))
            return groups

        if format == 'compact':
            if query.group:
                # One comma-separated id list per group, linked to the
                # corresponding restricted query
                groups = [tag.a('#%s' % ','.join([str(t['id'])
                                                  for t in g]),
                                href=href, class_='query', title=title)
                          for v, g, href, title in ticket_groups()]
                return tag(groups[0], [(', ', g) for g in groups[1:]])
            else:
                alist = [ticket_anchor(ticket) for ticket in tickets]
                return tag.span(alist[0], *[(', ', a) for a in alist[1:]])
        else:
            if query.group:
                # One definition list per group, preceded by a group header
                return tag.div(
                    [(tag.p(tag_('%(groupvalue)s %(groupname)s tickets:',
                                 groupvalue=tag.a(v, href=href, class_='query',
                                                  title=title),
                                 groupname=query.group)),
                      tag.dl([(tag.dt(ticket_anchor(t)),
                               tag.dd(t['summary'])) for t in g],
                             class_='wiki compact'))
                     for v, g, href, title in ticket_groups()])
            else:
                return tag.div(tag.dl([(tag.dt(ticket_anchor(ticket)),
                                        tag.dd(ticket['summary']))
                                       for ticket in tickets],
                                      class_='wiki compact'))
Backported r7449 from 0.11-stable.
git-svn-id: eda3d06fcef731589ace1b284159cead3416df9b@7450 af82e41b-90c4-0310-8c96-b1721e28e2e2
# -*- coding: utf-8 -*-
#
# Copyright (C) 2004-2008 Edgewall Software
# Copyright (C) 2004-2005 Christopher Lenz <cmlenz@gmx.de>
# Copyright (C) 2005-2007 Christian Boos <cboos@neuf.fr>
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/log/.
#
# Author: Christopher Lenz <cmlenz@gmx.de>
import csv
from math import ceil
from datetime import datetime, timedelta
import re
from StringIO import StringIO
from genshi.builder import tag
from trac.config import Option, IntOption
from trac.core import *
from trac.db import get_column_names
from trac.mimeview.api import Mimeview, IContentConverter, Context
from trac.resource import Resource
from trac.ticket.api import TicketSystem
from trac.util import Ranges
from trac.util.compat import groupby
from trac.util.datefmt import to_timestamp, utc
from trac.util.presentation import Paginator
from trac.util.text import shorten_line
from trac.util.translation import _, tag_
from trac.web import IRequestHandler
from trac.web.href import Href
from trac.web.chrome import add_ctxtnav, add_link, add_script, add_stylesheet, \
INavigationContributor, Chrome
from trac.wiki.api import IWikiSyntaxProvider, parse_args
from trac.wiki.macros import WikiMacroBase # TODO: should be moved in .api
class QuerySyntaxError(Exception):
    """Raised when a string cannot be parsed into a ticket `Query`."""
class Query(object):
substitutions = ['$USER']
def __init__(self, env, report=None, constraints=None, cols=None,
order=None, desc=0, group=None, groupdesc=0, verbose=0,
rows=None, page=None, max=None):
self.env = env
self.id = report # if not None, it's the corresponding saved query
self.constraints = constraints or {}
self.order = order
self.desc = desc
self.group = group
self.groupdesc = groupdesc
self.default_page = 1
self.items_per_page = QueryModule(self.env).items_per_page
# getting page number (default_page if unspecified)
if not page:
page = self.default_page
try:
self.page = int(page)
if self.page < 1:
raise ValueError()
except ValueError:
raise TracError(_('Query page %(page)s is invalid.', page=page))
# max=0 signifies showing all items on one page
# max=n will show precisely n items on all pages except the last
# max<0 is invalid
if max in ('none', ''):
max = 0
if max is None: # meaning unspecified
max = self.items_per_page
try:
self.max = int(max)
if self.max < 0:
raise ValueError()
except ValueError:
raise TracError(_('Query max %(max)s is invalid.', max=max))
if self.max == 0:
self.has_more_pages = False
self.offset = 0
else:
self.has_more_pages = True
self.offset = self.max * (self.page - 1)
if rows == None:
rows = []
if verbose and 'description' not in rows: # 0.10 compatibility
rows.append('description')
self.fields = TicketSystem(self.env).get_ticket_fields()
field_names = [f['name'] for f in self.fields]
self.cols = [c for c in cols or [] if c in field_names or
c in ('id', 'time', 'changetime')]
self.rows = [c for c in rows if c in field_names]
if self.order != 'id' and self.order not in field_names:
# TODO: fix after adding time/changetime to the api.py
if order == 'created':
order = 'time'
elif order == 'modified':
order = 'changetime'
if order in ('time', 'changetime'):
self.order = order
else:
self.order = 'priority'
if self.group not in field_names:
self.group = None
    def from_string(cls, env, string, **kw):
        """Parse a TracQuery string (e.g. "status=new&order=id") into a
        `Query` instance.

        Each "&"-separated filter is `field=value1|value2|...`; a trailing
        '~', '^' or '$' on the field name selects the comparison mode and
        a trailing '!' negates it.

        :raises QuerySyntaxError: on a malformed filter
        """
        filters = string.split('&')
        kw_strs = ['order', 'group', 'page', 'max']
        kw_arys = ['rows']
        kw_bools = ['desc', 'groupdesc', 'verbose']
        constraints = {}
        cols = []
        for filter_ in filters:
            filter_ = filter_.split('=')
            if len(filter_) != 2:
                raise QuerySyntaxError(_('Query filter requires field and '
                                         'constraints separated by a "="'))
            field,values = filter_
            if not field:
                raise QuerySyntaxError(_('Query filter requires field name'))
            # from last char of `field`, get the mode of comparison
            mode, neg = '', ''
            if field[-1] in ('~', '^', '$') \
                    and not field in cls.substitutions:
                mode = field[-1]
                field = field[:-1]
            if field[-1] == '!':
                neg = '!'
                field = field[:-1]
            # prefix each value with the negation/mode markers so that
            # `get_sql()` can interpret them later
            processed_values = []
            for val in values.split('|'):
                val = neg + mode + val # add mode of comparison
                processed_values.append(val)
            try:
                field = str(field)
                if field in kw_strs:
                    kw[field] = processed_values[0]
                elif field in kw_arys:
                    kw[field] = processed_values
                elif field in kw_bools:
                    kw[field] = True
                elif field == 'col':
                    cols.extend(processed_values)
                else:
                    constraints[field] = processed_values
            except UnicodeError:
                pass # field must be a str, see `get_href()`
        # an explicit `report` keyword wins over one given as a filter
        report = constraints.pop('report', None)
        report = kw.pop('report', report)
        return cls(env, report, constraints=constraints, cols=cols, **kw)
    from_string = classmethod(from_string)
def get_columns(self):
if not self.cols:
self.cols = self.get_default_columns()
if not 'id' in self.cols:
# make sure 'id' is always present (needed for permission checks)
self.cols.insert(0, 'id')
return self.cols
def get_all_textareas(self):
return [f['name'] for f in self.fields if f['type'] == 'textarea']
    def get_all_columns(self):
        """Return every column that could be displayed for this query,
        ordered by relevance (id first, then summary, then constrained
        fields), with redundant columns removed.
        """
        # Prepare the default list of columns
        cols = ['id']
        cols += [f['name'] for f in self.fields if f['type'] != 'textarea']
        # Push these low-interest columns to the end of the list
        for col in ('reporter', 'keywords', 'cc'):
            if col in cols:
                cols.remove(col)
                cols.append(col)
        # TODO: fix after adding time/changetime to the api.py
        cols += ['time', 'changetime']

        # Semi-intelligently remove columns that are restricted to a single
        # value by a query constraint.
        for col in [k for k in self.constraints.keys()
                    if k != 'id' and k in cols]:
            constraint = self.constraints[col]
            if len(constraint) == 1 and constraint[0] \
                    and not constraint[0][0] in ('!', '~', '^', '$'):
                if col in cols:
                    cols.remove(col)
            if col == 'status' and not 'closed' in constraint \
                    and 'resolution' in cols:
                # resolution is only meaningful for closed tickets
                cols.remove('resolution')
        if self.group in cols:
            cols.remove(self.group)

        def sort_columns(col1, col2):
            # Python 2 cmp-style comparator used by cols.sort() below
            constrained_fields = self.constraints.keys()
            if 'id' in (col1, col2):
                # Ticket ID is always the first column
                return col1 == 'id' and -1 or 1
            elif 'summary' in (col1, col2):
                # Ticket summary is always the second column
                return col1 == 'summary' and -1 or 1
            elif col1 in constrained_fields or col2 in constrained_fields:
                # Constrained columns appear before other columns
                return col1 in constrained_fields and -1 or 1
            return 0
        cols.sort(sort_columns)
        return cols
def get_default_columns(self):
all_cols = self.get_all_columns()
# Only display the first seven columns by default
cols = all_cols[:7]
# Make sure the column we order by is visible, if it isn't also
# the column we group by
if not self.order in cols and not self.order == self.group:
cols[-1] = self.order
return cols
def count(self, req, db=None, cached_ids=None):
sql, args = self.get_sql(req, cached_ids)
return self._count(sql, args)
def _count(self, sql, args, db=None):
if not db:
db = self.env.get_db_cnx()
cursor = db.cursor()
count_sql = 'SELECT COUNT(*) FROM (' + sql + ') AS foo'
# self.env.log.debug("Count results in Query SQL: " + count_sql %
# tuple([repr(a) for a in args]))
cnt = 0
cursor.execute(count_sql, args);
for cnt, in cursor:
break
self.env.log.debug("Count results in Query: %d" % cnt)
return cnt
def execute(self, req, db=None, cached_ids=None):
if not db:
db = self.env.get_db_cnx()
cursor = db.cursor()
sql, args = self.get_sql(req, cached_ids)
self.num_items = self._count(sql, args, db)
if self.num_items <= self.max:
self.has_more_pages = False
if self.has_more_pages:
max = self.max
if self.group:
max += 1
sql = sql + " LIMIT %d OFFSET %d" % (max, self.offset)
if (self.page > int(ceil(float(self.num_items) / self.max)) and
self.num_items != 0):
raise TracError(_('Page %(page)s is beyond the number of '
'pages in the query', page=self.page))
self.env.log.debug("Query SQL: " + sql % tuple([repr(a) for a in args]))
cursor.execute(sql, args)
columns = get_column_names(cursor)
fields = []
for column in columns:
fields += [f for f in self.fields if f['name'] == column] or [None]
results = []
column_indices = range(len(columns))
for row in cursor:
result = {}
for i in column_indices:
name, field, val = columns[i], fields[i], row[i]
if name == self.group:
val = val or 'None'
elif name == 'reporter':
val = val or 'anonymous'
elif name == 'id':
val = int(val)
result['href'] = req.href.ticket(val)
elif val is None:
val = '--'
elif name in ('changetime', 'time'):
val = datetime.fromtimestamp(int(val or 0), utc)
elif field and field['type'] == 'checkbox':
try:
val = bool(int(val))
except TypeError, ValueError:
val = False
result[name] = val
results.append(result)
cursor.close()
return results
def get_href(self, href, id=None, order=None, desc=None, format=None,
max=None, page=None):
"""Create a link corresponding to this query.
:param href: the `Href` object used to build the URL
:param id: optionally set or override the report `id`
:param order: optionally override the order parameter of the query
:param desc: optionally override the desc parameter
:param format: optionally override the format of the query
:param max: optionally override the max items per page
:param page: optionally specify which page of results (defaults to
the first)
Note: `get_resource_url` of a 'query' resource?
"""
if not isinstance(href, Href):
href = href.href # compatibility with the `req` of the 0.10 API
if format == 'rss':
max = self.items_per_page
page = self.default_page
if id is None:
id = self.id
if desc is None:
desc = self.desc
if order is None:
order = self.order
if max is None:
max = self.max
if page is None:
page = self.page
cols = self.get_columns()
# don't specify the columns in the href if they correspond to
# the default columns, page and max in the same order. That keeps the
# query url shorter in the common case where we just want the default
# columns.
if cols == self.get_default_columns():
cols = None
if page == self.default_page:
page = None
if max == self.items_per_page:
max = None
return href.query(report=id,
order=order, desc=desc and 1 or None,
group=self.group or None,
groupdesc=self.groupdesc and 1 or None,
col=cols,
row=self.rows,
max=max,
page=page,
format=format, **self.constraints)
def to_string(self):
"""Return a user readable and editable representation of the query.
Note: for now, this is an "exploded" query href, but ideally should be
expressed in TracQuery language.
"""
query_string = self.get_href(Href(''))
if query_string and '?' in query_string:
query_string = query_string.split('?', 1)[1]
return 'query:?' + query_string.replace('&', '\n&\n')
    def get_sql(self, req=None, cached_ids=None):
        """Return a (sql, params) tuple for the query.

        :param req: optional request; when given, '$USER' in constraint
                    values is replaced by the authenticated user name
        :param cached_ids: ids always included via an extra OR clause
        """
        self.get_columns()

        enum_columns = ('resolution', 'priority', 'severity')
        # Build the list of actual columns to query
        cols = self.cols[:]
        def add_cols(*args):
            # append each column not already selected, preserving order
            for col in args:
                if not col in cols:
                    cols.append(col)
        if self.group and not self.group in cols:
            add_cols(self.group)
        if self.rows:
            add_cols('reporter', *self.rows)
        add_cols('priority', 'time', 'changetime', self.order)
        cols.extend([c for c in self.constraints.keys() if not c in cols])

        custom_fields = [f['name'] for f in self.fields if 'custom' in f]

        # Built-in fields live on the ticket table; custom fields come from
        # per-field joins on ticket_custom (aliased by the field name).
        sql = []
        sql.append("SELECT " + ",".join(['t.%s AS %s' % (c, c) for c in cols
                                         if c not in custom_fields]))
        sql.append(",priority.value AS priority_value")
        for k in [k for k in cols if k in custom_fields]:
            sql.append(",%s.value AS %s" % (k, k))
        sql.append("\nFROM ticket AS t")

        # Join with ticket_custom table as necessary
        for k in [k for k in cols if k in custom_fields]:
            sql.append("\n LEFT OUTER JOIN ticket_custom AS %s ON " \
                       "(id=%s.ticket AND %s.name='%s')" % (k, k, k, k))

        # Join with the enum table for proper sorting
        for col in [c for c in enum_columns
                    if c == self.order or c == self.group or c == 'priority']:
            sql.append("\n LEFT OUTER JOIN enum AS %s ON "
                       "(%s.type='%s' AND %s.name=%s)"
                       % (col, col, col, col, col))

        # Join with the version/milestone tables for proper sorting
        for col in [c for c in ['milestone', 'version']
                    if c == self.order or c == self.group]:
            sql.append("\n LEFT OUTER JOIN %s ON (%s.name=%s)"
                       % (col, col, col))

        def get_constraint_sql(name, value, mode, neg):
            # Translate one (field, raw value) constraint into a
            # (sql_fragment, bind_param) pair, or None when it is a no-op.
            if name not in custom_fields:
                name = 't.' + name
            else:
                name = name + '.value'
            # strip the leading '!'/mode markers added by `from_string`
            value = value[len(mode) + neg:]
            if mode == '':
                return ("COALESCE(%s,'')%s=%%s" % (name, neg and '!' or ''),
                        value)
            if not value:
                return None
            db = self.env.get_db_cnx()
            value = db.like_escape(value)
            if mode == '~':
                value = '%' + value + '%'
            elif mode == '^':
                value = value + '%'
            elif mode == '$':
                value = '%' + value
            return ("COALESCE(%s,'') %s%s" % (name, neg and 'NOT ' or '',
                                              db.like()),
                    value)

        clauses = []
        args = []
        for k, v in self.constraints.items():
            if req:
                v = [val.replace('$USER', req.authname) for val in v]

            # Determine the match mode of the constraint (contains,
            # starts-with, negation, etc.)
            neg = v[0].startswith('!')
            mode = ''
            if len(v[0]) > neg and v[0][neg] in ('~', '^', '$'):
                mode = v[0][neg]

            # Special case id ranges
            if k == 'id':
                ranges = Ranges()
                for r in v:
                    r = r.replace('!', '')
                    ranges.appendrange(r)
                ids = []
                id_clauses = []
                for a,b in ranges.pairs:
                    if a == b:
                        ids.append(str(a))
                    else:
                        id_clauses.append('id BETWEEN %s AND %s')
                        args.append(a)
                        args.append(b)
                if ids:
                    id_clauses.append('id IN (%s)' % (','.join(ids)))
                if id_clauses:
                    clauses.append('%s(%s)' % (neg and 'NOT ' or '',
                                               ' OR '.join(id_clauses)))
            # Special case for exact matches on multiple values
            elif not mode and len(v) > 1:
                if k not in custom_fields:
                    col = 't.' + k
                else:
                    col = k + '.value'
                clauses.append("COALESCE(%s,'') %sIN (%s)"
                               % (col, neg and 'NOT ' or '',
                                  ','.join(['%s' for val in v])))
                args += [val[neg:] for val in v]
            elif len(v) > 1:
                # Multiple values with a match mode: OR them together,
                # or AND when the whole constraint is negated.
                constraint_sql = filter(None,
                                        [get_constraint_sql(k, val, mode, neg)
                                         for val in v])
                if not constraint_sql:
                    continue
                if neg:
                    clauses.append("(" + " AND ".join(
                        [item[0] for item in constraint_sql]) + ")")
                else:
                    clauses.append("(" + " OR ".join(
                        [item[0] for item in constraint_sql]) + ")")
                args += [item[1] for item in constraint_sql]
            elif len(v) == 1:
                constraint_sql = get_constraint_sql(k, v[0], mode, neg)
                if constraint_sql:
                    clauses.append(constraint_sql[0])
                    args.append(constraint_sql[1])

        clauses = filter(None, clauses)
        if clauses or cached_ids:
            sql.append("\nWHERE ")
            if clauses:
                sql.append(" AND ".join(clauses))
            if cached_ids:
                if clauses:
                    sql.append(" OR ")
                sql.append("id in (%s)" % (','.join(
                    [str(id) for id in cached_ids])))

        sql.append("\nORDER BY ")
        # Sort by the grouping column first (when distinct from the order
        # column), then by the order column.
        order_cols = [(self.order, self.desc)]
        if self.group and self.group != self.order:
            order_cols.insert(0, (self.group, self.groupdesc))
        for name, desc in order_cols:
            if name in custom_fields or name in enum_columns:
                col = name + '.value'
            else:
                col = 't.' + name
            # FIXME: This is a somewhat ugly hack. Can we also have the
            # column type for this? If it's an integer, we do first
            # one, if text, we do 'else'
            if name in ('id', 'time', 'changetime'):
                if desc:
                    sql.append("COALESCE(%s,0)=0 DESC," % col)
                else:
                    sql.append("COALESCE(%s,0)=0," % col)
            else:
                if desc:
                    sql.append("COALESCE(%s,'')='' DESC," % col)
                else:
                    sql.append("COALESCE(%s,'')=''," % col)
            if name in enum_columns:
                # These values must be compared as ints, not as strings
                db = self.env.get_db_cnx()
                if desc:
                    sql.append(db.cast(col, 'int') + ' DESC')
                else:
                    sql.append(db.cast(col, 'int'))
            elif name in ('milestone', 'version'):
                # secondary sort on the milestone due date / version time
                if name == 'milestone':
                    time_col = 'milestone.due'
                else:
                    time_col = 'version.time'
                if desc:
                    sql.append("COALESCE(%s,0)=0 DESC,%s DESC,%s DESC"
                               % (time_col, time_col, col))
                else:
                    sql.append("COALESCE(%s,0)=0,%s,%s"
                               % (time_col, time_col, col))
            else:
                if desc:
                    sql.append("%s DESC" % col)
                else:
                    sql.append("%s" % col)
            if name == self.group and not name == self.order:
                sql.append(",")
        if self.order != 'id':
            # ticket id as the final tie-breaker for a stable ordering
            sql.append(",t.id")

        return "".join(sql), args
def template_data(self, context, tickets, orig_list=None, orig_time=None,
                  req=None):
    """Assemble the data dictionary used to render the query results page.

    :param context: rendering context (provides ``href``)
    :param tickets: list of ticket value dicts as returned by ``execute()``
    :param orig_list: ids from a previously cached run of this query, used
        to flag tickets added/changed since then (optional)
    :param orig_time: timestamp of that previous run (optional)
    :param req: current request; when given, next/prev paging links are
        added to it (optional)
    """
    # Rebuild the per-field constraint descriptions, splitting the mode
    # prefix (~, ^, $) and the '!' negation marker away from each value so
    # the filter widgets can be pre-populated.
    constraints = {}
    for k, v in self.constraints.items():
        constraint = {'values': [], 'mode': ''}
        for val in v:
            neg = val.startswith('!')
            if neg:
                val = val[1:]
            mode = ''
            if val[:1] in ('~', '^', '$') \
                    and not val in self.substitutions:
                mode, val = val[:1], val[1:]
            constraint['mode'] = (neg and '!' or '') + mode
            constraint['values'].append(val)
        constraints[k] = constraint

    cols = self.get_columns()
    labels = dict([(f['name'], f['label']) for f in self.fields])

    # TODO: remove after adding time/changetime to the api.py
    labels['changetime'] = _('Modified')
    labels['time'] = _('Created')

    # Column headers link back to this query with the sort toggled.
    headers = [{
        'name': col, 'label': labels.get(col, _('Ticket')),
        'href': self.get_href(context.href, order=col,
                              desc=(col == self.order and not self.desc))
    } for col in cols]

    # Per-field metadata for the filter controls; textarea fields are not
    # filterable and are skipped.
    fields = {}
    for field in self.fields:
        if field['type'] == 'textarea':
            continue
        field_data = {}
        field_data.update(field)
        del field_data['name']
        fields[field['name']] = field_data

    modes = {}
    modes['text'] = [
        {'name': _("contains"), 'value': "~"},
        {'name': _("doesn't contain"), 'value': "!~"},
        {'name': _("begins with"), 'value': "^"},
        {'name': _("ends with"), 'value': "$"},
        {'name': _("is"), 'value': ""},
        {'name': _("is not"), 'value': "!"}
    ]
    modes['select'] = [
        {'name': _("is"), 'value': ""},
        {'name': _("is not"), 'value': "!"}
    ]

    # Partition tickets into display groups, preserving first-seen order
    # of the group keys.
    groups = {}
    groupsequence = []
    for ticket in tickets:
        if orig_list:
            # Mark tickets added or changed since the query was first
            # executed
            if ticket['time'] > orig_time:
                ticket['added'] = True
            elif ticket['changetime'] > orig_time:
                ticket['changed'] = True
        if self.group:
            group_key = ticket[self.group]
            groups.setdefault(group_key, []).append(ticket)
            if not groupsequence or group_key not in groupsequence:
                groupsequence.append(group_key)
    groupsequence = [(value, groups[value]) for value in groupsequence]

    # detect whether the last group continues on the next page,
    # by checking if the extra (max+1)th ticket is in the last group
    last_group_is_partial = False
    if groupsequence and self.max and len(tickets) == self.max + 1:
        del tickets[-1]
        if len(groupsequence[-1][1]) == 1:
            # additional ticket started a new group
            del groupsequence[-1]  # remove that additional group
        else:
            # additional ticket stayed in the group
            last_group_is_partial = True
            del groupsequence[-1][1][-1]  # remove the additional ticket

    results = Paginator(tickets,
                        self.page - 1,
                        self.max,
                        self.num_items)

    if req:
        if results.has_next_page:
            next_href = self.get_href(req.href, max=self.max,
                                      page=self.page + 1)
            add_link(req, 'next', next_href, _('Next Page'))
        if results.has_previous_page:
            prev_href = self.get_href(req.href, max=self.max,
                                      page=self.page - 1)
            add_link(req, 'prev', prev_href, _('Previous Page'))
    else:
        # No request available (e.g. macro rendering): hide the page index.
        results.show_index = False

    pagedata = []
    shown_pages = results.get_shown_pages(21)
    for page in shown_pages:
        pagedata.append([self.get_href(context.href, page=page), None,
                         str(page), _('Page %(num)d', num=page)])
    results.shown_pages = [dict(zip(['href', 'class', 'string', 'title'],
                                    p)) for p in pagedata]
    results.current_page = {'href': None, 'class': 'current',
                            'string': str(results.page + 1),
                            'title': None}

    return {'query': self,
            'context': context,
            'col': cols,
            'row': self.rows,
            'constraints': constraints,
            'labels': labels,
            'headers': headers,
            'fields': fields,
            'modes': modes,
            'tickets': tickets,
            'groups': groupsequence or [(None, tickets)],
            'last_group_is_partial': last_group_is_partial,
            'paginator': results}
class QueryModule(Component):
    """Web front-end for custom ticket queries (the /query page)."""

    implements(IRequestHandler, INavigationContributor, IWikiSyntaxProvider,
               IContentConverter)

    # Default query strings; '$USER' is substituted with the user name
    # (or email/name from the session for anonymous users).
    default_query = Option('query', 'default_query',
        default='status!=closed&owner=$USER',
        doc='The default query for authenticated users.')

    default_anonymous_query = Option('query', 'default_anonymous_query',
        default='status!=closed&cc~=$USER',
        doc='The default query for anonymous users.')

    items_per_page = IntOption('query', 'items_per_page', 100,
        """Number of tickets displayed per page in ticket queries,
by default (''since 0.11'')""")
# IContentConverter methods

def get_supported_conversions(self):
    """List the formats a ticket query can be converted to.

    Each entry is (key, display name, extension, input type, MIME type,
    quality).
    """
    conversions = (
        ('rss', _('RSS Feed'), 'xml',
         'trac.ticket.Query', 'application/rss+xml', 8),
        ('csv', _('Comma-delimited Text'), 'csv',
         'trac.ticket.Query', 'text/csv', 8),
        ('tab', _('Tab-delimited Text'), 'tsv',
         'trac.ticket.Query', 'text/tab-separated-values', 8),
    )
    for conversion in conversions:
        yield conversion
def convert_content(self, req, mimetype, query, key):
    """Dispatch a query conversion to the matching exporter.

    Returns None for an unknown key, like the original elif chain.
    """
    if key == 'rss':
        return self.export_rss(req, query)
    if key == 'csv':
        return self.export_csv(req, query, mimetype='text/csv')
    if key == 'tab':
        return self.export_csv(req, query, '\t',
                               mimetype='text/tab-separated-values')
# INavigationContributor methods

def get_active_navigation_item(self, req):
    """Highlight the 'tickets' entry in the main navigation."""
    return 'tickets'
def get_navigation_items(self, req):
    """Contribute the 'View Tickets' main navigation entry.

    Only shown when the user may view tickets and the report module is
    disabled (otherwise the report module contributes the entry).
    """
    from trac.ticket.report import ReportModule
    if 'TICKET_VIEW' in req.perm and \
            not self.env.is_component_enabled(ReportModule):
        yield ('mainnav', 'tickets',
               tag.a(_('View Tickets'), href=req.href.query()))
# IRequestHandler methods

def match_request(self, req):
    """This handler serves exactly the /query URL."""
    return req.path_info == '/query'
def process_request(self, req):
    """Handle a /query request: build the Query, then render or export it."""
    req.perm.assert_permission('TICKET_VIEW')

    constraints = self._get_constraints(req)
    if not constraints and not 'order' in req.args:
        # If no constraints are given in the URL, use the default ones.
        if req.authname and req.authname != 'anonymous':
            qstring = self.default_query
            user = req.authname
        else:
            email = req.session.get('email')
            name = req.session.get('name')
            qstring = self.default_anonymous_query
            user = email or name or None
        if user:
            qstring = qstring.replace('$USER', user)
        self.log.debug('QueryModule: Using default query: %s', str(qstring))
        constraints = Query.from_string(self.env, qstring).constraints
        # Ensure no field constraints that depend on $USER are used
        # if we have no username.
        # (Python 2: .items() returns a list, so deleting entries from
        # the dict while looping here is safe.)
        for field, vals in constraints.items():
            for val in vals:
                if val.endswith('$USER'):
                    del constraints[field]

    cols = req.args.get('col')
    if isinstance(cols, basestring):
        cols = [cols]
    # Since we don't show 'id' as an option to the user,
    # we need to re-insert it here.
    if cols and 'id' not in cols:
        cols.insert(0, 'id')

    rows = req.args.get('row', [])
    if isinstance(rows, basestring):
        rows = [rows]

    format = req.args.get('format')
    max = req.args.get('max')
    if max is None and format in ('csv', 'tab'):
        max = 0  # unlimited unless specified explicitly

    query = Query(self.env, req.args.get('report'),
                  constraints, cols, req.args.get('order'),
                  'desc' in req.args, req.args.get('group'),
                  'groupdesc' in req.args, 'verbose' in req.args,
                  rows,
                  req.args.get('page'),
                  max)

    if 'update' in req.args:
        # Reset session vars
        for var in ('query_constraints', 'query_time', 'query_tickets'):
            if var in req.session:
                del req.session[var]
        req.redirect(query.get_href(req.href))

    # Add registered converters
    for conversion in Mimeview(self.env).get_supported_conversions(
            'trac.ticket.Query'):
        add_link(req, 'alternate',
                 query.get_href(req.href, format=conversion[0]),
                 conversion[1], conversion[4], conversion[0])

    if format:
        # Delegate export formats (csv/tab/rss) to the Mimeview system.
        Mimeview(self.env).send_converted(req, 'trac.ticket.Query', query,
                                          format, 'query')

    return self.display_html(req, query)
# Internal methods

def _get_constraints(self, req):
    """Extract the field constraints from the request arguments.

    Returns a dict mapping ticket field name -> list of constraint
    strings, each value already prefixed with its mode (e.g. '~foo').
    """
    constraints = {}
    ticket_fields = [f['name'] for f in
                     TicketSystem(self.env).get_ticket_fields()]
    ticket_fields.append('id')

    # For clients without JavaScript, we remove constraints here if
    # requested
    remove_constraints = {}
    to_remove = [k[10:] for k in req.args.keys()
                 if k.startswith('rm_filter_')]
    if to_remove:  # either empty or containing a single element
        match = re.match(r'(\w+?)_(\d+)$', to_remove[0])
        if match:
            # 'rm_filter_<field>_<idx>': drop just that value index.
            remove_constraints[match.group(1)] = int(match.group(2))
        else:
            # No index given: drop all constraints for the field.
            remove_constraints[to_remove[0]] = -1

    for field in [k for k in req.args.keys() if k in ticket_fields]:
        vals = req.args[field]
        if not isinstance(vals, (list, tuple)):
            vals = [vals]
        if vals:
            # The '<field>_mode' arg carries the match mode prefix.
            mode = req.args.get(field + '_mode')
            if mode:
                vals = [mode + x for x in vals]
            if field in remove_constraints:
                idx = remove_constraints[field]
                if idx >= 0:
                    del vals[idx]
                    if not vals:
                        continue
                else:
                    continue
            constraints[field] = vals

    return constraints
def display_html(self, req, query):
    """Execute the query and render the interactive query.html page."""
    db = self.env.get_db_cnx()

    # The most recent query is stored in the user session;
    orig_list = None
    orig_time = datetime.now(utc)
    query_time = int(req.session.get('query_time', 0))
    query_time = datetime.fromtimestamp(query_time, utc)
    query_constraints = unicode(query.constraints)
    if query_constraints != req.session.get('query_constraints') \
            or query_time < orig_time - timedelta(hours=1):
        tickets = query.execute(req, db)
        # New or outdated query, (re-)initialize session vars
        req.session['query_constraints'] = query_constraints
        req.session['query_tickets'] = ' '.join([str(t['id'])
                                                 for t in tickets])
    else:
        # Same query as last time: reuse the cached ticket ids so that
        # newly added/changed tickets can be highlighted.
        orig_list = [int(id) for id
                     in req.session.get('query_tickets', '').split()]
        tickets = query.execute(req, db, orig_list)
        orig_time = query_time

    context = Context.from_request(req, 'query')
    data = query.template_data(context, tickets, orig_list, orig_time, req)

    # For clients without JavaScript, we add a new constraint here if
    # requested
    constraints = data['constraints']
    if 'add' in req.args:
        field = req.args.get('add_filter')
        if field:
            constraint = constraints.setdefault(field, {})
            constraint.setdefault('values', []).append('')
            # FIXME: '' not always correct (e.g. checkboxes)

    req.session['query_href'] = query.get_href(context.href)
    req.session['query_time'] = to_timestamp(orig_time)
    req.session['query_tickets'] = ' '.join([str(t['id'])
                                             for t in tickets])

    title = _('Custom Query')

    # Only interact with the report module if it is actually enabled.
    #
    # Note that with saved custom queries, there will be some convergence
    # between the report module and the query module.
    from trac.ticket.report import ReportModule
    if 'REPORT_VIEW' in req.perm and \
            self.env.is_component_enabled(ReportModule):
        data['report_href'] = req.href.report()
        add_ctxtnav(req, _('Available Reports'), req.href.report())
        add_ctxtnav(req, _('Custom Query'))
        if query.id:
            # Saved query: pick up its report title/description.
            cursor = db.cursor()
            cursor.execute("SELECT title,description FROM report "
                           "WHERE id=%s", (query.id,))
            for title, description in cursor:
                data['report_resource'] = Resource('report', query.id)
                data['description'] = description
    else:
        data['report_href'] = None
    data.setdefault('report', None)
    data.setdefault('description', None)
    data['title'] = title

    data['all_columns'] = query.get_all_columns()
    # Don't allow the user to remove the id column
    data['all_columns'].remove('id')
    data['all_textareas'] = query.get_all_textareas()

    add_stylesheet(req, 'common/css/report.css')
    add_script(req, 'common/js/query.js')
    return 'query.html', data, None
def export_csv(self, req, query, sep=',', mimetype='text/plain'):
    """Export the query results as delimiter-separated text.

    :param sep: field delimiter (',' for CSV, '\t' for TSV)
    :param mimetype: MIME type reported back with the content
    :return: (content string, content type with charset)

    Fix: the original created two csv.writer objects on the same buffer,
    the first of which was immediately overwritten and therefore dead;
    only the QUOTE_MINIMAL writer is kept.
    """
    content = StringIO()
    cols = query.get_columns()
    writer = csv.writer(content, delimiter=sep, quoting=csv.QUOTE_MINIMAL)
    # Header row with the column names.
    writer.writerow([unicode(c).encode('utf-8') for c in cols])

    context = Context.from_request(req)
    results = query.execute(req, self.env.get_db_cnx())
    for result in results:
        # Only include tickets the requesting user may actually view.
        ticket = Resource('ticket', result['id'])
        if 'TICKET_VIEW' in req.perm(ticket):
            values = []
            for col in cols:
                value = result[col]
                if col in ('cc', 'reporter'):
                    # Apply the configured email obfuscation/formatting.
                    value = Chrome(self.env).format_emails(context(ticket),
                                                           value)
                values.append(unicode(value).encode('utf-8'))
            writer.writerow(values)
    return (content.getvalue(), '%s;charset=utf-8' % mimetype)
def export_rss(self, req, query):
    """Export the query results as an RSS feed.

    :return: (rendered feed, 'application/rss+xml')
    """
    # The feed items need the ticket description text.
    if 'description' not in query.rows:
        query.rows.append('description')
    db = self.env.get_db_cnx()
    results = query.execute(req, db)
    # Absolute URL of the query itself, used as the channel link.
    query_href = req.abs_href.query(group=query.group,
                                    groupdesc=(query.groupdesc and 1
                                               or None),
                                    row=query.rows,
                                    page=req.args.get('page'),
                                    max=req.args.get('max'),
                                    **query.constraints)
    data = {
        'context': Context.from_request(req, 'query', absurls=True),
        'results': results,
        'query_href': query_href
    }
    output = Chrome(self.env).render_template(req, 'query.rss', data,
                                              'application/rss+xml')
    return output, 'application/rss+xml'
# IWikiSyntaxProvider methods

def get_wiki_syntax(self):
    """No extra wiki syntax patterns; only the query: link resolver."""
    return []

def get_link_resolvers(self):
    """Register the resolver for query: wiki links."""
    yield ('query', self._format_link)
def _format_link(self, formatter, ns, query, label):
    """Render a query: wiki link.

    Two forms are supported: 'query:?param=...' passes the raw URL query
    string through; otherwise the TracQuery string syntax is parsed and
    turned into a query href (rendering an error span on syntax errors).
    """
    if query.startswith('?'):
        return tag.a(label, class_='query',
                     href=formatter.href.query() + query.replace(' ', '+'))
    else:
        try:
            query = Query.from_string(self.env, query)
            return tag.a(label,
                         href=query.get_href(formatter.context.href),
                         class_='query')
        # Python 2 'except X, e' syntax -- this file predates Python 3.
        except QuerySyntaxError, e:
            return tag.em(_('[Error: %(error)s]', error=e), class_='error')
class TicketQueryMacro(WikiMacroBase):
"""Macro that lists tickets that match certain criteria.
This macro accepts a comma-separated list of keyed parameters,
in the form "key=value".
If the key is the name of a field, the value must use the syntax
of a filter specifier as defined in TracQuery#QueryLanguage.
Note that this is ''not'' the same as the simplified URL syntax
used for `query:` links starting with a `?` character.
In addition to filters, several other named parameters can be used
to control how the results are presented. All of them are optional.
The `format` parameter determines how the list of tickets is
presented:
- '''list''' -- the default presentation is to list the ticket ID next
to the summary, with each ticket on a separate line.
- '''compact''' -- the tickets are presented as a comma-separated
list of ticket IDs.
- '''count''' -- only the count of matching tickets is displayed
- '''table''' -- a view similar to the custom query view (but without
the controls)
The `max` parameter can be used to limit the number of tickets shown
(defaults to '''0''', i.e. no maximum).
The `order` parameter sets the field used for ordering tickets
(defaults to '''id''').
The `desc` parameter indicates whether the order of the tickets
should be reversed (defaults to '''false''').
The `group` parameter sets the field used for grouping tickets
(defaults to not being set).
The `groupdesc` parameter indicates whether the natural display
order of the groups should be reversed (defaults to '''false''').
The `verbose` parameter can be set to a true value in order to
get the description for the listed tickets. For '''table''' format only.
''deprecated in favor of the `rows` parameter''
The `rows` parameter can be used to specify which field(s) should
be viewed as a row, e.g. `rows=description|summary`
For compatibility with Trac 0.10, if there's a second positional parameter
given to the macro, it will be used to specify the `format`.
Also, using "&" as a field separator still works but is deprecated.
"""
def expand_macro(self, formatter, name, content):
    """Render the TicketQuery macro (arguments documented on the class)."""
    req = formatter.req
    query_string = ''
    argv, kwargs = parse_args(content, strict=False)
    if len(argv) > 0 and not 'format' in kwargs:  # 0.10 compatibility hack
        kwargs['format'] = argv[0]

    # Defaults: order by id, no row limit.
    if 'order' not in kwargs:
        kwargs['order'] = 'id'
    if 'max' not in kwargs:
        kwargs['max'] = '0'  # unlimited by default

    format = kwargs.pop('format', 'list').strip().lower()
    if format in ('list', 'compact'):  # we need 'status' and 'summary'
        kwargs['col'] = '|'.join(['status', 'summary',
                                  kwargs.get('col', '')])

    # Remaining kwargs are query constraints/options; build a query string.
    query_string = '&'.join(['%s=%s' % item
                             for item in kwargs.iteritems()])
    query = Query.from_string(self.env, query_string)

    if format == 'count':
        cnt = query.count(req)
        return tag.span(cnt, title='%d tickets for which %s' %
                        (cnt, query_string), class_='query_count')

    tickets = query.execute(req)

    if format == 'table':
        data = query.template_data(formatter.context, tickets)
        add_stylesheet(req, 'common/css/report.css')
        return Chrome(self.env).render_template(
            req, 'query_results.html', data, None, fragment=True)

    # 'table' format had its own permission checks, here we need to
    # do it explicitly:
    tickets = [t for t in tickets
               if 'TICKET_VIEW' in req.perm('ticket', t['id'])]

    if not tickets:
        return tag.span(_("No results"), class_='query_no_results')

    def ticket_anchor(ticket):
        # Link to a single ticket, CSS-classed by its status.
        return tag.a('#%s' % ticket['id'],
                     class_=ticket['status'],
                     href=req.href.ticket(int(ticket['id'])),
                     title=shorten_line(ticket['summary']))

    def ticket_groups():
        # Build (value, tickets, href, title) tuples, one per group.
        groups = []
        for v, g in groupby(tickets, lambda t: t[query.group]):
            q = Query.from_string(self.env, query_string)
            # produce the hint for the group
            q.group = q.groupdesc = None
            order = q.order
            q.order = None
            title = "%s %s tickets matching %s" % (v, query.group,
                                                   q.to_string())
            # produce the href for the query corresponding to the group
            q.constraints[str(query.group)] = v
            q.order = order
            href = q.get_href(formatter.context)
            groups.append((v, [t for t in g], href, title))
        return groups

    if format == 'compact':
        if query.group:
            groups = [tag.a('#%s' % ','.join([str(t['id'])
                                              for t in g]),
                            href=href, class_='query', title=title)
                      for v, g, href, title in ticket_groups()]
            return tag(groups[0], [(', ', g) for g in groups[1:]])
        else:
            alist = [ticket_anchor(ticket) for ticket in tickets]
            return tag.span(alist[0], *[(', ', a) for a in alist[1:]])
    else:
        # Default 'list' format: definition list of tickets, optionally
        # grouped under a linked group heading.
        if query.group:
            return tag.div(
                [(tag.p(tag_('%(groupvalue)s %(groupname)s tickets:',
                             groupvalue=tag.a(v, href=href, class_='query',
                                              title=title),
                             groupname=query.group)),
                  tag.dl([(tag.dt(ticket_anchor(t)),
                           tag.dd(t['summary'])) for t in g],
                         class_='wiki compact'))
                 for v, g, href, title in ticket_groups()])
        else:
            return tag.div(tag.dl([(tag.dt(ticket_anchor(ticket)),
                                    tag.dd(ticket['summary']))
                                   for ticket in tickets],
                                  class_='wiki compact'))
|
#!/usr/bin/python
# Copyright (c) 2014 Greg James, Visual6502.org
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import time
import params
import imagePIL
from sim2600Console import Sim2600Console
import sim6502
import simTIA
class MainSim:
    """
    This is a sim runner designed to be invoked from an
    outer experiment runner. We pass in the romfile,
    and manually advance the clock ourselves

    Only render with images, no OpenGL
    """

    def __init__(self, romFile, imgdir):
        # Only the PIL image sink is used; GL/raw sinks are disabled.
        self.imageGL = None  # imageOpenGL.getInterface()
        self.imagePIL = imagePIL.getInterface(imgdir)
        self.imageRaw = None
        self.elapsedHalfClocks = 0

        # The console simulator ties together a simulation
        # of the 6502 chip, a simulation of the TIA chip,
        # an emulation of the 6532 RIOT (RAM, I/O, Timer), and
        # a cartridge ROM file holding the program instructions.
        #
        #self.sim = Sim2600Console(params.romFile)
        self.sim = Sim2600Console(romFile,
                                  sim6502.MySim6502, simTIA.MySimTIA)

        # For measuring how fast the simulation is running
        self.lastUpdateTimeSec = None
def updateSimOneFrame(self, logger=None):
    """Advance the simulation by one frame's worth of TIA half-clocks.

    Emits one pixel to the PIL image sink on each low TIA clock phase,
    restarts the image when VSYNC is seen high, and calls
    ``logger(self.sim, halfClockIndex)`` after every half-clock when a
    logger is supplied.  Prints a wires/clock speed report per call.
    """
    tia = self.sim.simTIA
    pixels = []
    i = 0
    while i < params.numTIAHalfClocksPerRender:
        self.sim.advanceOneHalfClock()

        # Get pixel color when TIA clock (~3 MHz) is low
        if tia.isLow(tia.padIndCLK0):
            restartImage = False
            if self.sim.simTIA.isHigh(self.sim.simTIA.vsync):
                print('VSYNC high at TIA halfclock %d'%(tia.halfClkCount))
                restartImage = True

            # grayscale
            #lum = self.simTIA.get3BitLuminance() << 5
            #rgba = (lum << 24) | (lum << 16) | (lum << 8) | 0xFF
            # color
            rgba = self.sim.simTIA.getColorRGBA8()

            if self.imagePIL != None:
                if restartImage == True:
                    self.imagePIL.restartImage()
                self.imagePIL.setNextPixel(rgba)

            if self.sim.simTIA.isHigh(self.sim.simTIA.vblank):
                print('VBLANK at TIA halfclock %d'%(tia.halfClkCount))

        #cpuStr = self.sim6502.getStateStr1()
        #tiaStr = self.simTIA.getTIAStateStr1()
        #print(cpuStr + ' ' + tiaStr)
        if logger is not None:
            logger(self.sim, i)
        i += 1

    # Report simulation speed since the previous frame update.
    if self.lastUpdateTimeSec != None:
        elapsedSec = time.time() - self.lastUpdateTimeSec
        secPerSimClock = 2.0 * elapsedSec / params.numTIAHalfClocksPerRender
        totalWires = self.sim.sim6507.numWiresRecalculated + \
                     self.sim.simTIA.numWiresRecalculated
        wiresPerClock = 2.0 * totalWires / params.numTIAHalfClocksPerRender
        print(' ' +
              '%d wires/clk, %g msec/clk'%
              (wiresPerClock, secPerSimClock * 1000))
        self.sim.sim6507.numWiresRecalculated = 0
        self.sim.simTIA.numWiresRecalculated = 0
    self.lastUpdateTimeSec = time.time()
def getStateStr(self):
    """Return a one-line summary of the CPU and TIA state.

    Fix: the last line referenced ``self.simTIA``, an attribute MainSim
    never defines (the TIA lives on the console sim as
    ``self.sim.simTIA``), so this method always raised AttributeError.
    """
    cpu = self.sim.sim6507
    tia = self.sim.simTIA
    sstr = 'CLKS %d%d'%(cpu.isHighWN('CLK0'), tia.isHighWN('CLK0')) + ' '
    sstr += 'RS,RDY %d%d'%(cpu.isHighWN('RES'), cpu.isHighWN('RDY')) + ' '
    sstr += 'ADDR 0x%4.4X DB 0x%2.2X '% \
        (self.sim.sim6507.getAddressBusValue(),
         self.sim.sim6507.getDataBusValue())
    # was: self.simTIA.getTIAStateStr1() -- no such attribute on MainSim
    sstr += self.sim.simTIA.getTIAStateStr1()
    return sstr
# removed spurious member funcs
#!/usr/bin/python
# Copyright (c) 2014 Greg James, Visual6502.org
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import time
import params
import imagePIL
from sim2600Console import Sim2600Console
import sim6502
import simTIA
class MainSim:
    """
    This is a sim runner designed to be invoked from an
    outer experiment runner. We pass in the romfile,
    and manually advance the clock ourselves

    Only render with images, no OpenGL
    """

    def __init__(self, romFile, imgdir):
        # PIL-based frame renderer; images are written under imgdir.
        self.imagePIL = imagePIL.getInterface(imgdir)
        self.elapsedHalfClocks = 0

        # The console simulator ties together a simulation
        # of the 6502 chip, a simulation of the TIA chip,
        # an emulation of the 6532 RIOT (RAM, I/O, Timer), and
        # a cartridge ROM file holding the program instructions.
        #
        #self.sim = Sim2600Console(params.romFile)
        self.sim = Sim2600Console(romFile,
                                  sim6502.MySim6502, simTIA.MySimTIA)

        # For measuring how fast the simulation is running
        self.lastUpdateTimeSec = None
def updateSimOneFrame(self, logger=None):
    """Advance the simulation by one frame's worth of TIA half-clocks.

    Emits one pixel to the PIL image sink on each low TIA clock phase,
    restarts the image when VSYNC is seen high, and calls
    ``logger(self.sim, halfClockIndex)`` after every half-clock when a
    logger is supplied.  Prints a wires/clock speed report per call.
    """
    tia = self.sim.simTIA
    pixels = []
    i = 0
    while i < params.numTIAHalfClocksPerRender:
        self.sim.advanceOneHalfClock()

        # Get pixel color when TIA clock (~3 MHz) is low
        if tia.isLow(tia.padIndCLK0):
            restartImage = False
            if self.sim.simTIA.isHigh(self.sim.simTIA.vsync):
                print('VSYNC high at TIA halfclock %d'%(tia.halfClkCount))
                restartImage = True

            # grayscale
            #lum = self.simTIA.get3BitLuminance() << 5
            #rgba = (lum << 24) | (lum << 16) | (lum << 8) | 0xFF
            # color
            rgba = self.sim.simTIA.getColorRGBA8()

            if self.imagePIL != None:
                if restartImage == True:
                    self.imagePIL.restartImage()
                self.imagePIL.setNextPixel(rgba)

            if self.sim.simTIA.isHigh(self.sim.simTIA.vblank):
                print('VBLANK at TIA halfclock %d'%(tia.halfClkCount))

        #cpuStr = self.sim6502.getStateStr1()
        #tiaStr = self.simTIA.getTIAStateStr1()
        #print(cpuStr + ' ' + tiaStr)
        if logger is not None:
            logger(self.sim, i)
        i += 1

    # Report simulation speed since the previous frame update.
    if self.lastUpdateTimeSec != None:
        elapsedSec = time.time() - self.lastUpdateTimeSec
        secPerSimClock = 2.0 * elapsedSec / params.numTIAHalfClocksPerRender
        totalWires = self.sim.sim6507.numWiresRecalculated + \
                     self.sim.simTIA.numWiresRecalculated
        wiresPerClock = 2.0 * totalWires / params.numTIAHalfClocksPerRender
        print(' ' +
              '%d wires/clk, %g msec/clk'%
              (wiresPerClock, secPerSimClock * 1000))
        self.sim.sim6507.numWiresRecalculated = 0
        self.sim.simTIA.numWiresRecalculated = 0
    self.lastUpdateTimeSec = time.time()
def getStateStr(self):
    """Return a one-line summary of the CPU and TIA state.

    Fix: the last line referenced ``self.simTIA``, an attribute MainSim
    never defines (the TIA lives on the console sim as
    ``self.sim.simTIA``), so this method always raised AttributeError.
    """
    cpu = self.sim.sim6507
    tia = self.sim.simTIA
    sstr = 'CLKS %d%d'%(cpu.isHighWN('CLK0'), tia.isHighWN('CLK0')) + ' '
    sstr += 'RS,RDY %d%d'%(cpu.isHighWN('RES'), cpu.isHighWN('RDY')) + ' '
    sstr += 'ADDR 0x%4.4X DB 0x%2.2X '% \
        (self.sim.sim6507.getAddressBusValue(),
         self.sim.sim6507.getDataBusValue())
    # was: self.simTIA.getTIAStateStr1() -- no such attribute on MainSim
    sstr += self.sim.simTIA.getTIAStateStr1()
    return sstr
|
import json
import os
import struct
import socket
from twisted.internet import stdio
from twisted.internet import reactor
from twisted.internet.endpoints import TCP4ServerEndpoint
from twisted.protocols import basic
from WebAPI import setup_web
from PSO2Protocols import shipdata, ShipInfoFactory, BlockSenderFactory
from Commands import Commands
from ProxyRedis import p, r
class ServerConsole(basic.LineReceiver):
    """Interactive stdin console that dispatches lines to Commands."""

    def __init__(self):
        self.delimiter = os.linesep

    def connectionMade(self):
        # Show the prompt as soon as stdin is attached.
        self.transport.write('>>> ')

    def lineReceived(self, line):
        """
        :type line: str

        Improvement: split the line once instead of three times; behavior
        is unchanged (argument remainder is passed when present, otherwise
        the whole line is passed to the command).
        """
        parts = line.split(' ', 1)
        command = parts[0]
        if command in Commands:
            if len(parts) > 1:
                Commands[command](parts[1])
            else:
                Commands[command](line)
        else:
            print("[PSO2PD] Command not found.")
        self.transport.write('>>> ')
# Master-server startup script: fetch a cached ship-status blob, start the
# twisted listeners, announce presence over Redis, and run the reactor.
print("=== PSO2Proxy-Distributed master server starting...")
rthread = p.run_in_thread(sleep_time=0.001)
print("[Redis] Messaging thread running.")

print("[PSO2PD] Getting ship statuses...")
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
    s.connect(("210.189.208.1", 12199))  # Un-hardcode this!
    # The first 4 bytes of the reply carry the total payload size
    # (including the size field itself).
    shipdata.write(s.recv(4))
    size = struct.unpack_from("i", shipdata.getvalue())[0]
    # NOTE(review): recv() may return fewer than size - 4 bytes for large
    # payloads; a read loop would be more robust -- TODO confirm.
    shipdata.write(s.recv(size - 4))
except Exception:
    # Was a bare "except:", which also swallowed SystemExit and
    # KeyboardInterrupt; narrowed to Exception.
    print("[PSO2PD] I got an error :(")
finally:
    s.close()  # previously the socket was leaked
print("[PSO2PD] Cached ship query.")

print("[PSO2PD] Starting reactors...")
# Ten block-sender ports (12000, 12100, ...) and ten ship-info ports
# (12099, 12199, ...).
for x in xrange(0, 10):
    endpoint = TCP4ServerEndpoint(reactor, 12000 + (100 * x))
    endpoint.listen(BlockSenderFactory())
for x in xrange(0, 10):
    endpoint = TCP4ServerEndpoint(reactor, 12099 + (100 * x))
    endpoint.listen(ShipInfoFactory())
stdio.StandardIO(ServerConsole())
print("[PSO2PD] Reactor started.")

print("[PSO2PD] Announcing presence...")
r.publish("proxy-server-*", json.dumps({'command': "register"}))
setup_web()
reactor.run()
rthread.stop()
# use global channel
import json
import os
import struct
import socket
from twisted.internet import stdio
from twisted.internet import reactor
from twisted.internet.endpoints import TCP4ServerEndpoint
from twisted.protocols import basic
from WebAPI import setup_web
from PSO2Protocols import shipdata, ShipInfoFactory, BlockSenderFactory
from Commands import Commands
from ProxyRedis import p, r
class ServerConsole(basic.LineReceiver):
    """Interactive stdin console that dispatches lines to Commands."""

    def __init__(self):
        self.delimiter = os.linesep

    def connectionMade(self):
        # Show the prompt as soon as stdin is attached.
        self.transport.write('>>> ')

    def lineReceived(self, line):
        """
        :type line: str

        Improvement: split the line once instead of three times; behavior
        is unchanged (argument remainder is passed when present, otherwise
        the whole line is passed to the command).
        """
        parts = line.split(' ', 1)
        command = parts[0]
        if command in Commands:
            if len(parts) > 1:
                Commands[command](parts[1])
            else:
                Commands[command](line)
        else:
            print("[PSO2PD] Command not found.")
        self.transport.write('>>> ')
# Master-server startup script: fetch a cached ship-status blob, start the
# twisted listeners, announce presence over Redis, and run the reactor.
print("=== PSO2Proxy-Distributed master server starting...")
rthread = p.run_in_thread(sleep_time=0.001)
print("[Redis] Messaging thread running.")

print("[PSO2PD] Getting ship statuses...")
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
    s.connect(("210.189.208.1", 12199))  # Un-hardcode this!
    # The first 4 bytes of the reply carry the total payload size
    # (including the size field itself).
    shipdata.write(s.recv(4))
    size = struct.unpack_from("i", shipdata.getvalue())[0]
    # NOTE(review): recv() may return fewer than size - 4 bytes for large
    # payloads; a read loop would be more robust -- TODO confirm.
    shipdata.write(s.recv(size - 4))
except Exception:
    # Was a bare "except:", which also swallowed SystemExit and
    # KeyboardInterrupt; narrowed to Exception.
    print("[PSO2PD] I got an error :(")
finally:
    s.close()  # previously the socket was leaked
print("[PSO2PD] Cached ship query.")

print("[PSO2PD] Starting reactors...")
# Ten block-sender ports (12000, 12100, ...) and ten ship-info ports
# (12099, 12199, ...).
for x in xrange(0, 10):
    endpoint = TCP4ServerEndpoint(reactor, 12000 + (100 * x))
    endpoint.listen(BlockSenderFactory())
for x in xrange(0, 10):
    endpoint = TCP4ServerEndpoint(reactor, 12099 + (100 * x))
    endpoint.listen(ShipInfoFactory())
stdio.StandardIO(ServerConsole())
print("[PSO2PD] Reactor started.")

print("[PSO2PD] Announcing presence...")
r.publish("proxy-global", json.dumps({'command': "register"}))
setup_web()
reactor.run()
rthread.stop()
# -*- mode: python; coding: utf-8 -*-
# Copyright (c) 2018 Radio Astronomy Software Group
# Licensed under the 2-clause BSD License
"""Tests for FHD_cal object."""
import pytest
import os
import gc
import numpy as np
from pyuvdata import UVCal
import pyuvdata.tests as uvtest
from pyuvdata.data import DATA_PATH
# set up FHD file list
# Shared fixtures: FHD obs/cal IDL save files plus the run settings log,
# all for observation 1061316296.
testdir = os.path.join(DATA_PATH, "fhd_cal_data/")
testfile_prefix = "1061316296_"
obs_testfile = os.path.join(testdir, testfile_prefix + "obs.sav")
cal_testfile = os.path.join(testdir, testfile_prefix + "cal.sav")
settings_testfile = os.path.join(testdir, testfile_prefix + "settings.txt")
def test_read_fhdcal_write_read_calfits(tmp_path):
    """
    FHD cal to calfits loopback test.

    Read in FHD cal files, write out as calfits, read back in and check for
    object equality.
    """
    fhd_cal = UVCal()
    calfits_cal = UVCal()
    fhd_cal.read_fhd_cal(cal_testfile, obs_testfile, settings_file=settings_testfile)
    # Sanity check on the raw gain amplitudes.
    assert np.max(fhd_cal.gain_array) < 2.0

    outfile = str(tmp_path / "outtest_FHDcal_1061311664.calfits")
    fhd_cal.write_calfits(outfile, clobber=True)
    calfits_cal.read_calfits(outfile)
    assert fhd_cal == calfits_cal

    # Release the first pair before re-reading to limit peak memory.
    del fhd_cal, calfits_cal
    gc.collect()

    # do it again with fit gains (rather than raw)
    fhd_cal = UVCal()
    calfits_cal = UVCal()
    fhd_cal.read_fhd_cal(
        cal_testfile, obs_testfile, settings_file=settings_testfile, raw=False
    )
    outfile = str(tmp_path / "outtest_FHDcal_1061311664.calfits")
    fhd_cal.write_calfits(outfile, clobber=True)
    calfits_cal.read_calfits(outfile)
    assert fhd_cal == calfits_cal
def test_extra_history(tmp_path):
    """Test that setting the extra_history keyword works."""
    fhd_cal = UVCal()
    calfits_cal = UVCal()

    extra_history = "Some extra history for testing\n"
    fhd_cal.read_fhd_cal(
        cal_testfile,
        obs_testfile,
        settings_file=settings_testfile,
        extra_history=extra_history,
    )
    outfile = str(tmp_path / "outtest_FHDcal_1061311664.calfits")
    fhd_cal.write_calfits(outfile, clobber=True)
    calfits_cal.read_calfits(outfile)
    # Loopback equality plus the extra history actually recorded.
    assert fhd_cal == calfits_cal
    assert extra_history in fhd_cal.history

    del fhd_cal, calfits_cal
    gc.collect()

    # try again with a list of history strings
    fhd_cal = UVCal()
    calfits_cal = UVCal()

    extra_history = ["Some extra history for testing", "And some more history as well"]
    fhd_cal.read_fhd_cal(
        cal_testfile,
        obs_testfile,
        settings_file=settings_testfile,
        extra_history=extra_history,
    )
    outfile = str(tmp_path / "outtest_FHDcal_1061311664.calfits")
    fhd_cal.write_calfits(outfile, clobber=True)
    calfits_cal.read_calfits(outfile)
    assert fhd_cal == calfits_cal
    # Every supplied history line must appear in the combined history.
    for line in extra_history:
        assert line in fhd_cal.history
def test_flags_galaxy(tmp_path):
    """Test files with time, freq and tile flags and galaxy models behave."""
    # The flag_set variant of the data carries flagged times/freqs/tiles
    # and a galaxy model; loopback through calfits must still round-trip.
    testdir = os.path.join(DATA_PATH, "fhd_cal_data/flag_set")
    obs_testfile_flag = os.path.join(testdir, testfile_prefix + "obs.sav")
    cal_testfile_flag = os.path.join(testdir, testfile_prefix + "cal.sav")
    settings_testfile_flag = os.path.join(testdir, testfile_prefix + "settings.txt")

    fhd_cal = UVCal()
    calfits_cal = UVCal()
    fhd_cal.read_fhd_cal(
        cal_testfile_flag, obs_testfile_flag, settings_file=settings_testfile_flag
    )
    outfile = str(tmp_path / "outtest_FHDcal_1061311664.calfits")
    fhd_cal.write_calfits(outfile, clobber=True)
    calfits_cal.read_calfits(outfile)
    assert fhd_cal == calfits_cal
def test_break_read_fhdcal():
    """Try various cases of missing files."""
    fhd_cal = UVCal()

    # Omitting the required obs file argument is a TypeError.
    with pytest.raises(TypeError):
        fhd_cal.read_fhd_cal(cal_testfile)

    # Reading without a settings file succeeds but warns.
    uvtest.checkWarnings(
        fhd_cal.read_fhd_cal,
        func_args=[cal_testfile, obs_testfile],
        message=["No settings file"],
    )
    # Check only pyuvdata version history with no settings file
    assert fhd_cal.history == "\n" + fhd_cal.pyuvdata_version_str
def test_read_multi(tmp_path):
    """Test reading in multiple files."""
    testdir2 = os.path.join(DATA_PATH, "fhd_cal_data/set2")
    # Pair each base-run file with its counterpart from set2.
    obs_files = [obs_testfile, os.path.join(testdir2, testfile_prefix + "obs.sav")]
    cal_files = [cal_testfile, os.path.join(testdir2, testfile_prefix + "cal.sav")]
    settings_files = [
        settings_testfile,
        os.path.join(testdir2, testfile_prefix + "settings.txt"),
    ]

    fhd_cal = UVCal()
    # The two runs carry different diffuse models, so a mismatch warning
    # is expected during the combined read.
    uvtest.checkWarnings(
        fhd_cal.read_fhd_cal,
        func_args=[cal_files, obs_files],
        func_kwargs={"settings_file": settings_files},
        message="UVParameter diffuse_model does not match",
    )

    # Round-trip through calfits and require object equality.
    outfile = str(tmp_path / "outtest_FHDcal_1061311664.calfits")
    fhd_cal.write_calfits(outfile, clobber=True)
    calfits_cal = UVCal()
    calfits_cal.read_calfits(outfile)
    assert fhd_cal == calfits_cal
def test_break_read_multi():
    """Test errors for different numbers of files."""
    testdir2 = os.path.join(DATA_PATH, "fhd_cal_data/set2")
    obs_files = [obs_testfile, os.path.join(testdir2, testfile_prefix + "obs.sav")]
    cal_files = [cal_testfile, os.path.join(testdir2, testfile_prefix + "cal.sav")]
    settings_files = [
        settings_testfile,
        os.path.join(testdir2, testfile_prefix + "settings.txt"),
    ]

    fhd_cal = UVCal()
    # Every mismatched list/scalar combination of arguments must raise.
    bad_combos = [
        (cal_files, obs_files[0], settings_files),
        (cal_files, obs_files, settings_files[0]),
        (cal_files, obs_files + obs_files, settings_files),
        (cal_files, obs_files, settings_files + settings_files),
        (cal_files[0], obs_files, settings_files[0]),
        (cal_files[0], obs_files[0], settings_files),
    ]
    for cal_arg, obs_arg, settings_arg in bad_combos:
        with pytest.raises(ValueError):
            fhd_cal.read_fhd_cal(cal_arg, obs_arg, settings_file=settings_arg)
Remove gc statements
# -*- mode: python; coding: utf-8 -*-
# Copyright (c) 2018 Radio Astronomy Software Group
# Licensed under the 2-clause BSD License
"""Tests for FHD_cal object."""
import pytest
import os
import numpy as np
from pyuvdata import UVCal
import pyuvdata.tests as uvtest
from pyuvdata.data import DATA_PATH
# set up FHD file list
# Module-level fixture paths for a single FHD calibration run; DATA_PATH
# points at the packaged pyuvdata test data directory.
testdir = os.path.join(DATA_PATH, "fhd_cal_data/")
# Observation ID prefix shared by every file in this run.
testfile_prefix = "1061316296_"
obs_testfile = os.path.join(testdir, testfile_prefix + "obs.sav")  # IDL obs save file
cal_testfile = os.path.join(testdir, testfile_prefix + "cal.sav")  # IDL cal save file
settings_testfile = os.path.join(testdir, testfile_prefix + "settings.txt")  # run settings
def test_read_fhdcal_raw_write_read_calfits(tmp_path):
    """
    FHD cal to calfits loopback test.

    Read in FHD cal files, write out as calfits, read back in and check for
    object equality.
    """
    fhd_cal = UVCal()
    calfits_cal = UVCal()
    fhd_cal.read_fhd_cal(cal_testfile, obs_testfile, settings_file=settings_testfile)
    # Sanity check on the raw gains for this dataset.
    assert np.max(fhd_cal.gain_array) < 2.0

    outfile = str(tmp_path / "outtest_FHDcal_1061311664.calfits")
    fhd_cal.write_calfits(outfile, clobber=True)
    calfits_cal.read_calfits(outfile)
    assert fhd_cal == calfits_cal
    # NOTE: dropped the dead bare `return` that ended the original function.
def test_read_fhdcal_fit_write_read_calfits(tmp_path):
    """FHD cal to calfits loopback test using fit gains (rather than raw)."""
    fhd_cal = UVCal()
    calfits_cal = UVCal()
    # raw=False selects the polynomial-fit gains instead of the raw gains.
    fhd_cal.read_fhd_cal(
        cal_testfile, obs_testfile, settings_file=settings_testfile, raw=False
    )
    outfile = str(tmp_path / "outtest_FHDcal_1061311664.calfits")
    fhd_cal.write_calfits(outfile, clobber=True)
    calfits_cal.read_calfits(outfile)
    assert fhd_cal == calfits_cal
    # NOTE: dropped the dead bare `return` that ended the original function.
def test_extra_history(tmp_path):
    """Test that setting the extra_history keyword works.

    Uses a single extra-history string; the list-of-strings variant is
    exercised by test_extra_history_strings.
    """
    fhd_cal = UVCal()
    calfits_cal = UVCal()
    extra_history = "Some extra history for testing\n"
    fhd_cal.read_fhd_cal(
        cal_testfile,
        obs_testfile,
        settings_file=settings_testfile,
        extra_history=extra_history,
    )
    outfile = str(tmp_path / "outtest_FHDcal_1061311664.calfits")
    fhd_cal.write_calfits(outfile, clobber=True)
    calfits_cal.read_calfits(outfile)
    assert fhd_cal == calfits_cal
    assert extra_history in fhd_cal.history
    # NOTE: dropped the dead bare `return` that ended the original function.
def test_extra_history_strings(tmp_path):
    """Test that extra_history accepts a list of history strings."""
    fhd_cal = UVCal()
    calfits_cal = UVCal()
    extra_history = ["Some extra history for testing", "And some more history as well"]
    fhd_cal.read_fhd_cal(
        cal_testfile,
        obs_testfile,
        settings_file=settings_testfile,
        extra_history=extra_history,
    )
    outfile = str(tmp_path / "outtest_FHDcal_1061311664.calfits")
    fhd_cal.write_calfits(outfile, clobber=True)
    calfits_cal.read_calfits(outfile)
    assert fhd_cal == calfits_cal
    # Every supplied history entry must appear in the combined history.
    for line in extra_history:
        assert line in fhd_cal.history
    # NOTE: dropped the dead bare `return` that ended the original function.
def test_flags_galaxy(tmp_path):
    """Test files with time, freq and tile flags and galaxy models behave."""
    # Local name chosen so the module-level `testdir` is not shadowed.
    flag_set_dir = os.path.join(DATA_PATH, "fhd_cal_data/flag_set")
    obs_flag_file = os.path.join(flag_set_dir, testfile_prefix + "obs.sav")
    cal_flag_file = os.path.join(flag_set_dir, testfile_prefix + "cal.sav")
    settings_flag_file = os.path.join(flag_set_dir, testfile_prefix + "settings.txt")

    fhd_cal = UVCal()
    fhd_cal.read_fhd_cal(
        cal_flag_file, obs_flag_file, settings_file=settings_flag_file
    )

    # Round-trip through calfits and require object equality.
    outfile = str(tmp_path / "outtest_FHDcal_1061311664.calfits")
    fhd_cal.write_calfits(outfile, clobber=True)
    calfits_cal = UVCal()
    calfits_cal.read_calfits(outfile)
    assert fhd_cal == calfits_cal
def test_break_read_fhdcal():
    """Try various cases of missing files."""
    fhd_cal = UVCal()

    # Leaving out the obs file is a TypeError (required positional arg).
    with pytest.raises(TypeError):
        fhd_cal.read_fhd_cal(cal_testfile)

    # A missing settings file only produces a warning, not an error.
    uvtest.checkWarnings(
        fhd_cal.read_fhd_cal,
        func_args=[cal_testfile, obs_testfile],
        message=["No settings file"],
    )
    # Check only pyuvdata version history with no settings file
    assert fhd_cal.history == "\n" + fhd_cal.pyuvdata_version_str
def test_read_multi(tmp_path):
    """Test reading in multiple files."""
    set2_dir = os.path.join(DATA_PATH, "fhd_cal_data/set2")
    # Combine the base run with the second-set run.
    obs_files = [obs_testfile, os.path.join(set2_dir, testfile_prefix + "obs.sav")]
    cal_files = [cal_testfile, os.path.join(set2_dir, testfile_prefix + "cal.sav")]
    settings_files = [
        settings_testfile,
        os.path.join(set2_dir, testfile_prefix + "settings.txt"),
    ]

    fhd_cal = UVCal()
    # The two runs have different diffuse models, so expect that warning.
    uvtest.checkWarnings(
        fhd_cal.read_fhd_cal,
        func_args=[cal_files, obs_files],
        func_kwargs={"settings_file": settings_files},
        message="UVParameter diffuse_model does not match",
    )

    # Round-trip through calfits and require object equality.
    outfile = str(tmp_path / "outtest_FHDcal_1061311664.calfits")
    fhd_cal.write_calfits(outfile, clobber=True)
    calfits_cal = UVCal()
    calfits_cal.read_calfits(outfile)
    assert fhd_cal == calfits_cal
def test_break_read_multi():
    """Test errors for different numbers of files."""
    set2_dir = os.path.join(DATA_PATH, "fhd_cal_data/set2")
    obs_files = [obs_testfile, os.path.join(set2_dir, testfile_prefix + "obs.sav")]
    cal_files = [cal_testfile, os.path.join(set2_dir, testfile_prefix + "cal.sav")]
    settings_files = [
        settings_testfile,
        os.path.join(set2_dir, testfile_prefix + "settings.txt"),
    ]

    fhd_cal = UVCal()
    # Each (cal, obs, settings) combination below mixes scalars and lists
    # or has mismatched list lengths, and must raise a ValueError.
    bad_combos = [
        (cal_files, obs_files[0], settings_files),
        (cal_files, obs_files, settings_files[0]),
        (cal_files, obs_files + obs_files, settings_files),
        (cal_files, obs_files, settings_files + settings_files),
        (cal_files[0], obs_files, settings_files[0]),
        (cal_files[0], obs_files[0], settings_files),
    ]
    for cal_arg, obs_arg, settings_arg in bad_combos:
        with pytest.raises(ValueError):
            fhd_cal.read_fhd_cal(cal_arg, obs_arg, settings_file=settings_arg)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.