hexsha stringlengths 40 40 | size int64 2 1.02M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 245 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 245 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 245 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 2 1.02M | avg_line_length float64 1 417k | max_line_length int64 1 987k | alphanum_fraction float64 0 1 | content_no_comment stringlengths 0 1.01M | is_comment_constant_removed bool 1
class | is_sharp_comment_removed bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
1c3b08af7c3dc096936c3528aa09cebaf722275b | 1,980 | py | Python | house_price_mlp.py | starcroce/tf_dl_cookbook | 65c0cb9c9df230e551df5f04c5e2345dcbe53552 | [
"MIT"
] | null | null | null | house_price_mlp.py | starcroce/tf_dl_cookbook | 65c0cb9c9df230e551df5f04c5e2345dcbe53552 | [
"MIT"
] | null | null | null | house_price_mlp.py | starcroce/tf_dl_cookbook | 65c0cb9c9df230e551df5f04c5e2345dcbe53552 | [
"MIT"
] | 1 | 2019-01-02T06:42:36.000Z | 2019-01-02T06:42:36.000Z | import matplotlib.pyplot as plt
import pandas as pd
import tensorflow as tf
import tensorflow.contrib.layers as layers
from sklearn import datasets
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler
# Load the Boston housing data and keep three predictive features.
# NOTE(review): load_boston was removed in scikit-learn 1.2; migrating to
# another dataset will eventually be required.
boston = datasets.load_boston()
df = pd.DataFrame(boston.data, columns=boston.feature_names)
df["target"] = boston.target
X_train, X_test, y_train, y_test = train_test_split(
    df[["RM", "LSTAT", "PTRATIO"]], df[["target"]], test_size=0.3, random_state=0
)
# Fit the scalers on the *training* split only and reuse them on the test
# split.  The previous code fit a fresh MinMaxScaler on each split, which
# leaks test-set statistics and puts train/test on inconsistent scales.
x_scaler = MinMaxScaler().fit(X_train)
y_scaler = MinMaxScaler().fit(y_train)
X_train = x_scaler.transform(X_train)
X_test = x_scaler.transform(X_test)
y_train = y_scaler.transform(y_train)
y_test = y_scaler.transform(y_test)
m = len(X_train)  # number of training samples
n = 3  # number of features
n_hidden = 20  # number of hidden neurons
batch_size = 200  # NOTE(review): defined but unused — training below is full-batch
eta = 0.01  # learning rate
max_epoch = 1000
def multilayer_perceptron(x):
    """One-hidden-layer MLP: `n_hidden` ReLU units feeding a single sigmoid
    output unit (targets are MinMax-scaled into [0, 1], matching sigmoid)."""
    fcl = layers.fully_connected(x, n_hidden, activation_fn=tf.nn.relu, scope="fcl")
    out = layers.fully_connected(fcl, 1, activation_fn=tf.nn.sigmoid, scope="out")
    return out
# build model, loss and train op
x = tf.placeholder(tf.float32, name="X", shape=[m, n])  # fixed full-batch shape
y = tf.placeholder(tf.float32, name="Y")
y_hat = multilayer_perceptron(x)
# Per-sample squared error; reduce_mean below turns it into MSE.
correct_prediction = tf.square(y - y_hat)
mse = tf.reduce_mean(tf.cast(correct_prediction, "float"))
train = tf.train.AdamOptimizer(learning_rate=eta).minimize(mse)
init = tf.global_variables_initializer()
with tf.Session() as sess:
    sess.run(init)
    # Dump the graph for TensorBoard inspection.
    writer = tf.summary.FileWriter("graphs", sess.graph)
    for i in range(max_epoch):
        # Full-batch gradient step; `p` keeps the latest predictions.
        _, l, p = sess.run([train, mse, y_hat], feed_dict={x: X_train, y: y_train})
        if i % 100 == 0:
            print(f"Epoch {i}: Loss {l}")
    print("Training Done!")
    # NOTE(review): this reports MSE on the *training* split, not the
    # held-out test split.
    correct_prediction = tf.square(y - y_hat)
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
    print("Mean square error:", accuracy.eval({x: X_train, y: y_train}))
    # True vs. predicted (scaled) targets.
    plt.scatter(y_train, p)
    plt.show()
    writer.close()
| 33 | 84 | 0.717677 | import matplotlib.pyplot as plt
import pandas as pd
import tensorflow as tf
import tensorflow.contrib.layers as layers
from sklearn import datasets
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler
# Load the Boston housing data and keep three predictive features.
# NOTE(review): load_boston was removed in scikit-learn 1.2.
boston = datasets.load_boston()
df = pd.DataFrame(boston.data, columns=boston.feature_names)
df["target"] = boston.target
X_train, X_test, y_train, y_test = train_test_split(
    df[["RM", "LSTAT", "PTRATIO"]], df[["target"]], test_size=0.3, random_state=0
)
# Fit the scalers on the *training* split only and reuse them on the test
# split; fitting a fresh scaler per split leaks test statistics and puts
# the splits on inconsistent scales.
x_scaler = MinMaxScaler().fit(X_train)
y_scaler = MinMaxScaler().fit(y_train)
X_train = x_scaler.transform(X_train)
X_test = x_scaler.transform(X_test)
y_train = y_scaler.transform(y_train)
y_test = y_scaler.transform(y_test)
m = len(X_train)  # number of training samples
n = 3  # number of input features
n_hidden = 20  # hidden-layer width
batch_size = 200  # NOTE(review): unused — training is full-batch
eta = 0.01  # learning rate
max_epoch = 1000
def multilayer_perceptron(x):
    """One-hidden-layer MLP: `n_hidden` ReLU units, then one sigmoid output
    unit (targets are MinMax-scaled into [0, 1], matching sigmoid range)."""
    fcl = layers.fully_connected(x, n_hidden, activation_fn=tf.nn.relu, scope="fcl")
    out = layers.fully_connected(fcl, 1, activation_fn=tf.nn.sigmoid, scope="out")
    return out
# Build model, loss and train op (TF1 graph mode).
x = tf.placeholder(tf.float32, name="X", shape=[m, n])  # fixed full-batch shape
y = tf.placeholder(tf.float32, name="Y")
y_hat = multilayer_perceptron(x)
# Per-sample squared error; reduce_mean below turns it into MSE.
correct_prediction = tf.square(y - y_hat)
mse = tf.reduce_mean(tf.cast(correct_prediction, "float"))
train = tf.train.AdamOptimizer(learning_rate=eta).minimize(mse)
init = tf.global_variables_initializer()
with tf.Session() as sess:
    sess.run(init)
    # Dump the graph for TensorBoard inspection.
    writer = tf.summary.FileWriter("graphs", sess.graph)
    for i in range(max_epoch):
        # Full-batch gradient step; `p` keeps the latest predictions.
        _, l, p = sess.run([train, mse, y_hat], feed_dict={x: X_train, y: y_train})
        if i % 100 == 0:
            print(f"Epoch {i}: Loss {l}")
    print("Training Done!")
    # NOTE(review): this reports MSE on the *training* split.
    correct_prediction = tf.square(y - y_hat)
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
    print("Mean square error:", accuracy.eval({x: X_train, y: y_train}))
    # True vs. predicted (scaled) targets.
    plt.scatter(y_train, p)
    plt.show()
    writer.close()
| true | true |
1c3b0913d27b99b70bf2d48e73a0e8605d912f7e | 277 | py | Python | whyis/blueprint/nanopub/__init__.py | tolulomo/whyis | eb50ab3301eb7efd27a1a3f6fb2305dedd910397 | [
"Apache-2.0"
] | 31 | 2018-05-30T02:41:23.000Z | 2021-10-17T01:25:20.000Z | whyis/blueprint/nanopub/__init__.py | tolulomo/whyis | eb50ab3301eb7efd27a1a3f6fb2305dedd910397 | [
"Apache-2.0"
] | 115 | 2018-04-07T00:59:11.000Z | 2022-03-02T03:06:45.000Z | whyis/blueprint/nanopub/__init__.py | tolulomo/whyis | eb50ab3301eb7efd27a1a3f6fb2305dedd910397 | [
"Apache-2.0"
] | 25 | 2018-04-07T00:49:55.000Z | 2021-09-28T14:29:18.000Z | from .nanopub_blueprint import nanopub_blueprint
from .delete_nanopub import delete_nanopub as __delete_nanopub
from .get_nanopub import get_nanopub as __get_nanopub
from .post_nanopub import post_nanopub as __post_nanopub
from .put_nanopub import put_nanopub as __put_nanopub
| 46.166667 | 62 | 0.880866 | from .nanopub_blueprint import nanopub_blueprint
from .delete_nanopub import delete_nanopub as __delete_nanopub
from .get_nanopub import get_nanopub as __get_nanopub
from .post_nanopub import post_nanopub as __post_nanopub
from .put_nanopub import put_nanopub as __put_nanopub
| true | true |
1c3b09d840e3ffbc3af67b126048f4eaf366fa05 | 11,294 | py | Python | cogdl/pipelines.py | zhangdan0602/cogdl | 35a338f29066e4b1a5d7f46217f09ebceaf13106 | [
"MIT"
] | null | null | null | cogdl/pipelines.py | zhangdan0602/cogdl | 35a338f29066e4b1a5d7f46217f09ebceaf13106 | [
"MIT"
] | null | null | null | cogdl/pipelines.py | zhangdan0602/cogdl | 35a338f29066e4b1a5d7f46217f09ebceaf13106 | [
"MIT"
] | null | null | null | import os
import random
import matplotlib.cm as cm
import matplotlib.pyplot as plt
import networkx as nx
import numpy as np
from numpy.lib.arraysetops import isin
import torch
import yaml
from grave import plot_network, use_attributes
from tabulate import tabulate
from cogdl import oagbert
from cogdl.data import Graph
from cogdl.tasks import build_task
from cogdl.datasets import build_dataset_from_name, NodeDataset
from cogdl.models import build_model
from cogdl.options import get_default_args
from cogdl.datasets.rec_data import build_recommendation_data
class Pipeline(object):
    """Abstract base for CogDL application pipelines.

    Keeps the application name plus any extra keyword configuration;
    concrete pipelines implement ``__call__``.
    """

    def __init__(self, app: str, **kwargs):
        # Record configuration for subclasses to consult later.
        self.app = app
        self.kwargs = kwargs

    def __call__(self, **kwargs):
        # Subclasses must provide the actual entry point.
        raise NotImplementedError
class DatasetPipeline(Pipeline):
    """Pipeline whose entry point accepts one dataset name or a list of them."""

    def __init__(self, app: str, **kwargs):
        super(DatasetPipeline, self).__init__(app, **kwargs)

    def __call__(self, dataset, **kwargs):
        # Normalize a single name to a one-element list before delegating
        # to the subclass hook.
        names = [dataset] if isinstance(dataset, str) else dataset
        return self._call(names, **kwargs)
class DatasetStatsPipeline(DatasetPipeline):
    """Print a table of basic statistics for each requested dataset."""

    def __init__(self, app: str, **kwargs):
        super(DatasetStatsPipeline, self).__init__(app, **kwargs)

    def _call(self, dataset=[], **kwargs):
        if isinstance(dataset, str):
            dataset = [dataset]
        col_names = [
            "Dataset",
            "#nodes",
            "#edges",
            "#features",
            "#classes",
            "#labeled data",
        ]
        tab_data = []
        for name in dataset:
            # Load each dataset by name and pull its single graph object.
            data = build_dataset_from_name(name)[0]
            row = [
                name,
                data.x.shape[0],
                data.edge_index[0].shape[0],
                data.x.shape[1],
                len(set(data.y.numpy())),
                sum(data.train_mask.numpy()),
            ]
            tab_data.append(row)
        print(tabulate(tab_data, headers=col_names, tablefmt="psql"))
        return tab_data
class DatasetVisualPipeline(DatasetPipeline):
    """Sample a breadth-first ego network around a seed node and plot it."""

    def __init__(self, app: str, **kwargs):
        super(DatasetVisualPipeline, self).__init__(app, **kwargs)

    def _call(self, dataset="cora", seed=-1, depth=3, **kwargs):
        # Only the first dataset name is visualized.
        if isinstance(dataset, list):
            dataset = dataset[0]
        name = dataset
        dataset = build_dataset_from_name(name)
        data = dataset[0]
        # Rebuild the graph in networkx from the (row, col) edge index.
        G = nx.Graph()
        edge_index = torch.stack(data.edge_index)
        G.add_edges_from([tuple(edge_index[:, i].numpy()) for i in range(edge_index.shape[1])])
        # seed == -1 means "pick a random starting node".
        if seed == -1:
            seed = random.choice(list(G.nodes()))
        q = [seed]
        node_set = set([seed])
        node_index = {seed: 0}  # BFS depth of each visited node
        max_index = 1
        # Level-by-level BFS up to `depth` hops from the seed.
        for _ in range(depth):
            nq = []
            for x in q:
                for key in G[x].keys():
                    if key not in node_set:
                        nq.append(key)
                        node_set.add(key)
                        node_index[key] = node_index[x] + 1
            if len(nq) > 0:
                max_index += 1
            q = nq
        # Color/size nodes by BFS depth: closer to the seed = larger.
        cmap = cm.rainbow(np.linspace(0.0, 1.0, max_index))
        for node, index in node_index.items():
            G.nodes[node]["color"] = cmap[index]
            G.nodes[node]["size"] = (max_index - index) * 50
        pic_file = f"{name}.png"
        plt.subplots()
        plot_network(G.subgraph(list(node_set)), node_style=use_attributes())
        plt.savefig(pic_file)
        print(f"Sampled ego network saved to {pic_file}")
        # Returns the final BFS frontier (nodes at the deepest level).
        return q
class OAGBertInferencePipepline(Pipeline):
    """Run OAG-BERT tokenization + inference on input text sequences."""

    def __init__(self, app: str, model: str, **kwargs):
        super(OAGBertInferencePipepline, self).__init__(app, model=model, **kwargs)
        # dict.get is the idiomatic form of the previous
        # `kwargs["load_weights"] if "load_weights" in kwargs else True`.
        load_weights = kwargs.get("load_weights", True)
        self.tokenizer, self.bert_model = oagbert(model, load_weights=load_weights)

    def __call__(self, sequence, **kwargs):
        """Tokenize *sequence* (str or list of str) and return raw model outputs."""
        tokens = self.tokenizer(sequence, return_tensors="pt", padding=True)
        outputs = self.bert_model(**tokens)
        return outputs
class GenerateEmbeddingPipeline(Pipeline):
    """Train an embedding method ("emb" shallow model or "gnn") and return
    the learned node embeddings for a user-supplied graph."""

    def __init__(self, app: str, model: str, **kwargs):
        super(GenerateEmbeddingPipeline, self).__init__(app, model=model, **kwargs)
        # match.yml pairs tasks with datasets/models; the blogcatalog entry
        # lists shallow embedding models, the cora entry lists GNN models.
        match_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "match.yml")
        with open(match_path, "r", encoding="utf8") as f:
            match = yaml.load(f, Loader=yaml.FullLoader)
        objective = match.get("unsupervised_node_classification", None)
        for pair_dict in objective:
            if "blogcatalog" in pair_dict["dataset"]:
                emb_models = pair_dict["model"]
            elif "cora" in pair_dict["dataset"]:
                gnn_models = pair_dict["model"]
        if model in emb_models:
            self.method_type = "emb"
            args = get_default_args(
                task="unsupervised_node_classification", dataset="blogcatalog", model=model, **kwargs
            )
        elif model in gnn_models:
            self.method_type = "gnn"
            args = get_default_args(task="unsupervised_node_classification", dataset="cora", model=model, **kwargs)
        else:
            print("Please choose a model from ", emb_models, "or", gnn_models)
            exit(0)
        self.data_path = kwargs.get("data_path", "tmp_data.pt")
        self.num_features = kwargs.get("num_features", None)
        if self.num_features is not None:
            args.num_features = self.num_features
        elif self.method_type == "gnn":
            # GNNs need the input feature dimension before model build.
            print("Please provide num_features for gnn model!")
            exit(0)
        args.model = args.model[0]
        self.model = build_model(args)
        self.trainer = self.model.get_trainer(args)
        if self.trainer is not None:
            self.trainer = self.trainer(args)

    def __call__(self, edge_index, x=None, edge_weight=None):
        """Compute embeddings for the graph given by *edge_index*.

        edge_index: (num_edges, 2) np.ndarray or torch.Tensor of endpoints.
        x: optional node-feature matrix (gnn path); random features are
            generated when omitted.
        edge_weight: optional per-edge weights (emb path only).
        """
        if self.method_type == "emb":
            G = nx.Graph()
            if edge_weight is not None:
                if isinstance(edge_index, np.ndarray):
                    edges = np.concatenate([edge_index, np.expand_dims(edge_weight, -1)], -1)
                elif isinstance(edge_index, torch.Tensor):
                    edges = torch.cat([edge_index, edge_weight.unsqueeze(-1)], -1)
                else:
                    print("Please provide edges via np.ndarray or torch.Tensor.")
                    return
                G.add_weighted_edges_from(edges.tolist())
            else:
                if not isinstance(edge_index, np.ndarray) and not isinstance(edge_index, torch.Tensor):
                    print("Please provide edges via np.ndarray or torch.Tensor.")
                    return
                G.add_edges_from(edge_index.tolist())
            embeddings = self.model.train(G)
        elif self.method_type == "gnn":
            num_nodes = edge_index.max().item() + 1
            if x is None:
                print("No input node features, use random features instead.")
                # BUG FIX: the random features were previously generated but
                # never assigned, so `x` stayed None downstream.
                x = np.random.randn(num_nodes, self.num_features)
            if isinstance(x, np.ndarray):
                x = torch.from_numpy(x).float()
            if isinstance(edge_index, np.ndarray):
                edge_index = torch.from_numpy(edge_index)
            edge_index = (edge_index[:, 0], edge_index[:, 1])
            data = Graph(x=x, edge_index=edge_index)
            # Round-trip through disk because NodeDataset loads from a path.
            torch.save(data, self.data_path)
            dataset = NodeDataset(path=self.data_path, scale_feat=False)
            embeddings = self.trainer.fit(self.model, dataset, evaluate=False)
            embeddings = embeddings.detach().cpu().numpy()
        return embeddings
class RecommendationPipepline(Pipeline):
    """Train a recommendation model and score items for user batches.

    NOTE: the constructor trains the model eagerly and caches the final
    user/item embeddings.
    """

    def __init__(self, app: str, model: str, **kwargs):
        super(RecommendationPipepline, self).__init__(app, model=model, **kwargs)
        if "data" in kwargs:
            # Raw interaction array supplied directly; the last 100 rows
            # double as both validation and test splits.
            data = kwargs["data"]
            val_data = test_data = data[-100:, :]
            data = build_recommendation_data("custom", data, val_data, test_data)
            self.data_path = kwargs.get("data_path", "tmp_data.pt")
            # NOTE(review): this batch_size=128 is overwritten by the
            # batch_size=2048 default a few lines below.
            self.batch_size = kwargs.get("batch_size", 128)
            torch.save(data, self.data_path)
            self.dataset = NodeDataset(path=self.data_path, scale_feat=False)
        elif "dataset" in kwargs:
            dataset = kwargs.pop("dataset")
            self.dataset = build_dataset_from_name(dataset)
        else:
            print("Please provide recommendation data!")
            exit(0)
        self.batch_size = kwargs.get("batch_size", 2048)
        self.n_items = self.dataset[0].n_params["n_items"]
        args = get_default_args(task="recommendation", dataset="ali", model=model, **kwargs)
        args.model = args.model[0]
        # Train now; __call__ only scores with the resulting embeddings.
        task = build_task(args, dataset=self.dataset)
        task.train()
        self.model = task.model
        self.model.eval()
        self.user_emb, self.item_emb = self.model.generate()

    def __call__(self, user_batch, **kwargs):
        """Return top-k (item, score) lists keyed by user id."""
        user_batch = np.array(user_batch)
        user_batch = torch.from_numpy(user_batch).to(self.model.device)
        u_g_embeddings = self.user_emb[user_batch]
        # batch-item test
        n_item_batchs = self.n_items // self.batch_size + 1
        rate_batch = np.zeros(shape=(len(user_batch), self.n_items))
        i_count = 0
        # Score items in chunks of batch_size to bound memory use.
        for i_batch_id in range(n_item_batchs):
            i_start = i_batch_id * self.batch_size
            i_end = min((i_batch_id + 1) * self.batch_size, self.n_items)
            item_batch = torch.LongTensor(np.array(range(i_start, i_end))).view(i_end - i_start).to(self.model.device)
            i_g_embddings = self.item_emb[item_batch]
            i_rate_batch = self.model.rating(u_g_embeddings, i_g_embddings).detach().cpu()
            rate_batch[:, i_start:i_end] = i_rate_batch
            i_count += i_rate_batch.shape[1]
        topk = kwargs.get("topk", 10)
        results = {}
        # Per user: sort all item scores descending and keep the top-k pairs.
        for i in range(len(user_batch)):
            rate = list(zip(range(self.n_items), rate_batch[i]))
            rate.sort(key=lambda x: x[1], reverse=True)
            results[user_batch[i].item()] = [rate[j] for j in range(min(topk, len(rate)))]
        return results
# Registry mapping each supported app name to its Pipeline implementation
# and the default constructor arguments used when the caller supplies none.
SUPPORTED_APPS = {
    "dataset-stats": {"impl": DatasetStatsPipeline, "default": {"dataset": "cora"}},
    "dataset-visual": {"impl": DatasetVisualPipeline, "default": {"dataset": "cora"}},
    "oagbert": {"impl": OAGBertInferencePipepline, "default": {"model": "oagbert-v1"}},
    "generate-emb": {"impl": GenerateEmbeddingPipeline, "default": {"model": "prone"}},
    "recommendation": {"impl": RecommendationPipepline, "default": {"model": "lightgcn"}},
}
def check_app(app: str):
    """Return the registry entry for *app*, raising KeyError if unsupported."""
    # Guard clause: reject unknown names up front.
    if app not in SUPPORTED_APPS:
        raise KeyError("Unknown app {}, available apps are {}".format(app, list(SUPPORTED_APPS.keys())))
    return SUPPORTED_APPS[app]
def pipeline(app: str, **kwargs) -> Pipeline:
    """Instantiate the pipeline registered for *app*, merging caller kwargs
    over the registry defaults."""
    entry = check_app(app)
    # Caller-supplied kwargs win over the registry defaults.
    merged = dict(entry["default"])
    merged.update(kwargs)
    return entry["impl"](app=app, **merged)
| 36.668831 | 118 | 0.600673 | import os
import random
import matplotlib.cm as cm
import matplotlib.pyplot as plt
import networkx as nx
import numpy as np
from numpy.lib.arraysetops import isin
import torch
import yaml
from grave import plot_network, use_attributes
from tabulate import tabulate
from cogdl import oagbert
from cogdl.data import Graph
from cogdl.tasks import build_task
from cogdl.datasets import build_dataset_from_name, NodeDataset
from cogdl.models import build_model
from cogdl.options import get_default_args
from cogdl.datasets.rec_data import build_recommendation_data
class Pipeline(object):
    """Abstract base for CogDL application pipelines; subclasses implement
    ``__call__``."""
    def __init__(self, app: str, **kwargs):
        self.app = app
        self.kwargs = kwargs
    def __call__(self, **kwargs):
        raise NotImplementedError
class DatasetPipeline(Pipeline):
    """Pipeline whose entry point accepts one dataset name or a list of them."""
    def __init__(self, app: str, **kwargs):
        super(DatasetPipeline, self).__init__(app, **kwargs)
    def __call__(self, dataset, **kwargs):
        # Normalize a single name to a one-element list for `_call`.
        if isinstance(dataset, str):
            dataset = [dataset]
        return self._call(dataset, **kwargs)
class DatasetStatsPipeline(DatasetPipeline):
    """Print a table of basic statistics for each requested dataset."""
    def __init__(self, app: str, **kwargs):
        super(DatasetStatsPipeline, self).__init__(app, **kwargs)
    def _call(self, dataset=[], **kwargs):
        if isinstance(dataset, str):
            dataset = [dataset]
        tab_data = []
        col_names = [
            "Dataset",
            "#nodes",
            "#edges",
            "#features",
            "#classes",
            "#labeled data",
        ]
        for name in dataset:
            # NOTE(review): rebinding `dataset` here shadows the parameter;
            # iteration is unaffected but the name is misleading.
            dataset = build_dataset_from_name(name)
            data = dataset[0]
            tab_data.append(
                [
                    name,
                    data.x.shape[0],
                    data.edge_index[0].shape[0],
                    data.x.shape[1],
                    len(set(data.y.numpy())),
                    sum(data.train_mask.numpy()),
                ]
            )
        print(tabulate(tab_data, headers=col_names, tablefmt="psql"))
        return tab_data
class DatasetVisualPipeline(DatasetPipeline):
    """Sample a breadth-first ego network around a seed node and plot it."""
    def __init__(self, app: str, **kwargs):
        super(DatasetVisualPipeline, self).__init__(app, **kwargs)
    def _call(self, dataset="cora", seed=-1, depth=3, **kwargs):
        # Only the first dataset name is visualized.
        if isinstance(dataset, list):
            dataset = dataset[0]
        name = dataset
        dataset = build_dataset_from_name(name)
        data = dataset[0]
        # Rebuild the graph in networkx from the (row, col) edge index.
        G = nx.Graph()
        edge_index = torch.stack(data.edge_index)
        G.add_edges_from([tuple(edge_index[:, i].numpy()) for i in range(edge_index.shape[1])])
        # seed == -1 means "pick a random starting node".
        if seed == -1:
            seed = random.choice(list(G.nodes()))
        q = [seed]
        node_set = set([seed])
        node_index = {seed: 0}  # BFS depth of each visited node
        max_index = 1
        # Level-by-level BFS up to `depth` hops from the seed.
        for _ in range(depth):
            nq = []
            for x in q:
                for key in G[x].keys():
                    if key not in node_set:
                        nq.append(key)
                        node_set.add(key)
                        node_index[key] = node_index[x] + 1
            if len(nq) > 0:
                max_index += 1
            q = nq
        # Color/size nodes by BFS depth: closer to the seed = larger.
        cmap = cm.rainbow(np.linspace(0.0, 1.0, max_index))
        for node, index in node_index.items():
            G.nodes[node]["color"] = cmap[index]
            G.nodes[node]["size"] = (max_index - index) * 50
        pic_file = f"{name}.png"
        plt.subplots()
        plot_network(G.subgraph(list(node_set)), node_style=use_attributes())
        plt.savefig(pic_file)
        print(f"Sampled ego network saved to {pic_file}")
        # Returns the final BFS frontier (nodes at the deepest level).
        return q
class OAGBertInferencePipepline(Pipeline):
    """Run OAG-BERT tokenization + inference on input text sequences."""
    def __init__(self, app: str, model: str, **kwargs):
        super(OAGBertInferencePipepline, self).__init__(app, model=model, **kwargs)
        # Optional override; weights are loaded by default.
        load_weights = kwargs["load_weights"] if "load_weights" in kwargs else True
        self.tokenizer, self.bert_model = oagbert(model, load_weights=load_weights)
    def __call__(self, sequence, **kwargs):
        # Tokenize with padding and return the raw model outputs.
        tokens = self.tokenizer(sequence, return_tensors="pt", padding=True)
        outputs = self.bert_model(**tokens)
        return outputs
class GenerateEmbeddingPipeline(Pipeline):
    """Train an embedding method ("emb" shallow model or "gnn") and return
    the learned node embeddings for a user-supplied graph."""
    def __init__(self, app: str, model: str, **kwargs):
        super(GenerateEmbeddingPipeline, self).__init__(app, model=model, **kwargs)
        # match.yml pairs tasks with datasets/models; the blogcatalog entry
        # lists shallow embedding models, the cora entry lists GNN models.
        match_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "match.yml")
        with open(match_path, "r", encoding="utf8") as f:
            match = yaml.load(f, Loader=yaml.FullLoader)
        objective = match.get("unsupervised_node_classification", None)
        for pair_dict in objective:
            if "blogcatalog" in pair_dict["dataset"]:
                emb_models = pair_dict["model"]
            elif "cora" in pair_dict["dataset"]:
                gnn_models = pair_dict["model"]
        if model in emb_models:
            self.method_type = "emb"
            args = get_default_args(
                task="unsupervised_node_classification", dataset="blogcatalog", model=model, **kwargs
            )
        elif model in gnn_models:
            self.method_type = "gnn"
            args = get_default_args(task="unsupervised_node_classification", dataset="cora", model=model, **kwargs)
        else:
            print("Please choose a model from ", emb_models, "or", gnn_models)
            exit(0)
        self.data_path = kwargs.get("data_path", "tmp_data.pt")
        self.num_features = kwargs.get("num_features", None)
        if self.num_features is not None:
            args.num_features = self.num_features
        elif self.method_type == "gnn":
            # GNNs need the input feature dimension before model build.
            print("Please provide num_features for gnn model!")
            exit(0)
        args.model = args.model[0]
        self.model = build_model(args)
        self.trainer = self.model.get_trainer(args)
        if self.trainer is not None:
            self.trainer = self.trainer(args)
    def __call__(self, edge_index, x=None, edge_weight=None):
        """Compute embeddings for the graph given by *edge_index* (optionally
        with node features *x* and per-edge weights *edge_weight*)."""
        if self.method_type == "emb":
            G = nx.Graph()
            if edge_weight is not None:
                if isinstance(edge_index, np.ndarray):
                    edges = np.concatenate([edge_index, np.expand_dims(edge_weight, -1)], -1)
                elif isinstance(edge_index, torch.Tensor):
                    edges = torch.cat([edge_index, edge_weight.unsqueeze(-1)], -1)
                else:
                    print("Please provide edges via np.ndarray or torch.Tensor.")
                    return
                G.add_weighted_edges_from(edges.tolist())
            else:
                if not isinstance(edge_index, np.ndarray) and not isinstance(edge_index, torch.Tensor):
                    print("Please provide edges via np.ndarray or torch.Tensor.")
                    return
                G.add_edges_from(edge_index.tolist())
            embeddings = self.model.train(G)
        elif self.method_type == "gnn":
            num_nodes = edge_index.max().item() + 1
            if x is None:
                print("No input node features, use random features instead.")
                # BUG FIX: the random features were previously generated but
                # never assigned, so `x` stayed None downstream.
                x = np.random.randn(num_nodes, self.num_features)
            if isinstance(x, np.ndarray):
                x = torch.from_numpy(x).float()
            if isinstance(edge_index, np.ndarray):
                edge_index = torch.from_numpy(edge_index)
            edge_index = (edge_index[:, 0], edge_index[:, 1])
            data = Graph(x=x, edge_index=edge_index)
            # Round-trip through disk because NodeDataset loads from a path.
            torch.save(data, self.data_path)
            dataset = NodeDataset(path=self.data_path, scale_feat=False)
            embeddings = self.trainer.fit(self.model, dataset, evaluate=False)
            embeddings = embeddings.detach().cpu().numpy()
        return embeddings
class RecommendationPipepline(Pipeline):
    """Train a recommendation model and score items for user batches.

    NOTE: the constructor trains the model eagerly and caches the final
    user/item embeddings.
    """
    def __init__(self, app: str, model: str, **kwargs):
        super(RecommendationPipepline, self).__init__(app, model=model, **kwargs)
        if "data" in kwargs:
            # Raw interaction array; the last 100 rows double as both
            # validation and test splits.
            data = kwargs["data"]
            val_data = test_data = data[-100:, :]
            data = build_recommendation_data("custom", data, val_data, test_data)
            self.data_path = kwargs.get("data_path", "tmp_data.pt")
            # NOTE(review): this batch_size=128 is overwritten by the
            # batch_size=2048 default a few lines below.
            self.batch_size = kwargs.get("batch_size", 128)
            torch.save(data, self.data_path)
            self.dataset = NodeDataset(path=self.data_path, scale_feat=False)
        elif "dataset" in kwargs:
            dataset = kwargs.pop("dataset")
            self.dataset = build_dataset_from_name(dataset)
        else:
            print("Please provide recommendation data!")
            exit(0)
        self.batch_size = kwargs.get("batch_size", 2048)
        self.n_items = self.dataset[0].n_params["n_items"]
        args = get_default_args(task="recommendation", dataset="ali", model=model, **kwargs)
        args.model = args.model[0]
        # Train now; __call__ only scores with the resulting embeddings.
        task = build_task(args, dataset=self.dataset)
        task.train()
        self.model = task.model
        self.model.eval()
        self.user_emb, self.item_emb = self.model.generate()
    def __call__(self, user_batch, **kwargs):
        """Return top-k (item, score) lists keyed by user id."""
        user_batch = np.array(user_batch)
        user_batch = torch.from_numpy(user_batch).to(self.model.device)
        u_g_embeddings = self.user_emb[user_batch]
        # Score items in chunks of batch_size to bound memory use.
        n_item_batchs = self.n_items // self.batch_size + 1
        rate_batch = np.zeros(shape=(len(user_batch), self.n_items))
        i_count = 0
        for i_batch_id in range(n_item_batchs):
            i_start = i_batch_id * self.batch_size
            i_end = min((i_batch_id + 1) * self.batch_size, self.n_items)
            item_batch = torch.LongTensor(np.array(range(i_start, i_end))).view(i_end - i_start).to(self.model.device)
            i_g_embddings = self.item_emb[item_batch]
            i_rate_batch = self.model.rating(u_g_embeddings, i_g_embddings).detach().cpu()
            rate_batch[:, i_start:i_end] = i_rate_batch
            i_count += i_rate_batch.shape[1]
        topk = kwargs.get("topk", 10)
        results = {}
        # Per user: sort all item scores descending and keep the top-k pairs.
        for i in range(len(user_batch)):
            rate = list(zip(range(self.n_items), rate_batch[i]))
            rate.sort(key=lambda x: x[1], reverse=True)
            results[user_batch[i].item()] = [rate[j] for j in range(min(topk, len(rate)))]
        return results
# Registry mapping each supported app name to its Pipeline implementation
# and the default constructor arguments used when the caller supplies none.
SUPPORTED_APPS = {
    "dataset-stats": {"impl": DatasetStatsPipeline, "default": {"dataset": "cora"}},
    "dataset-visual": {"impl": DatasetVisualPipeline, "default": {"dataset": "cora"}},
    "oagbert": {"impl": OAGBertInferencePipepline, "default": {"model": "oagbert-v1"}},
    "generate-emb": {"impl": GenerateEmbeddingPipeline, "default": {"model": "prone"}},
    "recommendation": {"impl": RecommendationPipepline, "default": {"model": "lightgcn"}},
}
def check_app(app: str):
    """Return the registry entry for *app*, raising KeyError if unsupported."""
    if app in SUPPORTED_APPS:
        targeted_app = SUPPORTED_APPS[app]
        return targeted_app
    raise KeyError("Unknown app {}, available apps are {}".format(app, list(SUPPORTED_APPS.keys())))
def pipeline(app: str, **kwargs) -> Pipeline:
    """Instantiate the pipeline registered for *app*; caller kwargs override
    the registry defaults."""
    targeted_app = check_app(app)
    task_class = targeted_app["impl"]
    default_args = targeted_app["default"].copy()
    default_args.update(kwargs)
    return task_class(app=app, **default_args)
| true | true |
1c3b09daa52dd6908eb9f534be81ec1437d867aa | 18,070 | py | Python | tests/unit/scm/test_git.py | larsmelcher/dvc | 71bae7e4affbc5f37aceb9412dd60822201a942d | [
"Apache-2.0"
] | 1 | 2019-10-04T13:46:03.000Z | 2019-10-04T13:46:03.000Z | tests/unit/scm/test_git.py | Indraji-Ushantha/dvc | cfcb7e12942d826c0c84c01966c0a47483b1d618 | [
"Apache-2.0"
] | 32 | 2021-10-21T17:14:55.000Z | 2022-03-31T17:18:19.000Z | tests/unit/scm/test_git.py | Indraji-Ushantha/dvc | cfcb7e12942d826c0c84c01966c0a47483b1d618 | [
"Apache-2.0"
] | null | null | null | import os
import pytest
from dvc.scm.base import SCMError
# Behaves the same as SCM but will test against all supported Git backends.
# tmp_dir.scm will still contain a default SCM instance.
@pytest.fixture(params=["gitpython", "dulwich", "pygit2"])
def git(tmp_dir, scm, request):
    from dvc.scm.git import Git
    # One Git instance per backend; the backend name is stashed on the
    # object so tests can branch on it if needed.
    git_ = Git(os.fspath(tmp_dir), backends=[request.param])
    git_.test_backend = request.param
    yield git_
    # Teardown: release backend resources after the test.
    git_.close()
@pytest.mark.parametrize(
    "path, expected",
    [
        (os.path.join("path", "to", ".gitignore"), True),
        (os.path.join("path", "to", ".git", "internal", "file"), True),
        (os.path.join("some", "non-.git", "file"), False),
    ],
    ids=["gitignore_file", "git_internal_file", "non_git_file"],
)
def test_belongs_to_scm(scm, path, expected):
    # .gitignore and anything under .git/ belong to the SCM; other paths don't.
    assert scm.belongs_to_scm(path) == expected
def test_walk_with_submodules(tmp_dir, scm, git_dir):
    # Walking HEAD should list the submodule entry itself but not descend
    # into its contents.
    git_dir.scm_gen(
        {"foo": "foo", "bar": "bar", "dir": {"data": "data"}},
        commit="add dir and files",
    )
    scm.gitpython.repo.create_submodule(
        "submodule", "submodule", url=os.fspath(git_dir)
    )
    scm.commit("added submodule")
    files = []
    dirs = []
    fs = scm.get_fs("HEAD")
    for _, dnames, fnames in fs.walk("."):
        dirs.extend(dnames)
        files.extend(fnames)
    # currently we don't walk through submodules
    assert not dirs
    assert set(files) == {".gitmodules", "submodule"}
def test_walk_onerror(tmp_dir, scm):
    # Without onerror, walking a bad path is silently empty; with onerror
    # the underlying OSError is surfaced.
    def onerror(exc):
        raise exc
    tmp_dir.scm_gen({"foo": "foo"}, commit="init")
    fs = scm.get_fs("HEAD")
    # path does not exist
    for _ in fs.walk("dir"):
        pass
    with pytest.raises(OSError):
        for _ in fs.walk("dir", onerror=onerror):
            pass
    # path is not a directory
    for _ in fs.walk("foo"):
        pass
    with pytest.raises(OSError):
        for _ in fs.walk("foo", onerror=onerror):
            pass
def test_is_tracked(tmp_dir, scm):
    # is_tracked must be True for committed files/dirs (including nested
    # ones) and False for files merely present in the workspace.
    tmp_dir.scm_gen(
        {
            "tracked": "tracked",
            "dir": {"data": "data", "subdir": {"subdata": "subdata"}},
        },
        commit="add dirs and files",
    )
    tmp_dir.gen({"untracked": "untracked", "dir": {"untracked": "untracked"}})
    # sanity check
    assert (tmp_dir / "untracked").exists()
    assert (tmp_dir / "tracked").exists()
    assert (tmp_dir / "dir" / "untracked").exists()
    assert (tmp_dir / "dir" / "data").exists()
    assert (tmp_dir / "dir" / "subdir" / "subdata").exists()
    assert not scm.is_tracked("untracked")
    assert not scm.is_tracked(os.path.join("dir", "untracked"))
    assert scm.is_tracked("tracked")
    assert scm.is_tracked("dir")
    assert scm.is_tracked(os.path.join("dir", "data"))
    assert scm.is_tracked(os.path.join("dir", "subdir"))
    assert scm.is_tracked(os.path.join("dir", "subdir", "subdata"))
def test_is_tracked_unicode(tmp_dir, scm):
    # Tracking queries must handle non-ASCII file names correctly.
    tmp_dir.scm_gen("ṭṝḁḉḵḗḋ", "tracked", commit="add unicode")
    tmp_dir.gen("ṳṋṭṝḁḉḵḗḋ", "untracked")
    assert scm.is_tracked("ṭṝḁḉḵḗḋ")
    assert not scm.is_tracked("ṳṋṭṝḁḉḵḗḋ")
def test_no_commits(tmp_dir):
    # no_commits is True for a freshly initialized repo and flips to False
    # after the first commit.
    from dvc.scm.git import Git
    from tests.dir_helpers import git_init
    git_init(".")
    assert Git().no_commits
    tmp_dir.gen("foo", "foo")
    Git().add(["foo"])
    Git().commit("foo")
    assert not Git().no_commits
def test_branch_revs(tmp_dir, scm):
    # branch_revs yields commits newest-first from the branch tip down to
    # (but excluding) the given base revision.
    tmp_dir.scm_gen({"file": "0"}, commit="init")
    init_rev = scm.get_rev()
    expected = []
    for i in range(1, 5):
        tmp_dir.scm_gen({"file": f"{i}"}, commit=f"{i}")
        expected.append(scm.get_rev())
    for rev in scm.branch_revs("master", init_rev):
        assert rev == expected.pop()
    assert len(expected) == 0
def test_set_ref(tmp_dir, git):
    # set_ref writes loose refs, enforces old_ref compare-and-swap, and
    # supports symbolic refs.
    tmp_dir.scm_gen({"file": "0"}, commit="init")
    init_rev = tmp_dir.scm.get_rev()
    tmp_dir.scm_gen({"file": "1"}, commit="commit")
    commit_rev = tmp_dir.scm.get_rev()
    git.set_ref("refs/foo/bar", init_rev)
    assert (
        init_rev
        == (tmp_dir / ".git" / "refs" / "foo" / "bar").read_text().strip()
    )
    # old_ref mismatch must be rejected.
    with pytest.raises(SCMError):
        git.set_ref("refs/foo/bar", commit_rev, old_ref=commit_rev)
    git.set_ref("refs/foo/bar", commit_rev, old_ref=init_rev)
    assert (
        commit_rev
        == (tmp_dir / ".git" / "refs" / "foo" / "bar").read_text().strip()
    )
    git.set_ref("refs/foo/baz", "refs/heads/master", symbolic=True)
    assert (
        "ref: refs/heads/master"
        == (tmp_dir / ".git" / "refs" / "foo" / "baz").read_text().strip()
    )
def test_set_ref_with_message(tmp_dir, git):
    # The message passed to set_ref must appear in the ref's reflog.
    tmp_dir.scm_gen({"file": "0"}, commit="init")
    init_rev = tmp_dir.scm.get_rev()
    tmp_dir.scm_gen({"file": "1"}, commit="commit")
    commit_rev = tmp_dir.scm.get_rev()
    git.set_ref("refs/foo/bar", init_rev, message="init message")
    assert (
        "init message"
        in (tmp_dir / ".git" / "logs" / "refs" / "foo" / "bar").read_text()
    )
    git.set_ref("refs/foo/bar", commit_rev, message="modify message")
    assert (
        "modify message"
        in (tmp_dir / ".git" / "logs" / "refs" / "foo" / "bar").read_text()
    )
def test_get_ref(tmp_dir, git):
    # get_ref resolves direct and symbolic refs (follow=False returns the
    # symbolic target), and returns None for missing refs.
    tmp_dir.scm_gen({"file": "0"}, commit="init")
    init_rev = tmp_dir.scm.get_rev()
    tmp_dir.gen(
        {
            os.path.join(".git", "refs", "foo", "bar"): init_rev,
            os.path.join(
                ".git", "refs", "foo", "baz"
            ): "ref: refs/heads/master",
        }
    )
    assert init_rev == git.get_ref("refs/foo/bar")
    assert init_rev == git.get_ref("refs/foo/baz")
    assert "refs/heads/master" == git.get_ref("refs/foo/baz", follow=False)
    assert git.get_ref("refs/foo/qux") is None
def test_remove_ref(tmp_dir, git):
    # remove_ref enforces old_ref compare-and-swap before deleting.
    tmp_dir.scm_gen({"file": "0"}, commit="init")
    init_rev = tmp_dir.scm.get_rev()
    tmp_dir.gen(os.path.join(".git", "refs", "foo", "bar"), init_rev)
    tmp_dir.scm_gen({"file": "1"}, commit="commit")
    commit_rev = tmp_dir.scm.get_rev()
    with pytest.raises(SCMError):
        git.remove_ref("refs/foo/bar", old_ref=commit_rev)
    git.remove_ref("refs/foo/bar", old_ref=init_rev)
    assert not (tmp_dir / ".git" / "refs" / "foo" / "bar").exists()
def test_refs_containing(tmp_dir, scm):
    # All refs pointing at (or containing) the revision are reported,
    # including custom refs outside refs/heads.
    tmp_dir.scm_gen({"file": "0"}, commit="init")
    init_rev = scm.get_rev()
    tmp_dir.gen(
        {
            os.path.join(".git", "refs", "foo", "bar"): init_rev,
            os.path.join(".git", "refs", "foo", "baz"): init_rev,
        }
    )
    expected = {"refs/foo/bar", "refs/foo/baz", "refs/heads/master"}
    assert expected == set(scm.get_refs_containing(init_rev))
@pytest.mark.parametrize("use_url", [True, False])
def test_push_refspec(tmp_dir, scm, make_tmp_dir, use_url):
    # push_refspec works with a remote name or raw URL, pushes single refs
    # and whole prefixes, and deletes with a None source.
    tmp_dir.scm_gen({"file": "0"}, commit="init")
    init_rev = scm.get_rev()
    tmp_dir.gen(
        {
            os.path.join(".git", "refs", "foo", "bar"): init_rev,
            os.path.join(".git", "refs", "foo", "baz"): init_rev,
        }
    )
    remote_dir = make_tmp_dir("git-remote", scm=True)
    url = "file://{}".format(remote_dir.resolve().as_posix())
    scm.gitpython.repo.create_remote("origin", url)
    # Unknown remote name must fail.
    with pytest.raises(SCMError):
        scm.push_refspec("bad-remote", "refs/foo/bar", "refs/foo/bar")
    remote = url if use_url else "origin"
    scm.push_refspec(remote, "refs/foo/bar", "refs/foo/bar")
    assert init_rev == remote_dir.scm.get_ref("refs/foo/bar")
    remote_dir.scm.checkout("refs/foo/bar")
    assert init_rev == remote_dir.scm.get_rev()
    assert "0" == (remote_dir / "file").read_text()
    # Prefix push transfers every ref under refs/foo/.
    scm.push_refspec(remote, "refs/foo/", "refs/foo/")
    assert init_rev == remote_dir.scm.get_ref("refs/foo/baz")
    # None source deletes the remote ref.
    scm.push_refspec(remote, None, "refs/foo/baz")
    assert remote_dir.scm.get_ref("refs/foo/baz") is None
def test_fetch_refspecs(tmp_dir, scm, make_tmp_dir):
    """fetch_refspecs pulls multiple refs (and their objects) from a URL."""
    remote_dir = make_tmp_dir("git-remote", scm=True)
    url = "file://{}".format(remote_dir.resolve().as_posix())
    remote_dir.scm_gen({"file": "0"}, commit="init")
    init_rev = remote_dir.scm.get_rev()
    # create two custom refs on the remote pointing at the same commit
    remote_dir.gen(
        {
            os.path.join(".git", "refs", "foo", "bar"): init_rev,
            os.path.join(".git", "refs", "foo", "baz"): init_rev,
        }
    )
    scm.fetch_refspecs(
        url, ["refs/foo/bar:refs/foo/bar", "refs/foo/baz:refs/foo/baz"]
    )
    # both refs now exist locally and resolve to the remote commit
    assert init_rev == scm.get_ref("refs/foo/bar")
    assert init_rev == scm.get_ref("refs/foo/baz")
    # NOTE(review): this checkout runs on the *remote* repo, not the local
    # one that just fetched — presumably the local repo was intended here;
    # confirm against upstream.
    remote_dir.scm.checkout("refs/foo/bar")
    assert init_rev == remote_dir.scm.get_rev()
    assert "0" == (remote_dir / "file").read_text()
def test_list_all_commits(tmp_dir, scm):
    """Commits reachable only via custom refs (refs/foo/*) are excluded."""
    tmp_dir.scm_gen("1", "1", commit="1")
    rev_a = scm.get_rev()
    tmp_dir.scm_gen("2", "2", commit="2")
    rev_b = scm.get_rev()
    scm.tag("tag")  # keeps rev_b reachable via the tag after the reset below
    tmp_dir.scm_gen("3", "3", commit="3")
    rev_c = scm.get_rev()
    # drop rev_c from master; it stays referenced only by refs/foo/bar
    scm.gitpython.git.reset(rev_a, hard=True)
    scm.set_ref("refs/foo/bar", rev_c)
    # rev_c must not appear even though a (custom) ref still points at it
    assert {rev_a, rev_b} == set(scm.list_all_commits())
def test_ignore_remove_empty(tmp_dir, scm, git):
    """ignore_remove deletes .gitignore once its last entry is removed."""
    if git.test_backend == "pygit2":
        pytest.skip()

    entries = [
        {"entry": "/foo1", "path": f"{tmp_dir}/foo1"},
        {"entry": "/foo2", "path": f"{tmp_dir}/foo2"},
    ]
    gitignore = tmp_dir / ".gitignore"
    with open(gitignore, "a") as fobj:
        fobj.writelines(item["entry"] + "\n" for item in entries)

    assert gitignore.exists()
    # removing one of two entries keeps the file around
    git.ignore_remove(entries[0]["path"])
    assert gitignore.exists()
    # removing the final entry deletes the now-empty file
    git.ignore_remove(entries[1]["path"])
    assert not gitignore.exists()
@pytest.mark.skipif(
    os.name == "nt", reason="Git hooks not supported on Windows"
)
@pytest.mark.parametrize("hook", ["pre-commit", "commit-msg"])
def test_commit_no_verify(tmp_dir, scm, git, hook):
    """commit(no_verify=True) bypasses failing pre-commit/commit-msg hooks."""
    import stat

    if git.test_backend == "pygit2":
        pytest.skip()
    hook_file = os.path.join(".git", "hooks", hook)
    # install an executable hook script that always fails
    tmp_dir.gen(hook_file, "#!/usr/bin/env python\nimport sys\nsys.exit(1)")
    os.chmod(hook_file, stat.S_IREAD | stat.S_IWRITE | stat.S_IEXEC)

    tmp_dir.gen("foo", "foo")
    git.add(["foo"])
    # the failing hook must abort a normal commit...
    with pytest.raises(SCMError):
        git.commit("commit foo")
    # ...but is skipped entirely with no_verify=True
    git.commit("commit foo", no_verify=True)
@pytest.mark.parametrize("squash", [True, False])
def test_merge(tmp_dir, scm, git, squash):
    """merge() refuses a dirty working tree and honors commit/squash modes."""
    from dvc.scm.base import MergeConflictError

    if git.test_backend == "dulwich":
        pytest.skip()
    tmp_dir.scm_gen("foo", "foo", commit="init")
    init_rev = scm.get_rev()
    scm.checkout("branch", create_new=True)
    tmp_dir.scm_gen("foo", "bar", commit="bar")
    branch = scm.resolve_rev("branch")
    scm.checkout("master")

    # merging on top of conflicting local changes must fail
    with pytest.raises(MergeConflictError):
        tmp_dir.scm_gen("foo", "baz", commit="baz")
        git.merge(branch, commit=not squash, squash=squash, msg="merge")
    # back on a clean state, the merge succeeds
    scm.gitpython.git.reset(init_rev, hard=True)

    merge_rev = git.merge(
        branch, commit=not squash, squash=squash, msg="merge"
    )

    assert (tmp_dir / "foo").read_text() == "bar"
    if squash:
        # squash leaves the changes staged without creating a commit
        assert merge_rev is None
        assert scm.get_rev() == init_rev
    else:
        assert scm.get_rev() == merge_rev
def test_checkout_index(tmp_dir, scm, git):
    """checkout_index restores selected paths (or everything) from the index.

    Also verifies that relative paths are resolved from a subdirectory cwd.
    """
    if git.test_backend == "dulwich":
        pytest.skip()
    tmp_dir.scm_gen(
        {"foo": "foo", "bar": "bar", "dir": {"baz": "baz"}}, commit="init"
    )
    tmp_dir.gen({"foo": "baz", "dir": {"baz": "foo"}})

    # selective restore with paths given relative to a subdirectory
    with (tmp_dir / "dir").chdir():
        git.checkout_index([os.path.join("..", "foo"), "baz"], force=True)
    assert (tmp_dir / "foo").read_text() == "foo"
    assert (tmp_dir / "dir" / "baz").read_text() == "baz"

    # with no paths given, the entire working tree is restored
    tmp_dir.gen({"foo": "baz", "bar": "baz", "dir": {"baz": "foo"}})
    git.checkout_index(force=True)
    assert (tmp_dir / "foo").read_text() == "foo"
    assert (tmp_dir / "bar").read_text() == "bar"
    assert (tmp_dir / "dir" / "baz").read_text() == "baz"
@pytest.mark.parametrize(
    "strategy, expected", [("ours", "baz"), ("theirs", "bar")]
)
def test_checkout_index_conflicts(tmp_dir, scm, git, strategy, expected):
    """After a conflicted merge, checkout_index(ours/theirs) picks a side."""
    from dvc.scm.base import MergeConflictError

    if git.test_backend == "dulwich":
        pytest.skip()

    # edit the same file on both branches so the merge must conflict
    tmp_dir.scm_gen({"file": "foo"}, commit="init")
    scm.checkout("branch", create_new=True)
    tmp_dir.scm_gen({"file": "bar"}, commit="bar")
    bar = scm.get_rev()
    scm.checkout("master")
    tmp_dir.scm_gen({"file": "baz"}, commit="baz")

    # NOTE(review): if the merge ever completed cleanly, the except block
    # would be skipped silently and only the final assert would catch it —
    # consider failing explicitly when no MergeConflictError is raised.
    try:
        git.merge(bar, commit=False, squash=True)
    except MergeConflictError:
        if strategy == "ours":
            git.checkout_index(ours=True)
        else:
            git.checkout_index(theirs=True)

    assert (tmp_dir / "file").read_text() == expected
def test_resolve_rev(tmp_dir, scm, make_tmp_dir, git):
    """resolve_rev handles SHAs, short SHAs, HEAD, branches, and remote refs.

    Names that are unknown, or ambiguous across remotes, must raise RevError.
    """
    from dvc.scm.base import RevError

    if git.test_backend == "dulwich":
        pytest.skip()

    remote_dir = make_tmp_dir("git-remote", scm=True)
    url = "file://{}".format(remote_dir.resolve().as_posix())
    scm.gitpython.repo.create_remote("origin", url)
    scm.gitpython.repo.create_remote("upstream", url)

    tmp_dir.scm_gen({"file": "0"}, commit="init")
    init_rev = scm.get_rev()
    tmp_dir.scm_gen({"file": "1"}, commit="1")
    rev = scm.get_rev()
    scm.checkout("branch", create_new=True)
    # hand-craft refs: a custom ref, a remote ref unique to origin ("bar"),
    # and a name present on both remotes with different targets ("baz")
    tmp_dir.gen(
        {
            os.path.join(".git", "refs", "foo"): rev,
            os.path.join(".git", "refs", "remotes", "origin", "bar"): rev,
            os.path.join(".git", "refs", "remotes", "origin", "baz"): rev,
            os.path.join(
                ".git", "refs", "remotes", "upstream", "baz"
            ): init_rev,
        }
    )

    assert git.resolve_rev(rev) == rev
    assert git.resolve_rev(rev[:7]) == rev  # short SHA
    assert git.resolve_rev("HEAD") == rev
    assert git.resolve_rev("branch") == rev
    assert git.resolve_rev("refs/foo") == rev
    assert git.resolve_rev("bar") == rev  # unique across remotes
    assert git.resolve_rev("origin/baz") == rev  # remote-qualified name

    with pytest.raises(RevError):
        git.resolve_rev("qux")  # unknown name

    with pytest.raises(RevError):
        git.resolve_rev("baz")  # ambiguous: exists on origin and upstream
def test_checkout(tmp_dir, scm, git):
    """checkout supports new branches, detached HEAD, and short SHAs."""
    if git.test_backend == "dulwich":
        pytest.skip()
    tmp_dir.scm_gen({"foo": "foo"}, commit="foo")
    foo_rev = scm.get_rev()
    tmp_dir.scm_gen("foo", "bar", commit="bar")
    bar_rev = scm.get_rev()

    git.checkout("branch", create_new=True)
    assert git.get_ref("HEAD", follow=False) == "refs/heads/branch"
    assert (tmp_dir / "foo").read_text() == "bar"

    # detach=True leaves HEAD pointing at the commit, not a branch ref
    git.checkout("master", detach=True)
    assert git.get_ref("HEAD", follow=False) == bar_rev

    git.checkout("master")
    assert git.get_ref("HEAD", follow=False) == "refs/heads/master"

    # a short SHA detaches onto the resolved commit
    git.checkout(foo_rev[:7])
    assert git.get_ref("HEAD", follow=False) == foo_rev
    assert (tmp_dir / "foo").read_text() == "foo"
def test_reset(tmp_dir, scm, git):
    """reset() unstages; reset(hard=True) also reverts; paths limit scope."""
    if git.test_backend == "dulwich":
        pytest.skip()

    tmp_dir.scm_gen({"foo": "foo", "dir": {"baz": "baz"}}, commit="init")

    tmp_dir.gen({"foo": "bar", "dir": {"baz": "bar"}})
    scm.add(["foo", os.path.join("dir", "baz")])
    # plain reset keeps working-tree edits but empties the index
    git.reset()
    assert (tmp_dir / "foo").read_text() == "bar"
    assert (tmp_dir / "dir" / "baz").read_text() == "bar"
    staged, unstaged, _ = scm.status()
    assert len(staged) == 0
    assert set(unstaged) == {"foo", "dir/baz"}

    scm.add(["foo", os.path.join("dir", "baz")])
    # hard reset reverts both the index and the working tree
    git.reset(hard=True)
    assert (tmp_dir / "foo").read_text() == "foo"
    assert (tmp_dir / "dir" / "baz").read_text() == "baz"
    staged, unstaged, _ = scm.status()
    assert len(staged) == 0
    assert len(unstaged) == 0

    tmp_dir.gen({"foo": "bar", "bar": "bar", "dir": {"baz": "bar"}})
    scm.add(["foo", "bar", os.path.join("dir", "baz")])
    # path-limited reset (relative to a subdirectory cwd) unstages only the
    # named files; "bar" remains staged
    with (tmp_dir / "dir").chdir():
        git.reset(paths=[os.path.join("..", "foo"), os.path.join("baz")])
    assert (tmp_dir / "foo").read_text() == "bar"
    assert (tmp_dir / "bar").read_text() == "bar"
    assert (tmp_dir / "dir" / "baz").read_text() == "bar"
    staged, unstaged, _ = scm.status()
    assert len(staged) == 1
    assert len(unstaged) == 2
def test_remind_to_track(scm, caplog):
    """Filenames with spaces or non-ASCII get quoted in the reminder."""
    scm.files_to_track = ["fname with spaces.txt", "тест", "foo"]
    scm.remind_to_track()
    expected = "git add 'fname with spaces.txt' 'тест' foo"
    assert expected in caplog.text
def test_add(tmp_dir, scm, git):
    """add() stages given paths; add(update=True) restages tracked files only."""
    if git.test_backend == "pygit2":
        pytest.skip()

    tmp_dir.gen({"foo": "foo", "bar": "bar", "dir": {"baz": "baz"}})
    git.add(["foo", "dir"])
    staged, unstaged, untracked = scm.status()
    assert set(staged["add"]) == {"foo", "dir/baz"}
    assert len(unstaged) == 0
    assert len(untracked) == 1  # "bar" was never added

    scm.commit("commit")
    tmp_dir.gen({"foo": "bar", "dir": {"baz": "bar"}})
    git.add([], update=True)
    # BUG FIX: re-capture `untracked` here and below — the original
    # discarded it (`staged, unstaged, _ = ...`) and then asserted
    # against the stale value from the first status() call.
    staged, unstaged, untracked = scm.status()
    assert set(staged["modify"]) == {"foo", "dir/baz"}
    assert len(unstaged) == 0
    assert len(untracked) == 1

    scm.reset()
    git.add(["dir"], update=True)
    # only the path under "dir" gets restaged; "foo" stays unstaged
    staged, unstaged, untracked = scm.status()
    assert set(staged["modify"]) == {"dir/baz"}
    assert len(unstaged) == 1
    assert len(untracked) == 1
@pytest.mark.parametrize("use_sha", [True, False])
def test_pygit_resolve_refish(tmp_dir, scm, git, use_sha):
    """pygit2 _resolve_refish peels an annotated tag down to its commit."""
    import pygit2

    if git.test_backend != "pygit2":
        pytest.skip()
    tmp_dir.scm_gen("foo", "foo", commit="foo")
    head = scm.get_rev()
    tag = "my_tag"
    scm.gitpython.git.tag("-a", tag, "-m", "create annotated tag")

    if use_sha:
        # refish will be annotated tag SHA (not commit SHA)
        ref = git.pygit2.repo.references.get(f"refs/tags/{tag}")
        refish = str(ref.target)
    else:
        refish = tag
    # sanity: the annotated tag object differs from the commit it tags
    assert refish != head
    commit, ref = git.pygit2._resolve_refish(refish)
    assert isinstance(commit, pygit2.Commit)
    assert str(commit.id) == head
    if not use_sha:
        assert ref.name == f"refs/tags/{tag}"
| 30.994854 | 78 | 0.603154 | import os
import pytest
from dvc.scm.base import SCMError
@pytest.fixture(params=["gitpython", "dulwich", "pygit2"])
def git(tmp_dir, scm, request):
from dvc.scm.git import Git
git_ = Git(os.fspath(tmp_dir), backends=[request.param])
git_.test_backend = request.param
yield git_
git_.close()
@pytest.mark.parametrize(
"path, expected",
[
(os.path.join("path", "to", ".gitignore"), True),
(os.path.join("path", "to", ".git", "internal", "file"), True),
(os.path.join("some", "non-.git", "file"), False),
],
ids=["gitignore_file", "git_internal_file", "non_git_file"],
)
def test_belongs_to_scm(scm, path, expected):
assert scm.belongs_to_scm(path) == expected
def test_walk_with_submodules(tmp_dir, scm, git_dir):
git_dir.scm_gen(
{"foo": "foo", "bar": "bar", "dir": {"data": "data"}},
commit="add dir and files",
)
scm.gitpython.repo.create_submodule(
"submodule", "submodule", url=os.fspath(git_dir)
)
scm.commit("added submodule")
files = []
dirs = []
fs = scm.get_fs("HEAD")
for _, dnames, fnames in fs.walk("."):
dirs.extend(dnames)
files.extend(fnames)
assert not dirs
assert set(files) == {".gitmodules", "submodule"}
def test_walk_onerror(tmp_dir, scm):
def onerror(exc):
raise exc
tmp_dir.scm_gen({"foo": "foo"}, commit="init")
fs = scm.get_fs("HEAD")
# path does not exist
for _ in fs.walk("dir"):
pass
with pytest.raises(OSError):
for _ in fs.walk("dir", onerror=onerror):
pass
# path is not a directory
for _ in fs.walk("foo"):
pass
with pytest.raises(OSError):
for _ in fs.walk("foo", onerror=onerror):
pass
def test_is_tracked(tmp_dir, scm):
tmp_dir.scm_gen(
{
"tracked": "tracked",
"dir": {"data": "data", "subdir": {"subdata": "subdata"}},
},
commit="add dirs and files",
)
tmp_dir.gen({"untracked": "untracked", "dir": {"untracked": "untracked"}})
# sanity check
assert (tmp_dir / "untracked").exists()
assert (tmp_dir / "tracked").exists()
assert (tmp_dir / "dir" / "untracked").exists()
assert (tmp_dir / "dir" / "data").exists()
assert (tmp_dir / "dir" / "subdir" / "subdata").exists()
assert not scm.is_tracked("untracked")
assert not scm.is_tracked(os.path.join("dir", "untracked"))
assert scm.is_tracked("tracked")
assert scm.is_tracked("dir")
assert scm.is_tracked(os.path.join("dir", "data"))
assert scm.is_tracked(os.path.join("dir", "subdir"))
assert scm.is_tracked(os.path.join("dir", "subdir", "subdata"))
def test_is_tracked_unicode(tmp_dir, scm):
tmp_dir.scm_gen("ṭṝḁḉḵḗḋ", "tracked", commit="add unicode")
tmp_dir.gen("ṳṋṭṝḁḉḵḗḋ", "untracked")
assert scm.is_tracked("ṭṝḁḉḵḗḋ")
assert not scm.is_tracked("ṳṋṭṝḁḉḵḗḋ")
def test_no_commits(tmp_dir):
from dvc.scm.git import Git
from tests.dir_helpers import git_init
git_init(".")
assert Git().no_commits
tmp_dir.gen("foo", "foo")
Git().add(["foo"])
Git().commit("foo")
assert not Git().no_commits
def test_branch_revs(tmp_dir, scm):
tmp_dir.scm_gen({"file": "0"}, commit="init")
init_rev = scm.get_rev()
expected = []
for i in range(1, 5):
tmp_dir.scm_gen({"file": f"{i}"}, commit=f"{i}")
expected.append(scm.get_rev())
for rev in scm.branch_revs("master", init_rev):
assert rev == expected.pop()
assert len(expected) == 0
def test_set_ref(tmp_dir, git):
tmp_dir.scm_gen({"file": "0"}, commit="init")
init_rev = tmp_dir.scm.get_rev()
tmp_dir.scm_gen({"file": "1"}, commit="commit")
commit_rev = tmp_dir.scm.get_rev()
git.set_ref("refs/foo/bar", init_rev)
assert (
init_rev
== (tmp_dir / ".git" / "refs" / "foo" / "bar").read_text().strip()
)
with pytest.raises(SCMError):
git.set_ref("refs/foo/bar", commit_rev, old_ref=commit_rev)
git.set_ref("refs/foo/bar", commit_rev, old_ref=init_rev)
assert (
commit_rev
== (tmp_dir / ".git" / "refs" / "foo" / "bar").read_text().strip()
)
git.set_ref("refs/foo/baz", "refs/heads/master", symbolic=True)
assert (
"ref: refs/heads/master"
== (tmp_dir / ".git" / "refs" / "foo" / "baz").read_text().strip()
)
def test_set_ref_with_message(tmp_dir, git):
tmp_dir.scm_gen({"file": "0"}, commit="init")
init_rev = tmp_dir.scm.get_rev()
tmp_dir.scm_gen({"file": "1"}, commit="commit")
commit_rev = tmp_dir.scm.get_rev()
git.set_ref("refs/foo/bar", init_rev, message="init message")
assert (
"init message"
in (tmp_dir / ".git" / "logs" / "refs" / "foo" / "bar").read_text()
)
git.set_ref("refs/foo/bar", commit_rev, message="modify message")
assert (
"modify message"
in (tmp_dir / ".git" / "logs" / "refs" / "foo" / "bar").read_text()
)
def test_get_ref(tmp_dir, git):
tmp_dir.scm_gen({"file": "0"}, commit="init")
init_rev = tmp_dir.scm.get_rev()
tmp_dir.gen(
{
os.path.join(".git", "refs", "foo", "bar"): init_rev,
os.path.join(
".git", "refs", "foo", "baz"
): "ref: refs/heads/master",
}
)
assert init_rev == git.get_ref("refs/foo/bar")
assert init_rev == git.get_ref("refs/foo/baz")
assert "refs/heads/master" == git.get_ref("refs/foo/baz", follow=False)
assert git.get_ref("refs/foo/qux") is None
def test_remove_ref(tmp_dir, git):
tmp_dir.scm_gen({"file": "0"}, commit="init")
init_rev = tmp_dir.scm.get_rev()
tmp_dir.gen(os.path.join(".git", "refs", "foo", "bar"), init_rev)
tmp_dir.scm_gen({"file": "1"}, commit="commit")
commit_rev = tmp_dir.scm.get_rev()
with pytest.raises(SCMError):
git.remove_ref("refs/foo/bar", old_ref=commit_rev)
git.remove_ref("refs/foo/bar", old_ref=init_rev)
assert not (tmp_dir / ".git" / "refs" / "foo" / "bar").exists()
def test_refs_containing(tmp_dir, scm):
tmp_dir.scm_gen({"file": "0"}, commit="init")
init_rev = scm.get_rev()
tmp_dir.gen(
{
os.path.join(".git", "refs", "foo", "bar"): init_rev,
os.path.join(".git", "refs", "foo", "baz"): init_rev,
}
)
expected = {"refs/foo/bar", "refs/foo/baz", "refs/heads/master"}
assert expected == set(scm.get_refs_containing(init_rev))
@pytest.mark.parametrize("use_url", [True, False])
def test_push_refspec(tmp_dir, scm, make_tmp_dir, use_url):
tmp_dir.scm_gen({"file": "0"}, commit="init")
init_rev = scm.get_rev()
tmp_dir.gen(
{
os.path.join(".git", "refs", "foo", "bar"): init_rev,
os.path.join(".git", "refs", "foo", "baz"): init_rev,
}
)
remote_dir = make_tmp_dir("git-remote", scm=True)
url = "file://{}".format(remote_dir.resolve().as_posix())
scm.gitpython.repo.create_remote("origin", url)
with pytest.raises(SCMError):
scm.push_refspec("bad-remote", "refs/foo/bar", "refs/foo/bar")
remote = url if use_url else "origin"
scm.push_refspec(remote, "refs/foo/bar", "refs/foo/bar")
assert init_rev == remote_dir.scm.get_ref("refs/foo/bar")
remote_dir.scm.checkout("refs/foo/bar")
assert init_rev == remote_dir.scm.get_rev()
assert "0" == (remote_dir / "file").read_text()
scm.push_refspec(remote, "refs/foo/", "refs/foo/")
assert init_rev == remote_dir.scm.get_ref("refs/foo/baz")
scm.push_refspec(remote, None, "refs/foo/baz")
assert remote_dir.scm.get_ref("refs/foo/baz") is None
def test_fetch_refspecs(tmp_dir, scm, make_tmp_dir):
remote_dir = make_tmp_dir("git-remote", scm=True)
url = "file://{}".format(remote_dir.resolve().as_posix())
remote_dir.scm_gen({"file": "0"}, commit="init")
init_rev = remote_dir.scm.get_rev()
remote_dir.gen(
{
os.path.join(".git", "refs", "foo", "bar"): init_rev,
os.path.join(".git", "refs", "foo", "baz"): init_rev,
}
)
scm.fetch_refspecs(
url, ["refs/foo/bar:refs/foo/bar", "refs/foo/baz:refs/foo/baz"]
)
assert init_rev == scm.get_ref("refs/foo/bar")
assert init_rev == scm.get_ref("refs/foo/baz")
remote_dir.scm.checkout("refs/foo/bar")
assert init_rev == remote_dir.scm.get_rev()
assert "0" == (remote_dir / "file").read_text()
def test_list_all_commits(tmp_dir, scm):
tmp_dir.scm_gen("1", "1", commit="1")
rev_a = scm.get_rev()
tmp_dir.scm_gen("2", "2", commit="2")
rev_b = scm.get_rev()
scm.tag("tag")
tmp_dir.scm_gen("3", "3", commit="3")
rev_c = scm.get_rev()
scm.gitpython.git.reset(rev_a, hard=True)
scm.set_ref("refs/foo/bar", rev_c)
assert {rev_a, rev_b} == set(scm.list_all_commits())
def test_ignore_remove_empty(tmp_dir, scm, git):
if git.test_backend == "pygit2":
pytest.skip()
test_entries = [
{"entry": "/foo1", "path": f"{tmp_dir}/foo1"},
{"entry": "/foo2", "path": f"{tmp_dir}/foo2"},
]
path_to_gitignore = tmp_dir / ".gitignore"
with open(path_to_gitignore, "a") as f:
for entry in test_entries:
f.write(entry["entry"] + "\n")
assert path_to_gitignore.exists()
git.ignore_remove(test_entries[0]["path"])
assert path_to_gitignore.exists()
git.ignore_remove(test_entries[1]["path"])
assert not path_to_gitignore.exists()
@pytest.mark.skipif(
os.name == "nt", reason="Git hooks not supported on Windows"
)
@pytest.mark.parametrize("hook", ["pre-commit", "commit-msg"])
def test_commit_no_verify(tmp_dir, scm, git, hook):
import stat
if git.test_backend == "pygit2":
pytest.skip()
hook_file = os.path.join(".git", "hooks", hook)
tmp_dir.gen(hook_file, "#!/usr/bin/env python\nimport sys\nsys.exit(1)")
os.chmod(hook_file, stat.S_IREAD | stat.S_IWRITE | stat.S_IEXEC)
tmp_dir.gen("foo", "foo")
git.add(["foo"])
with pytest.raises(SCMError):
git.commit("commit foo")
git.commit("commit foo", no_verify=True)
@pytest.mark.parametrize("squash", [True, False])
def test_merge(tmp_dir, scm, git, squash):
from dvc.scm.base import MergeConflictError
if git.test_backend == "dulwich":
pytest.skip()
tmp_dir.scm_gen("foo", "foo", commit="init")
init_rev = scm.get_rev()
scm.checkout("branch", create_new=True)
tmp_dir.scm_gen("foo", "bar", commit="bar")
branch = scm.resolve_rev("branch")
scm.checkout("master")
with pytest.raises(MergeConflictError):
tmp_dir.scm_gen("foo", "baz", commit="baz")
git.merge(branch, commit=not squash, squash=squash, msg="merge")
scm.gitpython.git.reset(init_rev, hard=True)
merge_rev = git.merge(
branch, commit=not squash, squash=squash, msg="merge"
)
assert (tmp_dir / "foo").read_text() == "bar"
if squash:
assert merge_rev is None
assert scm.get_rev() == init_rev
else:
assert scm.get_rev() == merge_rev
def test_checkout_index(tmp_dir, scm, git):
if git.test_backend == "dulwich":
pytest.skip()
tmp_dir.scm_gen(
{"foo": "foo", "bar": "bar", "dir": {"baz": "baz"}}, commit="init"
)
tmp_dir.gen({"foo": "baz", "dir": {"baz": "foo"}})
with (tmp_dir / "dir").chdir():
git.checkout_index([os.path.join("..", "foo"), "baz"], force=True)
assert (tmp_dir / "foo").read_text() == "foo"
assert (tmp_dir / "dir" / "baz").read_text() == "baz"
tmp_dir.gen({"foo": "baz", "bar": "baz", "dir": {"baz": "foo"}})
git.checkout_index(force=True)
assert (tmp_dir / "foo").read_text() == "foo"
assert (tmp_dir / "bar").read_text() == "bar"
assert (tmp_dir / "dir" / "baz").read_text() == "baz"
@pytest.mark.parametrize(
"strategy, expected", [("ours", "baz"), ("theirs", "bar")]
)
def test_checkout_index_conflicts(tmp_dir, scm, git, strategy, expected):
from dvc.scm.base import MergeConflictError
if git.test_backend == "dulwich":
pytest.skip()
tmp_dir.scm_gen({"file": "foo"}, commit="init")
scm.checkout("branch", create_new=True)
tmp_dir.scm_gen({"file": "bar"}, commit="bar")
bar = scm.get_rev()
scm.checkout("master")
tmp_dir.scm_gen({"file": "baz"}, commit="baz")
try:
git.merge(bar, commit=False, squash=True)
except MergeConflictError:
if strategy == "ours":
git.checkout_index(ours=True)
else:
git.checkout_index(theirs=True)
assert (tmp_dir / "file").read_text() == expected
def test_resolve_rev(tmp_dir, scm, make_tmp_dir, git):
from dvc.scm.base import RevError
if git.test_backend == "dulwich":
pytest.skip()
remote_dir = make_tmp_dir("git-remote", scm=True)
url = "file://{}".format(remote_dir.resolve().as_posix())
scm.gitpython.repo.create_remote("origin", url)
scm.gitpython.repo.create_remote("upstream", url)
tmp_dir.scm_gen({"file": "0"}, commit="init")
init_rev = scm.get_rev()
tmp_dir.scm_gen({"file": "1"}, commit="1")
rev = scm.get_rev()
scm.checkout("branch", create_new=True)
tmp_dir.gen(
{
os.path.join(".git", "refs", "foo"): rev,
os.path.join(".git", "refs", "remotes", "origin", "bar"): rev,
os.path.join(".git", "refs", "remotes", "origin", "baz"): rev,
os.path.join(
".git", "refs", "remotes", "upstream", "baz"
): init_rev,
}
)
assert git.resolve_rev(rev) == rev
assert git.resolve_rev(rev[:7]) == rev
assert git.resolve_rev("HEAD") == rev
assert git.resolve_rev("branch") == rev
assert git.resolve_rev("refs/foo") == rev
assert git.resolve_rev("bar") == rev
assert git.resolve_rev("origin/baz") == rev
with pytest.raises(RevError):
git.resolve_rev("qux")
with pytest.raises(RevError):
git.resolve_rev("baz")
def test_checkout(tmp_dir, scm, git):
if git.test_backend == "dulwich":
pytest.skip()
tmp_dir.scm_gen({"foo": "foo"}, commit="foo")
foo_rev = scm.get_rev()
tmp_dir.scm_gen("foo", "bar", commit="bar")
bar_rev = scm.get_rev()
git.checkout("branch", create_new=True)
assert git.get_ref("HEAD", follow=False) == "refs/heads/branch"
assert (tmp_dir / "foo").read_text() == "bar"
git.checkout("master", detach=True)
assert git.get_ref("HEAD", follow=False) == bar_rev
git.checkout("master")
assert git.get_ref("HEAD", follow=False) == "refs/heads/master"
git.checkout(foo_rev[:7])
assert git.get_ref("HEAD", follow=False) == foo_rev
assert (tmp_dir / "foo").read_text() == "foo"
def test_reset(tmp_dir, scm, git):
if git.test_backend == "dulwich":
pytest.skip()
tmp_dir.scm_gen({"foo": "foo", "dir": {"baz": "baz"}}, commit="init")
tmp_dir.gen({"foo": "bar", "dir": {"baz": "bar"}})
scm.add(["foo", os.path.join("dir", "baz")])
git.reset()
assert (tmp_dir / "foo").read_text() == "bar"
assert (tmp_dir / "dir" / "baz").read_text() == "bar"
staged, unstaged, _ = scm.status()
assert len(staged) == 0
assert set(unstaged) == {"foo", "dir/baz"}
scm.add(["foo", os.path.join("dir", "baz")])
git.reset(hard=True)
assert (tmp_dir / "foo").read_text() == "foo"
assert (tmp_dir / "dir" / "baz").read_text() == "baz"
staged, unstaged, _ = scm.status()
assert len(staged) == 0
assert len(unstaged) == 0
tmp_dir.gen({"foo": "bar", "bar": "bar", "dir": {"baz": "bar"}})
scm.add(["foo", "bar", os.path.join("dir", "baz")])
with (tmp_dir / "dir").chdir():
git.reset(paths=[os.path.join("..", "foo"), os.path.join("baz")])
assert (tmp_dir / "foo").read_text() == "bar"
assert (tmp_dir / "bar").read_text() == "bar"
assert (tmp_dir / "dir" / "baz").read_text() == "bar"
staged, unstaged, _ = scm.status()
assert len(staged) == 1
assert len(unstaged) == 2
def test_remind_to_track(scm, caplog):
scm.files_to_track = ["fname with spaces.txt", "тест", "foo"]
scm.remind_to_track()
assert "git add 'fname with spaces.txt' 'тест' foo" in caplog.text
def test_add(tmp_dir, scm, git):
if git.test_backend == "pygit2":
pytest.skip()
tmp_dir.gen({"foo": "foo", "bar": "bar", "dir": {"baz": "baz"}})
git.add(["foo", "dir"])
staged, unstaged, untracked = scm.status()
assert set(staged["add"]) == {"foo", "dir/baz"}
assert len(unstaged) == 0
assert len(untracked) == 1
scm.commit("commit")
tmp_dir.gen({"foo": "bar", "dir": {"baz": "bar"}})
git.add([], update=True)
staged, unstaged, _ = scm.status()
assert set(staged["modify"]) == {"foo", "dir/baz"}
assert len(unstaged) == 0
assert len(untracked) == 1
scm.reset()
git.add(["dir"], update=True)
staged, unstaged, _ = scm.status()
assert set(staged["modify"]) == {"dir/baz"}
assert len(unstaged) == 1
assert len(untracked) == 1
@pytest.mark.parametrize("use_sha", [True, False])
def test_pygit_resolve_refish(tmp_dir, scm, git, use_sha):
import pygit2
if git.test_backend != "pygit2":
pytest.skip()
tmp_dir.scm_gen("foo", "foo", commit="foo")
head = scm.get_rev()
tag = "my_tag"
scm.gitpython.git.tag("-a", tag, "-m", "create annotated tag")
if use_sha:
# refish will be annotated tag SHA (not commit SHA)
ref = git.pygit2.repo.references.get(f"refs/tags/{tag}")
refish = str(ref.target)
else:
refish = tag
assert refish != head
commit, ref = git.pygit2._resolve_refish(refish)
assert isinstance(commit, pygit2.Commit)
assert str(commit.id) == head
if not use_sha:
assert ref.name == f"refs/tags/{tag}"
| true | true |
1c3b0a5e82ed0aa464b3dca8e0a8d8ba63c5bbca | 1,347 | py | Python | curtains/stepper_driver.py | KevinMidboe/curtains_avantGarde | ad991518fd5465c479c2257a3f58d07d9038d59f | [
"MIT"
] | null | null | null | curtains/stepper_driver.py | KevinMidboe/curtains_avantGarde | ad991518fd5465c479c2257a3f58d07d9038d59f | [
"MIT"
] | null | null | null | curtains/stepper_driver.py | KevinMidboe/curtains_avantGarde | ad991518fd5465c479c2257a3f58d07d9038d59f | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @Author: KevinMidboe
# @Date: 2017-08-25 16:04:49
# @Last Modified by: KevinMidboe
# @Last Modified time: 2017-08-25 17:34:07
from time import sleep
import RPi.GPIO as gpio
class Stepper:
    """Driver for a 4-wire stepper motor on Raspberry Pi GPIO (BCM mode)."""

    def __init__(self, pins):
        """Configure the four control pins as outputs, initially driven low.

        pins: list of four BCM pin numbers [pin1, pin2, pin3, pin4].
        """
        self.pins = pins
        gpio.setmode(gpio.BCM)
        for pin in self.pins:
            gpio.setup(pin, gpio.OUT)
            gpio.output(pin, False)

    def cleanGPIO(self):
        """Release all GPIO resources claimed by this process."""
        gpio.cleanup()

    def rotate(self, l, n=1):
        """Return list `l` rotated left by `n` positions (right if negative)."""
        return l[n:] + l[:n]

    def togglePin(self, pins):
        """Drive exactly the pins in `pins` high (all others low), wait 1 ms."""
        for pin in self.pins:
            gpio.output(pin, pin in pins)
        sleep(0.001)

    def step(self, rotations, dir, speed=1, forever=False):
        """Spin the motor `rotations` revolutions toward `dir`.

        dir: 'left' or 'right'; any other value raises ValueError.
        speed, forever: accepted for interface compatibility but currently
        unused (as in the original implementation).
        """
        for pin in self.pins:
            gpio.output(pin, True)

        if dir == 'left':
            shift = 1
        elif dir == 'right':
            # BUG FIX: the original computed `turnLeft` but never used it,
            # so the coils were always sequenced the same way and the motor
            # turned in one direction regardless of `dir`. Walking the pin
            # order backwards reverses rotation. (Also removed the
            # unreachable `return False` that followed the raise.)
            shift = -1
        else:
            raise ValueError('STEPPER ERROR: no direction supplied')

        steps = rotations * 500  # 500 coil-pattern cycles per requested rotation
        pinState = self.pins
        while steps > 0:
            # half-step pattern: single coil, then the adjacent pair
            for _ in range(2):
                self.togglePin([pinState[0]])
                self.togglePin([pinState[0], pinState[1]])
            pinState = self.rotate(pinState, shift)
            steps -= 1
if __name__ == '__main__':
    # Manual smoke test: spin the motor 10 rotations to the left using
    # BCM pins 17/18/27/22, then release the GPIO lines.
    pins = [17, 18, 27, 22]
    stepper = Stepper(pins)
    stepper.step(10, 'left')
    stepper.cleanGPIO()
| 19.521739 | 59 | 0.646622 |
from time import sleep
import RPi.GPIO as gpio
class Stepper:
def __init__(self, pins):
self.pins = pins
gpio.setmode(gpio.BCM)
for pin in self.pins:
gpio.setup(pin, gpio.OUT)
gpio.output(pin, False)
def cleanGPIO(self):
gpio.cleanup()
def rotate(self, l, n=1):
return l[n:] + l[:n]
def togglePin(self, pins):
for pin in self.pins:
if pin in pins:
gpio.output(pin, True)
else:
gpio.output(pin, False)
sleep(0.001)
def step(self, rotations, dir, speed=1, forever=False):
for pin in self.pins:
gpio.output(pin, True)
turnLeft = True
if (dir == 'right'):
turnLeft = False
elif (dir != 'left'):
raise ValueError('STEPPER ERROR: no direction supplied')
return False
steps = rotations * 500
pinState = self.pins
while steps > 0:
for i in range(2):
self.togglePin([pinState[0]])
self.togglePin([pinState[0], pinState[1]])
pinState = self.rotate(pinState)
steps -=1
if __name__ == '__main__':
pins = [17, 18, 27, 22]
stepper = Stepper(pins)
stepper.step(10, 'left')
stepper.cleanGPIO()
| true | true |
1c3b0b21cd525f8bf44163e71c6a22d31c564b40 | 5,240 | py | Python | marconi/openstack/common/cache/_backends/memory.py | andrew-vant/marconi | 9afe7ba27efe597cca8baac77fd0aa387c75001a | [
"Apache-2.0"
] | null | null | null | marconi/openstack/common/cache/_backends/memory.py | andrew-vant/marconi | 9afe7ba27efe597cca8baac77fd0aa387c75001a | [
"Apache-2.0"
] | null | null | null | marconi/openstack/common/cache/_backends/memory.py | andrew-vant/marconi | 9afe7ba27efe597cca8baac77fd0aa387c75001a | [
"Apache-2.0"
] | null | null | null | # Copyright 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
from marconi.openstack.common.cache import backends
from marconi.openstack.common import lockutils
from marconi.openstack.common import timeutils
class MemoryBackend(backends.BaseCache):
    """In-process cache backend backed by a plain dict.

    Entries are stored as ``key -> (expires_at, value)`` where
    ``expires_at`` is a UTC timestamp and 0 means "never expires".
    ``_keys_expires`` is the reverse index ``expires_at -> {keys}`` used
    by ``_purge_expired``; keys with ttl == 0 are not indexed there.
    Per-key mutations are serialized with ``lockutils.lock(key)``.
    """

    def __init__(self, parsed_url, options=None):
        super(MemoryBackend, self).__init__(parsed_url, options)
        self._clear()

    def _set_unlocked(self, key, value, ttl=0):
        """Store `value` under `key`; caller must hold the key's lock."""
        expires_at = 0
        if ttl != 0:
            expires_at = timeutils.utcnow_ts() + ttl

        self._cache[key] = (expires_at, value)

        if expires_at:
            self._keys_expires[expires_at].add(key)

    def _set(self, key, value, ttl=0, not_exists=False):
        with lockutils.lock(key):
            # NOTE(flaper87): This is needed just in `set`
            # calls, hence it's not in `_set_unlocked`
            if not_exists and self._exists_unlocked(key):
                return False

            self._set_unlocked(key, value, ttl)
            return True

    def _get_unlocked(self, key, default=None):
        """Return ``(expires_at, value)``; evicts expired entries lazily."""
        now = timeutils.utcnow_ts()

        try:
            timeout, value = self._cache[key]
        except KeyError:
            return (0, default)

        if timeout and now >= timeout:
            # NOTE(flaper87): Record expired,
            # remove it from the cache but catch
            # KeyError and ValueError in case
            # _purge_expired removed this key already.
            try:
                del self._cache[key]
            except KeyError:
                pass

            try:
                # NOTE(flaper87): Keys with ttl == 0
                # don't exist in the _keys_expires dict
                self._keys_expires[timeout].remove(key)
            except (KeyError, ValueError):
                pass

            return (0, default)

        return (timeout, value)

    def _get(self, key, default=None):
        with lockutils.lock(key):
            return self._get_unlocked(key, default)[1]

    def _exists_unlocked(self, key):
        """Non-locking existence check; caller must hold the key's lock."""
        now = timeutils.utcnow_ts()
        try:
            timeout = self._cache[key][0]
            return not timeout or now <= timeout
        except KeyError:
            return False

    def __contains__(self, key):
        with lockutils.lock(key):
            return self._exists_unlocked(key)

    def _incr_append(self, key, other):
        """Atomically apply ``value + other``, preserving the remaining TTL."""
        with lockutils.lock(key):
            timeout, value = self._get_unlocked(key)

            if value is None:
                return None

            # BUG FIX: the remaining TTL is `expiry - now`; the original
            # subtracted in the wrong order (`now - expiry`), producing a
            # negative ttl so the updated entry expired immediately. Keys
            # without an expiry (timeout == 0) must keep ttl == 0.
            if timeout:
                ttl = timeout - timeutils.utcnow_ts()
            else:
                ttl = 0
            new_value = value + other
            self._set_unlocked(key, new_value, ttl)
            return new_value

    def _incr(self, key, delta):
        if not isinstance(delta, int):
            raise TypeError('delta must be an int instance')

        return self._incr_append(key, delta)

    def _append_tail(self, key, tail):
        return self._incr_append(key, tail)

    def _purge_expired(self):
        """Removes expired keys from the cache."""
        now = timeutils.utcnow_ts()
        for timeout in sorted(self._keys_expires.keys()):

            # NOTE(flaper87): If timeout is greater
            # than `now`, stop the iteration, remaining
            # keys have not expired.
            if now < timeout:
                break

            # NOTE(flaper87): Unset every key in
            # this set from the cache if its timeout
            # is equal to `timeout`. (The key might
            # have been updated)
            for subkey in self._keys_expires.pop(timeout):
                try:
                    if self._cache[subkey][0] == timeout:
                        del self._cache[subkey]
                except KeyError:
                    continue

    def __delitem__(self, key):
        self._purge_expired()

        # NOTE(flaper87): Delete the key. Using pop
        # since it could have been deleted already
        value = self._cache.pop(key, None)

        if value:
            try:
                # BUG FIX: _keys_expires maps expiry -> set of *keys*;
                # the original called `.remove(value[1])` (the stored
                # value), which always raised and was swallowed, leaving
                # a stale key in the expiry index.
                # NOTE(flaper87): Keys with ttl == 0
                # don't exist in the _keys_expires dict
                self._keys_expires[value[0]].remove(key)
            except (KeyError, ValueError):
                pass

    def _clear(self):
        """Reset the cache and the expiry index to empty."""
        self._cache = {}
        self._keys_expires = collections.defaultdict(set)

    def _get_many(self, keys, default):
        return super(MemoryBackend, self)._get_many(keys, default)

    def _set_many(self, data, ttl=0):
        return super(MemoryBackend, self)._set_many(data, ttl)

    def _unset_many(self, keys):
        return super(MemoryBackend, self)._unset_many(keys)
| 31.566265 | 78 | 0.589885 |
import collections
from marconi.openstack.common.cache import backends
from marconi.openstack.common import lockutils
from marconi.openstack.common import timeutils
class MemoryBackend(backends.BaseCache):
def __init__(self, parsed_url, options=None):
super(MemoryBackend, self).__init__(parsed_url, options)
self._clear()
def _set_unlocked(self, key, value, ttl=0):
expires_at = 0
if ttl != 0:
expires_at = timeutils.utcnow_ts() + ttl
self._cache[key] = (expires_at, value)
if expires_at:
self._keys_expires[expires_at].add(key)
def _set(self, key, value, ttl=0, not_exists=False):
with lockutils.lock(key):
if not_exists and self._exists_unlocked(key):
return False
self._set_unlocked(key, value, ttl)
return True
def _get_unlocked(self, key, default=None):
now = timeutils.utcnow_ts()
try:
timeout, value = self._cache[key]
except KeyError:
return (0, default)
if timeout and now >= timeout:
# NOTE(flaper87): Record expired,
# remove it from the cache but catch
# KeyError and ValueError in case
# _purge_expired removed this key already.
try:
del self._cache[key]
except KeyError:
pass
try:
# NOTE(flaper87): Keys with ttl == 0
# don't exist in the _keys_expires dict
self._keys_expires[timeout].remove(key)
except (KeyError, ValueError):
pass
return (0, default)
return (timeout, value)
def _get(self, key, default=None):
with lockutils.lock(key):
return self._get_unlocked(key, default)[1]
def _exists_unlocked(self, key):
now = timeutils.utcnow_ts()
try:
timeout = self._cache[key][0]
return not timeout or now <= timeout
except KeyError:
return False
def __contains__(self, key):
with lockutils.lock(key):
return self._exists_unlocked(key)
def _incr_append(self, key, other):
with lockutils.lock(key):
timeout, value = self._get_unlocked(key)
if value is None:
return None
ttl = timeutils.utcnow_ts() - timeout
new_value = value + other
self._set_unlocked(key, new_value, ttl)
return new_value
def _incr(self, key, delta):
if not isinstance(delta, int):
raise TypeError('delta must be an int instance')
return self._incr_append(key, delta)
def _append_tail(self, key, tail):
return self._incr_append(key, tail)
def _purge_expired(self):
now = timeutils.utcnow_ts()
for timeout in sorted(self._keys_expires.keys()):
if now < timeout:
break
for subkey in self._keys_expires.pop(timeout):
try:
if self._cache[subkey][0] == timeout:
del self._cache[subkey]
except KeyError:
continue
def __delitem__(self, key):
self._purge_expired()
value = self._cache.pop(key, None)
if value:
try:
self._keys_expires[value[0]].remove(value[1])
except (KeyError, ValueError):
pass
def _clear(self):
self._cache = {}
self._keys_expires = collections.defaultdict(set)
def _get_many(self, keys, default):
return super(MemoryBackend, self)._get_many(keys, default)
def _set_many(self, data, ttl=0):
return super(MemoryBackend, self)._set_many(data, ttl)
def _unset_many(self, keys):
return super(MemoryBackend, self)._unset_many(keys)
| true | true |
1c3b0be6de73d7cc06657e6f2db94f3125f6786a | 38 | py | Python | tests/dataframe_creation_test.py | mmutiso/dea | 6c497b05ef2e957b610a2f9390a3d55d69bcaa7d | [
"Apache-2.0"
] | null | null | null | tests/dataframe_creation_test.py | mmutiso/dea | 6c497b05ef2e957b610a2f9390a3d55d69bcaa7d | [
"Apache-2.0"
] | 1 | 2022-01-01T17:33:13.000Z | 2022-01-01T17:33:13.000Z | tests/dataframe_creation_test.py | mmutiso/dea | 6c497b05ef2e957b610a2f9390a3d55d69bcaa7d | [
"Apache-2.0"
] | null | null | null | def test_default():
assert True
| 12.666667 | 20 | 0.657895 | def test_default():
assert True
| true | true |
1c3b0c024441448637d709c8de20e8cccda5be38 | 108 | py | Python | whatpulse/config.py | beenotung/whatpulse | 56d2e16b989f8b0be80f41134ab82688bf90bdd9 | [
"MIT"
] | 33 | 2016-02-01T15:17:18.000Z | 2021-09-04T21:21:59.000Z | whatpulse/config.py | beenotung/whatpulse | 56d2e16b989f8b0be80f41134ab82688bf90bdd9 | [
"MIT"
] | 9 | 2016-05-30T12:48:52.000Z | 2020-08-12T23:36:10.000Z | whatpulse/config.py | beenotung/whatpulse | 56d2e16b989f8b0be80f41134ab82688bf90bdd9 | [
"MIT"
] | 8 | 2016-02-01T15:14:37.000Z | 2020-09-23T09:42:29.000Z | client_version = '2.8.1'
type_os = 'linux' # windows, linux, macos
__all__ = ["client_version", "type_os"]
| 21.6 | 41 | 0.685185 | client_version = '2.8.1'
type_os = 'linux'
__all__ = ["client_version", "type_os"]
| true | true |
1c3b0d2fb3fb9e1f8591c15f4f718e00532f5e15 | 1,456 | py | Python | paper_result/MINet/bicon/train/utils/tensor_ops.py | Zyun-Y/BiconNets | f5ea85dc58550969b99a2ccccccd8133dda4358c | [
"MIT"
] | 9 | 2021-03-02T05:16:03.000Z | 2022-02-01T07:47:00.000Z | paper_result/MINet/bicon/train/utils/tensor_ops.py | Zyun-Y/BiconNets | f5ea85dc58550969b99a2ccccccd8133dda4358c | [
"MIT"
] | null | null | null | paper_result/MINet/bicon/train/utils/tensor_ops.py | Zyun-Y/BiconNets | f5ea85dc58550969b99a2ccccccd8133dda4358c | [
"MIT"
] | 2 | 2021-08-10T07:30:01.000Z | 2021-11-22T03:01:50.000Z | # -*- coding: utf-8 -*-
import torch.nn.functional as F
def cus_sample(feat, **kwargs):
"""
:param feat: 输入特征
:param kwargs: size或者scale_factor
"""
assert len(kwargs.keys()) == 1 and list(kwargs.keys())[0] in ["size", "scale_factor"]
return F.interpolate(feat, **kwargs, mode="bilinear", align_corners=False)
def upsample_add(*xs):
y = xs[-1]
for x in xs[:-1]:
y = y + F.interpolate(x, size=y.size()[2:], mode="bilinear", align_corners=False)
return y
def upsample_cat(*xs):
y = xs[-1]
out = []
for x in xs[:-1]:
out.append(F.interpolate(x, size=y.size()[2:], mode="bilinear", align_corners=False))
return torch.cat([*out, y], dim=1)
def upsample_reduce(b, a):
"""
上采样所有特征到最后一个特征的尺度以及前一个特征的通道数
"""
_, C, _, _ = b.size()
N, _, H, W = a.size()
b = F.interpolate(b, size=(H, W), mode="bilinear", align_corners=False)
a = a.reshape(N, -1, C, H, W).mean(1)
return b + a
def shuffle_channels(x, groups):
"""
Channel shuffle: [N,C,H,W] -> [N,g,C/g,H,W] -> [N,C/g,g,H,W] -> [N,C,H,W]
一共C个channel要分成g组混合的channel,先把C reshape成(g, C/g)的形状,
然后转置成(C/g, g)最后平坦成C组channel
"""
N, C, H, W = x.size()
x = x.reshape(N, groups, C // groups, H, W).permute(0, 2, 1, 3, 4)
return x.reshape(N, C, H, W)
if __name__ == "__main__":
a = torch.rand(3, 4, 10, 10)
b = torch.rand(3, 2, 5, 5)
print(upsample_reduce(b, a).size())
| 25.103448 | 93 | 0.570055 |
import torch.nn.functional as F
def cus_sample(feat, **kwargs):
assert len(kwargs.keys()) == 1 and list(kwargs.keys())[0] in ["size", "scale_factor"]
return F.interpolate(feat, **kwargs, mode="bilinear", align_corners=False)
def upsample_add(*xs):
y = xs[-1]
for x in xs[:-1]:
y = y + F.interpolate(x, size=y.size()[2:], mode="bilinear", align_corners=False)
return y
def upsample_cat(*xs):
y = xs[-1]
out = []
for x in xs[:-1]:
out.append(F.interpolate(x, size=y.size()[2:], mode="bilinear", align_corners=False))
return torch.cat([*out, y], dim=1)
def upsample_reduce(b, a):
_, C, _, _ = b.size()
N, _, H, W = a.size()
b = F.interpolate(b, size=(H, W), mode="bilinear", align_corners=False)
a = a.reshape(N, -1, C, H, W).mean(1)
return b + a
def shuffle_channels(x, groups):
N, C, H, W = x.size()
x = x.reshape(N, groups, C // groups, H, W).permute(0, 2, 1, 3, 4)
return x.reshape(N, C, H, W)
if __name__ == "__main__":
a = torch.rand(3, 4, 10, 10)
b = torch.rand(3, 2, 5, 5)
print(upsample_reduce(b, a).size())
| true | true |
1c3b0de3f094a179e48a574a148643a328b237d7 | 2,449 | py | Python | olea/packages/json_api/json_api/conditions.py | Pix-00/olea | 98bee1fd8866a3929f685a139255afb7b6813f31 | [
"Apache-2.0"
] | 2 | 2020-06-18T03:25:52.000Z | 2020-06-18T07:33:45.000Z | olea/packages/json_api/json_api/conditions.py | Pix-00/olea | 98bee1fd8866a3929f685a139255afb7b6813f31 | [
"Apache-2.0"
] | 15 | 2021-01-28T07:11:04.000Z | 2021-05-24T07:11:37.000Z | olea/packages/json_api/json_api/conditions.py | Pix-00/olea | 98bee1fd8866a3929f685a139255afb7b6813f31 | [
"Apache-2.0"
] | null | null | null | from typing import Any, Dict
class BaseCondition():
def gen_schema(self, field_type, schema) -> Dict[str, Any]:
pass
class AnyValue(BaseCondition):
def gen_schema(self, field_type, schema):
return schema
class Regex(BaseCondition):
def __init__(self, pattern):
self.pattern = pattern
def gen_schema(self, field_type, schema):
schema.update({'pattern': self.pattern})
return schema
class MultipleOf(BaseCondition):
def __init__(self, factor):
self.factor = factor
def gen_schema(self, field_type, schema):
if field_type != 'number':
raise Exception()
schema.update({'multipleOf': self.factor})
return schema
class In(BaseCondition):
def __init__(self, *values):
self.values = list(set(values))
def gen_schema(self, field_type, schema):
if field_type == 'string':
schema.update({'enum': self.values})
elif field_type == 'array':
schema['items'].update({'enum': self.values})
else:
raise Exception()
return schema
class InRange(BaseCondition):
def __init__(self, min_val=None, max_val=None, min_inclusive=True, max_inclusive=True):
self.min = min_val
self.max = max_val
self.min_inclusive = min_inclusive
self.max_inclusive = max_inclusive
def gen_schema(self, field_type, schema):
result = dict()
if field_type == 'number':
if self.min:
result['minimum'] = self.min
if not self.min_inclusive:
result['exclusiveMinimum'] = False
if self.max:
result['maximum'] = self.max
if not self.max_inclusive:
result['exclusiveMaximum'] = False
elif field_type == 'string':
if self.min:
result['minLength'] = self.min
if self.max:
result['maxLength'] = self.max
elif field_type == 'array':
if self.min:
result['minItems'] = self.min
if self.max:
result['maxItems'] = self.max
schema.update(result)
return schema
class Contains(BaseCondition):
def __init__(self, condition):
self.confition = condition
def gen_schema(self, field_type, schema):
return {'contains': self.confition.gen_schema(field_type, schema)}
| 26.333333 | 91 | 0.588812 | from typing import Any, Dict
class BaseCondition():
def gen_schema(self, field_type, schema) -> Dict[str, Any]:
pass
class AnyValue(BaseCondition):
def gen_schema(self, field_type, schema):
return schema
class Regex(BaseCondition):
def __init__(self, pattern):
self.pattern = pattern
def gen_schema(self, field_type, schema):
schema.update({'pattern': self.pattern})
return schema
class MultipleOf(BaseCondition):
def __init__(self, factor):
self.factor = factor
def gen_schema(self, field_type, schema):
if field_type != 'number':
raise Exception()
schema.update({'multipleOf': self.factor})
return schema
class In(BaseCondition):
def __init__(self, *values):
self.values = list(set(values))
def gen_schema(self, field_type, schema):
if field_type == 'string':
schema.update({'enum': self.values})
elif field_type == 'array':
schema['items'].update({'enum': self.values})
else:
raise Exception()
return schema
class InRange(BaseCondition):
def __init__(self, min_val=None, max_val=None, min_inclusive=True, max_inclusive=True):
self.min = min_val
self.max = max_val
self.min_inclusive = min_inclusive
self.max_inclusive = max_inclusive
def gen_schema(self, field_type, schema):
result = dict()
if field_type == 'number':
if self.min:
result['minimum'] = self.min
if not self.min_inclusive:
result['exclusiveMinimum'] = False
if self.max:
result['maximum'] = self.max
if not self.max_inclusive:
result['exclusiveMaximum'] = False
elif field_type == 'string':
if self.min:
result['minLength'] = self.min
if self.max:
result['maxLength'] = self.max
elif field_type == 'array':
if self.min:
result['minItems'] = self.min
if self.max:
result['maxItems'] = self.max
schema.update(result)
return schema
class Contains(BaseCondition):
def __init__(self, condition):
self.confition = condition
def gen_schema(self, field_type, schema):
return {'contains': self.confition.gen_schema(field_type, schema)}
| true | true |
1c3b0e3649ccf6bc24793ef5e8f21581e122af57 | 6,443 | py | Python | stylebank/datasets.py | ncassereau-idris/stylebank | 2884d5eb8175622a03684ee621fd44736a431e82 | [
"MIT"
] | null | null | null | stylebank/datasets.py | ncassereau-idris/stylebank | 2884d5eb8175622a03684ee621fd44736a431e82 | [
"MIT"
] | null | null | null | stylebank/datasets.py | ncassereau-idris/stylebank | 2884d5eb8175622a03684ee621fd44736a431e82 | [
"MIT"
] | null | null | null | # /usr/bin/env python3
# -*- coding: utf-8 -*-
import logging
from hydra.utils import to_absolute_path
import torch
import torch.distributed as dist
from torch.utils.data import Dataset, DataLoader
from torch.utils.data.distributed import DistributedSampler
import torchvision.transforms.functional as TF
from torchvision.io import read_image
import torchvision.transforms as transforms
from PIL import Image
import numpy as np
import glob
import os
from . import tools
from .plasma import PlasmaStorage
log = logging.getLogger(__name__)
class PhotoDataset(Dataset):
def __init__(
self, path, transform, quantity=-1,
store_transformed=False, preload=False
):
assert store_transformed or not preload
self.store_transformed = store_transformed
self.filenames = glob.glob(
to_absolute_path(os.path.join(path, "*.jpg"))
)
self.filenames.sort()
if 0 < quantity <= len(self.filenames):
self.filenames = self.filenames[:quantity]
self.transform = transform
if preload:
log.info(f"Preloading data ({len(self.filenames)} files)")
self.files = self.preload()
log.info(f"{len(self.filenames)} files have been preloaded!")
else:
self.files = PlasmaStorage(autocuda=True)
def preload(self):
files = PlasmaStorage(autocuda=True)
for i, filename in enumerate(self.filenames):
if (i - tools.rank) % tools.size == 0:
files[i] = self.load_image(filename)
dist.barrier()
# pooling across all tasks
return files.merge()
def load_image(self, filename):
image = read_image(filename)
image = TF.to_pil_image(image)
return self.transform(image).cuda()
def get_image_from_filename(self, filename):
return self.get_image_from_idx(self.filenames.index(filename))
def get_image_from_idx(self, idx):
img = self.files[idx]
if img is None:
img = self.load_image(self.filenames[idx])
if self.store_transformed:
self.files[idx] = img
return img
def __len__(self):
return len(self.filenames)
def get_image(self, fileId):
if isinstance(fileId, int): # that's an index
return self.get_image_from_idx(fileId)
elif isinstance(fileId, str): # that's a filename
return self.get_image_from_filename(fileId)
def get_names(self, indices):
return [
os.path.splitext(
os.path.basename(self.filenames[idx])
)[0]
for idx in indices
]
def __getitem__(self, idx):
if isinstance(idx, int):
return idx, self.get_image(idx)
return idx, torch.stack([self.get_image(i) for i in idx])
class TrainingDataset(Dataset):
def __init__(self, cfg, content_dataset, style_dataset):
self.cfg = cfg
self.content_dataset = content_dataset
self.style_dataset = style_dataset
def __len__(self):
return self.cfg.training.repeat * len(self.style_dataset)
def __getitem__(self, idx):
return (
self.content_dataset[np.random.randint(len(self.content_dataset))],
self.style_dataset[idx % len(self.style_dataset)]
)
class Resize(object):
"""
Resize with aspect ratio preserved.
"""
def __init__(self, size):
self.size = size
def __call__(self, img):
m = min(img.size)
new_size = (
int(img.size[0] / m * self.size),
int(img.size[1] / m * self.size)
)
return img.resize(new_size, resample=Image.BILINEAR)
class DataManager:
def __init__(self, cfg):
self.cfg = cfg
self.transform = transforms.Compose([
Resize(513),
transforms.CenterCrop([513, 513]),
transforms.ToTensor(),
])
self.load_datasets()
if self.cfg.training.train:
self.make_training_dataloader()
def load_datasets(self):
log.info("Loading real pictures dataset")
self.content_dataset = PhotoDataset(
path=self.cfg.data.photo,
transform=self.transform,
store_transformed=self.cfg.data.store_transformed,
preload=self.cfg.data.preload_transformed
)
log.info(
f"Real pictures dataset has {len(self.content_dataset)} samples"
)
log.info("Loading monet paintings dataset")
self.style_dataset = PhotoDataset(
path=self.cfg.data.monet,
transform=self.transform,
quantity=self.cfg.data.style_quantity,
store_transformed=self.cfg.data.store_transformed,
preload=self.cfg.data.preload_transformed
)
log.info(f"Paintings dataset has {len(self.style_dataset)} samples")
def _distributed_sampler(self, dataset, **kwargs):
sampler = DistributedSampler(
dataset,
num_replicas=tools.size,
rank=tools.rank,
shuffle=False,
**kwargs
)
return sampler
def _dataloader(self, dataset, sampler=None, **kwargs):
if sampler is None:
sampler = self._distributed_sampler(dataset)
dataloader = DataLoader(
dataset,
batch_size=self.cfg.training.batch_size,
sampler=sampler,
**kwargs
)
return dataloader
def make_training_dataloader(self):
training_dataset = TrainingDataset(
self.cfg, self.content_dataset, self.style_dataset
)
return self._dataloader(training_dataset)
def make_preload_dataloaders(self):
content_dataloader = self._dataloader(self.content_dataset)
style_dataloader = self._dataloader(self.style_dataset)
return content_dataloader, style_dataloader
def cycle(self, iterable):
# This version of cycle shuffles the dataset between
# each epoch unlike itertools' version
while True:
for x in iterable:
yield x
def make_generation_dataloader(self):
combined_dataset = TrainingDataset(
self.cfg, self.content_dataset, self.style_dataset
)
dataloader = self._dataloader(combined_dataset)
return self.cycle(dataloader)
| 30.535545 | 79 | 0.624864 |
import logging
from hydra.utils import to_absolute_path
import torch
import torch.distributed as dist
from torch.utils.data import Dataset, DataLoader
from torch.utils.data.distributed import DistributedSampler
import torchvision.transforms.functional as TF
from torchvision.io import read_image
import torchvision.transforms as transforms
from PIL import Image
import numpy as np
import glob
import os
from . import tools
from .plasma import PlasmaStorage
log = logging.getLogger(__name__)
class PhotoDataset(Dataset):
def __init__(
self, path, transform, quantity=-1,
store_transformed=False, preload=False
):
assert store_transformed or not preload
self.store_transformed = store_transformed
self.filenames = glob.glob(
to_absolute_path(os.path.join(path, "*.jpg"))
)
self.filenames.sort()
if 0 < quantity <= len(self.filenames):
self.filenames = self.filenames[:quantity]
self.transform = transform
if preload:
log.info(f"Preloading data ({len(self.filenames)} files)")
self.files = self.preload()
log.info(f"{len(self.filenames)} files have been preloaded!")
else:
self.files = PlasmaStorage(autocuda=True)
def preload(self):
files = PlasmaStorage(autocuda=True)
for i, filename in enumerate(self.filenames):
if (i - tools.rank) % tools.size == 0:
files[i] = self.load_image(filename)
dist.barrier()
return files.merge()
def load_image(self, filename):
image = read_image(filename)
image = TF.to_pil_image(image)
return self.transform(image).cuda()
def get_image_from_filename(self, filename):
return self.get_image_from_idx(self.filenames.index(filename))
def get_image_from_idx(self, idx):
img = self.files[idx]
if img is None:
img = self.load_image(self.filenames[idx])
if self.store_transformed:
self.files[idx] = img
return img
def __len__(self):
return len(self.filenames)
def get_image(self, fileId):
if isinstance(fileId, int):
return self.get_image_from_idx(fileId)
elif isinstance(fileId, str): # that's a filename
return self.get_image_from_filename(fileId)
def get_names(self, indices):
return [
os.path.splitext(
os.path.basename(self.filenames[idx])
)[0]
for idx in indices
]
def __getitem__(self, idx):
if isinstance(idx, int):
return idx, self.get_image(idx)
return idx, torch.stack([self.get_image(i) for i in idx])
class TrainingDataset(Dataset):
def __init__(self, cfg, content_dataset, style_dataset):
self.cfg = cfg
self.content_dataset = content_dataset
self.style_dataset = style_dataset
def __len__(self):
return self.cfg.training.repeat * len(self.style_dataset)
def __getitem__(self, idx):
return (
self.content_dataset[np.random.randint(len(self.content_dataset))],
self.style_dataset[idx % len(self.style_dataset)]
)
class Resize(object):
def __init__(self, size):
self.size = size
def __call__(self, img):
m = min(img.size)
new_size = (
int(img.size[0] / m * self.size),
int(img.size[1] / m * self.size)
)
return img.resize(new_size, resample=Image.BILINEAR)
class DataManager:
def __init__(self, cfg):
self.cfg = cfg
self.transform = transforms.Compose([
Resize(513),
transforms.CenterCrop([513, 513]),
transforms.ToTensor(),
])
self.load_datasets()
if self.cfg.training.train:
self.make_training_dataloader()
def load_datasets(self):
log.info("Loading real pictures dataset")
self.content_dataset = PhotoDataset(
path=self.cfg.data.photo,
transform=self.transform,
store_transformed=self.cfg.data.store_transformed,
preload=self.cfg.data.preload_transformed
)
log.info(
f"Real pictures dataset has {len(self.content_dataset)} samples"
)
log.info("Loading monet paintings dataset")
self.style_dataset = PhotoDataset(
path=self.cfg.data.monet,
transform=self.transform,
quantity=self.cfg.data.style_quantity,
store_transformed=self.cfg.data.store_transformed,
preload=self.cfg.data.preload_transformed
)
log.info(f"Paintings dataset has {len(self.style_dataset)} samples")
def _distributed_sampler(self, dataset, **kwargs):
sampler = DistributedSampler(
dataset,
num_replicas=tools.size,
rank=tools.rank,
shuffle=False,
**kwargs
)
return sampler
def _dataloader(self, dataset, sampler=None, **kwargs):
if sampler is None:
sampler = self._distributed_sampler(dataset)
dataloader = DataLoader(
dataset,
batch_size=self.cfg.training.batch_size,
sampler=sampler,
**kwargs
)
return dataloader
def make_training_dataloader(self):
training_dataset = TrainingDataset(
self.cfg, self.content_dataset, self.style_dataset
)
return self._dataloader(training_dataset)
def make_preload_dataloaders(self):
content_dataloader = self._dataloader(self.content_dataset)
style_dataloader = self._dataloader(self.style_dataset)
return content_dataloader, style_dataloader
def cycle(self, iterable):
while True:
for x in iterable:
yield x
def make_generation_dataloader(self):
combined_dataset = TrainingDataset(
self.cfg, self.content_dataset, self.style_dataset
)
dataloader = self._dataloader(combined_dataset)
return self.cycle(dataloader)
| true | true |
1c3b0eb236deebcb953e7906a54e6acab54f1c15 | 2,500 | py | Python | tests/events.py | msabramo/chaperone | 9ff2c3a5b9c6820f8750320a564ea214042df06f | [
"Apache-2.0"
] | 186 | 2015-07-22T00:08:04.000Z | 2021-11-05T21:51:09.000Z | tests/events.py | msabramo/chaperone | 9ff2c3a5b9c6820f8750320a564ea214042df06f | [
"Apache-2.0"
] | 24 | 2015-07-27T15:30:14.000Z | 2021-09-11T21:19:37.000Z | tests/events.py | msabramo/chaperone | 9ff2c3a5b9c6820f8750320a564ea214042df06f | [
"Apache-2.0"
] | 26 | 2016-01-11T21:02:30.000Z | 2021-08-31T11:09:25.000Z | from prefix import *
from chaperone.cutil.events import EventSource
class handlers:
def __init__(self):
self.results = list()
def handler1(self, val):
self.results.append("handler1:" + val)
def handler2(self, val):
self.results.append("handler2:" + val)
def handler3(self, val):
self.results.append("handler3:" + val)
class TestEvents(unittest.TestCase):
def setUp(self):
self.h = handlers()
self.e = EventSource()
def test_event1(self):
self.e.add(onH1 = self.h.handler1)
self.e.add(onH1 = self.h.handler1)
self.e.onH1("First trigger")
self.e.onH1("Second trigger")
self.assertEqual(self.h.results,
['handler1:First trigger', 'handler1:First trigger', 'handler1:Second trigger', 'handler1:Second trigger'])
self.e.remove(onH1 = self.h.handler1)
self.e.onH1("Third trigger")
self.e.remove(onH1 = self.h.handler1)
self.e.onH1("Fourth trigger")
self.assertEqual(self.h.results,
['handler1:First trigger', 'handler1:First trigger', 'handler1:Second trigger', 'handler1:Second trigger', 'handler1:Third trigger'])
def test_event2(self):
self.e.add(onH1 = self.h.handler1)
self.assertRaisesRegex(TypeError, 'but 3 were given', lambda: self.e.onH1("arg1", "arg2"))
def test_event3(self):
self.e.add(onMulti = self.h.handler1)
self.e.add(onMulti = self.h.handler2)
self.e.onMulti("TWO")
self.e.add(onMulti = self.h.handler3)
self.e.onMulti("THREE")
self.assertEqual(self.h.results,
['handler1:TWO', 'handler2:TWO', 'handler1:THREE', 'handler2:THREE', 'handler3:THREE'])
self.e.remove(onMulti = self.h.handler2)
self.e.onMulti("AFTER-REMOVE")
self.assertEqual(self.h.results,
['handler1:TWO', 'handler2:TWO', 'handler1:THREE', 'handler2:THREE', 'handler3:THREE', 'handler1:AFTER-REMOVE', 'handler3:AFTER-REMOVE'])
self.e.remove(onMulti = self.h.handler1)
self.e.remove(onMulti = self.h.handler2)
self.e.remove(onMulti = self.h.handler3)
self.e.onMulti("EMPTY")
self.assertEqual(self.h.results,
['handler1:TWO', 'handler2:TWO', 'handler1:THREE', 'handler2:THREE', 'handler3:THREE', 'handler1:AFTER-REMOVE', 'handler3:AFTER-REMOVE'])
if __name__ == '__main__':
unittest.main()
| 39.0625 | 162 | 0.614 | from prefix import *
from chaperone.cutil.events import EventSource
class handlers:
def __init__(self):
self.results = list()
def handler1(self, val):
self.results.append("handler1:" + val)
def handler2(self, val):
self.results.append("handler2:" + val)
def handler3(self, val):
self.results.append("handler3:" + val)
class TestEvents(unittest.TestCase):
def setUp(self):
self.h = handlers()
self.e = EventSource()
def test_event1(self):
self.e.add(onH1 = self.h.handler1)
self.e.add(onH1 = self.h.handler1)
self.e.onH1("First trigger")
self.e.onH1("Second trigger")
self.assertEqual(self.h.results,
['handler1:First trigger', 'handler1:First trigger', 'handler1:Second trigger', 'handler1:Second trigger'])
self.e.remove(onH1 = self.h.handler1)
self.e.onH1("Third trigger")
self.e.remove(onH1 = self.h.handler1)
self.e.onH1("Fourth trigger")
self.assertEqual(self.h.results,
['handler1:First trigger', 'handler1:First trigger', 'handler1:Second trigger', 'handler1:Second trigger', 'handler1:Third trigger'])
def test_event2(self):
self.e.add(onH1 = self.h.handler1)
self.assertRaisesRegex(TypeError, 'but 3 were given', lambda: self.e.onH1("arg1", "arg2"))
def test_event3(self):
self.e.add(onMulti = self.h.handler1)
self.e.add(onMulti = self.h.handler2)
self.e.onMulti("TWO")
self.e.add(onMulti = self.h.handler3)
self.e.onMulti("THREE")
self.assertEqual(self.h.results,
['handler1:TWO', 'handler2:TWO', 'handler1:THREE', 'handler2:THREE', 'handler3:THREE'])
self.e.remove(onMulti = self.h.handler2)
self.e.onMulti("AFTER-REMOVE")
self.assertEqual(self.h.results,
['handler1:TWO', 'handler2:TWO', 'handler1:THREE', 'handler2:THREE', 'handler3:THREE', 'handler1:AFTER-REMOVE', 'handler3:AFTER-REMOVE'])
self.e.remove(onMulti = self.h.handler1)
self.e.remove(onMulti = self.h.handler2)
self.e.remove(onMulti = self.h.handler3)
self.e.onMulti("EMPTY")
self.assertEqual(self.h.results,
['handler1:TWO', 'handler2:TWO', 'handler1:THREE', 'handler2:THREE', 'handler3:THREE', 'handler1:AFTER-REMOVE', 'handler3:AFTER-REMOVE'])
if __name__ == '__main__':
unittest.main()
| true | true |
1c3b0f72301a9ab5af61f59e6bbc9d5259041a3d | 1,302 | py | Python | djangocms_file/migrations/0006_migrate_to_filer.py | crydotsnake/djangocms-file | 044bc86545493493227d2393cea475e23759478c | [
"BSD-3-Clause"
] | 25 | 2015-10-24T20:37:23.000Z | 2020-09-24T12:02:35.000Z | djangocms_file/migrations/0006_migrate_to_filer.py | crydotsnake/djangocms-file | 044bc86545493493227d2393cea475e23759478c | [
"BSD-3-Clause"
] | 40 | 2015-03-23T16:17:34.000Z | 2020-09-02T08:30:44.000Z | djangocms_file/migrations/0006_migrate_to_filer.py | crydotsnake/djangocms-file | 044bc86545493493227d2393cea475e23759478c | [
"BSD-3-Clause"
] | 17 | 2015-08-18T17:41:03.000Z | 2020-02-11T23:42:00.000Z | import django.db.models.deletion
from django.db import migrations, models
import filer.fields.file
def migrate_to_filer(apps, schema_editor):
# Because filer is polymorphic, Djangos migration can't handle
from filer.models import File
FileInstance = apps.get_model('djangocms_file', 'File')
plugins = FileInstance.objects.all()
for plugin in plugins: # pragma: no cover
if plugin.file:
filesrc = File.objects.get_or_create(
file=plugin.file.file,
defaults={
'name': plugin.file.name,
}
)[0]
plugins.filter(pk=plugin.pk).update(file_src=filesrc)
class Migration(migrations.Migration):
dependencies = [
('filer', '0006_auto_20160623_1627'),
('djangocms_file', '0005_auto_20160119_1534'),
]
operations = [
migrations.AddField(
model_name='file',
name='file_src',
field=filer.fields.file.FilerFileField(related_name='+', on_delete=django.db.models.deletion.SET_NULL, verbose_name='File', blank=True, to='filer.File', null=True),
),
migrations.RunPython(migrate_to_filer),
migrations.RemoveField(
model_name='file',
name='file',
),
]
| 30.27907 | 176 | 0.615207 | import django.db.models.deletion
from django.db import migrations, models
import filer.fields.file
def migrate_to_filer(apps, schema_editor):
from filer.models import File
FileInstance = apps.get_model('djangocms_file', 'File')
plugins = FileInstance.objects.all()
for plugin in plugins: # pragma: no cover
if plugin.file:
filesrc = File.objects.get_or_create(
file=plugin.file.file,
defaults={
'name': plugin.file.name,
}
)[0]
plugins.filter(pk=plugin.pk).update(file_src=filesrc)
class Migration(migrations.Migration):
dependencies = [
('filer', '0006_auto_20160623_1627'),
('djangocms_file', '0005_auto_20160119_1534'),
]
operations = [
migrations.AddField(
model_name='file',
name='file_src',
field=filer.fields.file.FilerFileField(related_name='+', on_delete=django.db.models.deletion.SET_NULL, verbose_name='File', blank=True, to='filer.File', null=True),
),
migrations.RunPython(migrate_to_filer),
migrations.RemoveField(
model_name='file',
name='file',
),
]
| true | true |
1c3b0f88c705ffdd66629d61b72914e4eb0d5a7a | 43,116 | py | Python | lights.py | caj380/lifx-lan-gui | 610f1cea7c915dd6d9c2d5108a1c5a19309527f2 | [
"MIT"
] | null | null | null | lights.py | caj380/lifx-lan-gui | 610f1cea7c915dd6d9c2d5108a1c5a19309527f2 | [
"MIT"
] | null | null | null | lights.py | caj380/lifx-lan-gui | 610f1cea7c915dd6d9c2d5108a1c5a19309527f2 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import sys
if sys.version_info < (3, 3):
sys.stdout.write("Sorry, This module requires Python 3.3 (or higher), not Python 2.x. You are using Python {0}.{1}\n".format(sys.version_info[0],sys.version_info[1]))
sys.exit(1)
from appJar import gui
import os
import time
import binascii
import lifxlan
import colorsys
from colour import Color
import math
import sys
from time import sleep
from lifxlan import BLUE, CYAN, GREEN, ORANGE, PINK, PURPLE, RED, YELLOW
from configobj import ConfigObj
import pickle as pkl
from random import randint
from platform import system
from PIL import Image
import appJar as aJ
import numpy as np
import cv2
from scipy.stats import itemfreq
from mss import mss
myos = system()
if (myos == 'Windows') or (myos == 'Darwin'):
from PIL import ImageGrab
elif (myos == 'Linux'):
import pyscreenshot as ImageGrab
if (myos == 'Windows'):
mygreen = 'lime'
elif (myos == 'Darwin') or (myos == 'Linux') :
mygreen = 'green'
def resource_path(relative_path):
    """Return the absolute path of a bundled resource file.

    Works both when running from source and when frozen with PyInstaller:
    PyInstaller unpacks data files into a temporary folder and records that
    folder in ``sys._MEIPASS``; otherwise paths resolve relative to this
    script's own directory.

    :param relative_path: resource path relative to the application root
    :return: absolute filesystem path to the resource

    Fix: the original fell through and returned ``None`` on any OS other
    than Windows/Darwin/Linux, and the non-frozen Windows branch resolved
    against the *current working directory* (cwd-dependent lookups break
    when the app is launched from elsewhere). One ``getattr`` expression
    now covers every platform uniformly.
    """
    base_path = getattr(sys, '_MEIPASS', os.path.dirname(os.path.abspath(__file__)))
    return os.path.join(base_path, relative_path)
DECIMATE = 1 # skip every DECIMATE number of pixels to speed up calculation

# ---- tooltip texts shown on hover over the corresponding widgets ----
TRANSIENT_TIP = "If selected, return to the original color after the specified number of cycles. If not selected, set light to specified color"
PERIOD_TIP = "Period is the length of one cycle in milliseconds"
CYCLES_TIP = "Cycles is the number of times to repeat the waveform"
DUTY_CYCLE_TIP = "Duty Cycle is an integer between -32768 and 32767. Its effect is most obvious with the Pulse waveform. Set Duty Cycle to 0 to spend an equal amount of time on the original color and the new color. Set Duty Cycle to positive to spend more time on the original color. Set Duty Cycle to negative to spend more time on the new color"
EXPECTED_TIP = "Select 0 to find all available bulbs. Select any number to look for exactly that number of bulbs"
TRANSITION_TIME_TIP = "The time (in ms) that a color transition takes"
FOLLOW_DESKTOP_TIP = "Make your bulbs' color match your desktop"
DESKTOP_MODE_TIP = "Select between following the whole desktop screen or just a small portion of it (useful for letterbox movies)"

# ---- defaults ----
EXPECTED_BULBS = 0              # 0 = discover however many bulbs respond
TRANSITION_TIME_DEFAULT = 400   # ms, default fade time for Follow Desktop

# ---- on-disk files (resolved next to the script / PyInstaller bundle) ----
CONFIG = resource_path("lights.ini")
PICKLE = resource_path("lifxList.pkl")
SCENE1_C = resource_path("scene1_c.pkl")
SCENE1_P = resource_path("scene1_p.pkl")
SCENE2_C = resource_path("scene2_c.pkl")
SCENE2_P = resource_path("scene2_p.pkl")
SCENE3_C = resource_path("scene3_c.pkl")
SCENE3_P = resource_path("scene3_p.pkl")

# ---- appJar widget names reused throughout the file ----
CYCLES = "Cycles"
TRANSITION_TIME = "Transition Time(ms)"
FOLLOW_DESKTOP = "Follow Desktop"
DESKTOP_MODE = "Desktop Mode"
REGION_COLOR = "regioncolor"
MAX_SATURATION = "Max Saturation"
MAX_BRIGHTNESS = "Max Brightness"

# ---- mutable module state shared by the callbacks below ----
alreadyDone = False        # debounce flag for the slider<->spinbox sync
config = {}                # ConfigObj instance once the .ini is loaded
bulbs = []                 # list of discovered lifxlan Light objects
selected_bulb = 0          # currently selected Light (0 = none yet)
details = str(0)           # text shown in the "Bulb Details" area
gSelectAll = False         # True when the "Select All" box is ticked
lan = 0                    # LifxLAN instance (0 until discovery runs)
gExpectedBulbs = EXPECTED_BULBS
lifxList = []              # list of {label, mac, ip} dicts, pickled to PICKLE
lifxDict = {}
gwaveformcolor = "#FF0000" # secondary color for the Waveform feature
is_follow = False          # True while the Follow Desktop loop should run
test_string = """
"""
# per-scene snapshots of bulb colors/powers (see Scene())
original_colors1 = {}
original_powers1 = {}
original_colors2 = {}
original_powers2 = {}
original_colors3 = {}
original_powers3 = {}
r = None                   # (left, top, width, height) capture region
selectedMode = "Whole Screen"
maxSaturation = False
maxBrightness = False
class App(aJ.gui):
    """Thin wrapper around appJar's ``gui`` exposing screen-size helpers."""

    def __init__(self, *args, **kwargs):
        # Delegate straight to appJar; super() is the idiomatic form of the
        # original explicit aJ.gui.__init__(self, ...) call.
        super().__init__(*args, **kwargs)

    def winfo_screenheight(self):
        """Return the screen height in pixels (shortcut to the Tk window)."""
        return self.appWindow.winfo_screenheight()

    def winfo_screenwidth(self):
        """Return the screen width in pixels (shortcut to the Tk window)."""
        return self.appWindow.winfo_screenwidth()
def SceneNameChanged(name):
    """Persist an edited scene-name entry widget straight into the .ini file."""
    new_label = app.getEntry(name)
    config[name] = new_label
    config.write()
def Scene(name):
    """Save or restore one of three lighting scenes.

    ``name`` is the button name, e.g. ``"Save Scene 2"`` or
    ``"Restore Scene 2"``. A scene is the color and power state of every
    discovered bulb, pickled to disk so it survives app restarts.

    Fixes over the original: the three near-identical per-scene branches are
    collapsed into one table-driven path, and the pickle files are opened
    via ``with`` so the handles are always closed.
    """
    global lan
    global config
    print(name, "button pressed")
    if len(bulbs) < 1:
        app.errorBox("Error", "Error. No bulbs were found yet. Please click the 'Find Bulbs' button and try again.")
        return
    # One lookup table instead of three copy/pasted save+restore branches.
    scene_files = {
        '1': (SCENE1_C, SCENE1_P),
        '2': (SCENE2_C, SCENE2_P),
        '3': (SCENE3_C, SCENE3_P),
    }
    scene_num = name[-1]
    if scene_num not in scene_files:
        return
    colors_path, powers_path = scene_files[scene_num]
    try:
        if name.startswith('Save'):
            print("Saving Scene", scene_num)
            colors = lan.get_color_all_lights()
            powers = lan.get_power_all_lights()
            # Keep the module-level snapshots (original_colorsN/original_powersN)
            # up to date, as the original per-scene globals did.
            globals()['original_colors' + scene_num] = colors
            globals()['original_powers' + scene_num] = powers
            with open(colors_path, "wb") as f:
                pkl.dump(colors, f)
            with open(powers_path, "wb") as f:
                pkl.dump(powers, f)
        elif name.startswith('Restore'):
            print("Restoring Scene", scene_num)
            # Nothing to do until both snapshot files exist.
            if not (os.path.exists(colors_path) and os.path.exists(powers_path)):
                return
            with open(colors_path, "rb") as f:
                colors = pkl.load(f)
            with open(powers_path, "rb") as f:
                powers = pkl.load(f)
            globals()['original_colors' + scene_num] = colors
            globals()['original_powers' + scene_num] = powers
            if (len(colors) == 0) or (len(powers) == 0):
                print("Nothing saved yet.")
                return
            print("Restoring original color to all lights...")
            for light in colors:
                light.set_color(colors[light])
            # Let the color commands land before switching power states.
            sleep(1)
            print("Restoring original power to all lights...")
            for light in powers:
                light.set_power(powers[light])
    except Exception as e:
        print ("Ignoring error: ", str(e))
        app.errorBox("Error", str(e) + "\n\n Scene Operation failed. This feature is buggy and only works about 50% of the time. Sometimes, you can still save and restore a scene despite this error. If you keep getting this error and can not perform a 'Restore', try restarting the app then try again.")
        return
def updateSliders(hsbk):
    """Mirror an HSBK color (4-tuple/list) onto the spin boxes and scales.

    All widget updates suppress their change callbacks so this never
    re-triggers updateHSB().
    """
    hue, sat, bri, kel = (int(component) for component in hsbk[:4])
    # Spin boxes first, then the scales - same order as the widgets appear.
    for box, value in (("hueSpin", hue), ("satSpin", sat), ("briSpin", bri), ("kelSpin", kel)):
        app.setSpinBox(box, value, callFunction=False)
    for scale, value in (("hueScale", hue), ("satScale", sat), ("briScale", bri), ("kelScale", kel)):
        app.setScale(scale, value, callFunction=False)
def RGBtoHSBK (RGB, temperature = 3500):
    """Convert an (R, G, B) tuple (0-255 per channel) to a LIFX HSBK tuple.

    Hue, saturation and brightness are scaled to LIFX's 0-65535 range;
    ``temperature`` (kelvin) is passed through unchanged.
    """
    peak = max(RGB)
    trough = min(RGB)
    spread = peak - trough
    brightness = int((peak/255) * 65535)
    if spread == 0:
        # Achromatic (gray/black/white): hue and saturation are both zero.
        return (0, 0, brightness, temperature)
    saturation = int(((spread) / peak) * 65535)
    # Distance of each channel from the peak, normalised by the spread.
    red_dist = (peak - RGB[0]) / (spread)
    green_dist = (peak - RGB[1]) / (spread)
    blue_dist = (peak - RGB[2]) / (spread)
    # Pick the hue sector based on which channel dominates.
    if RGB[0] == peak:
        hue = blue_dist - green_dist
    elif RGB[1] == peak:
        hue = 2 + red_dist - blue_dist
    else:
        hue = 4 + green_dist - red_dist
    hue = hue / 6
    if hue < 0:
        hue = hue + 1
    hue = int(hue*65535)
    return (hue, saturation, brightness, temperature)
# function to read the current H/S/B/K scale positions as a dict
def getHSB():
    """Return the current slider positions as a {'H','S','B','K'} dict."""
    hue = app.getScale("hueScale")
    sat = app.getScale("satScale")
    bri = app.getScale("briScale")
    kel = app.getScale("kelScale")
    return {'H': hue, 'S': sat, 'B': bri, 'K': kel}
# function to keep the HSBK spin boxes and sliders in sync and push the color
def updateHSB(name):
    """Change-callback shared by all HSBK scales and spin boxes.

    Mirrors the changed widget onto its twin (scale <-> spin box), repaints
    the color preview swatch, and pushes the new HSBK color to the selected
    bulb (or all bulbs when 'Select All' is ticked).
    """
    # this stops the changes in slider/spin from constantly calling each other
    # (mirroring the twin widget fires this callback again; the flag makes
    # that second, programmatic invocation a no-op)
    #print ("name:",name)
    global alreadyDone
    if alreadyDone:
        alreadyDone = False
        return
    else:
        alreadyDone = True
    # split the widget's name into the type & colour
    # widget names are e.g. "hueScale"/"hueSpin": first 3 chars = channel
    colour = name[0:3]
    widg = name[3:]
    # get the current RGB value
    HSB = getHSB()
    #print("HSB:",HSB,"type(HSB)",type(HSB))
    #print("H",HSB["H"])
    #print("S",HSB["S"])
    #print("B",HSB["B"])
    # depending on the type, get & set...
    if widg == "Scale":
        value = app.getScale(name)
        app.setSpinBox(colour + "Spin", value)
    elif widg == "Spin":
        value = app.getSpinBox(name)
        app.setScale(colour + "Scale", value)
    # update the label: convert 0-65535 HSB to 0-1, then to RGB for the swatch
    h = HSB["H"] / 65535.0;#print("h:",h)
    s = HSB["S"] / 65535.0;#print("s:",s)
    v = HSB["B"] / 65535.0;#print("v:",v)
    k = HSB["K"];#print("k:",k)
    rgb1 = hsv_to_rgb(h, s, v);#print("rgb1:",rgb1)
    c = Color(rgb=(rgb1[0], rgb1[1], rgb1[2]))
    #print("c:",c)
    app.setLabelBg("bulbcolor", c.hex_l)
    global selected_bulb
    bulbHSBK = [HSB["H"],HSB["S"],HSB["B"],k]
    #print ("bulbHSBK:",bulbHSBK)
    # push the color: all bulbs, or just the one picked in the option box
    if gSelectAll:
        lan.set_color_all_lights(bulbHSBK, duration=0, rapid=False)
    elif selected_bulb:
        #print("sending color",hsv)
        selected_bulb.set_color(bulbHSBK, duration=0, rapid=False)
    #app.setEntry("colCode", RGB)
def selectAllPressed (name):
    """Track the 'Select All' checkbox; refuse to tick it before discovery."""
    global bulbs
    global gSelectAll
    if not bulbs:
        # No bulbs discovered yet - untick the box and tell the user why.
        app.errorBox("Error", "Error. No bulbs were found yet. Please click the 'Find Bulbs' button and try again.")
        app.setCheckBox("Select All", ticked=False, callFunction=False)
        return
    gSelectAll = app.getCheckBox("Select All")
def expectedPressed (name):
    """Persist the 'Expected Bulbs' spin box value into the config file."""
    global gExpectedBulbs
    global config
    expected_count = int(app.getSpinBox("Expected Bulbs"))
    gExpectedBulbs = expected_count
    config['expectedbulbs'] = expected_count
    config.write()
def rgb_to_hsv(r, g, b):
    """Convert r, g, b components (each 0.0-1.0) to an (h, s, v) tuple.

    h is in turns (0.0-1.0); s and v are 0.0-1.0.
    """
    r, g, b = float(r), float(g), float(b)
    high = max(r, g, b)
    low = min(r, g, b)
    d = high - low
    v = high
    s = 0 if high == 0 else d / high
    if high == low:
        # Achromatic: hue is undefined, report 0.
        h = 0.0
    elif high == b:
        # Branch order (b, then g, then r) mirrors the original dict-literal
        # tie-breaking when two channels share the maximum value.
        h = (r - g) / d + 4
    elif high == g:
        h = (b - r) / d + 2
    else:
        h = (g - b) / d + (6 if g < b else 0)
    h /= 6
    return h, s, v
def hsv_to_rgb(h, s, v):
    """Convert (h, s, v) components (each 0.0-1.0) to an (r, g, b) tuple."""
    sector = math.floor(h * 6)
    frac = h * 6 - sector
    p = v * (1 - s)
    q = v * (1 - frac * s)
    t = v * (1 - (1 - frac) * s)
    # One of six hue sectors determines how p/q/t/v map to the channels.
    sector = int(sector % 6)
    if sector == 0:
        return v, t, p
    if sector == 1:
        return q, v, p
    if sector == 2:
        return p, v, t
    if sector == 3:
        return p, q, v
    if sector == 4:
        return t, p, v
    return v, p, q
def modeChanged():
    """Remember which desktop-capture mode the user picked in the option box."""
    global selectedMode
    selectedMode = app.getOptionBox("Desktop Mode")
def listChanged():
    """Change-callback for the bulb option box.

    Looks up the picked bulb, shows its details in the text area, flips the
    toggle-button image to match its power state, and syncs the color
    preview and sliders to its current color.
    """
    app.clearTextArea("Result"); # TODO. Put this in another thread
    app.setTextArea("Result", "Loading bulb details") # TODO. Put this in another thread
    selected = (app.getOptionBox("LIFX Bulbs"))#;print("selected: ",selected)
    global bulbs
    global selected_bulb
    global details
    try:
        # NOTE(review): relies on bulb.label being populated (finder() calls
        # get_label() first) - verify .label is cached by lifxlan.
        for bulb in bulbs:
            if (bulb.label == selected):
                #print("Found selected bulb")
                selected_bulb = bulb
                details = str(selected_bulb)
                #print("type(bulb)",type(bulb))
                #print(bulb)
                #print("breaking")
                break
    except Exception as e:
        print ("Ignoring error: ", str(e))
        app.errorBox("Error", str(e))
        app.clearTextArea("Result");
        app.setTextArea("Result", str(e))
        return
    app.clearTextArea("Result")
    app.setTextArea("Result", details)
    try:
        # Power state is scraped from lifxlan's str() dump of the bulb.
        if "Power: On" in details:
            #print ("BULB is ON")
            app.setButtonImage("Light", resource_path("bulb_on.gif"))
        elif "Power: Off" in details:
            #print ("BULB is OFF ")
            app.setButtonImage("Light", resource_path("bulb_off.gif"))
    except Exception as e:
        print ("Ignoring error:", str(e))
    app.setButton ( "Light", "Toggle " + selected )
    app.showButton("Light")
    # NOTE(review): if no bulb matched, `bulb` here is the last one iterated,
    # not the selection - confirm the option box can only hold known labels.
    color = bulb.get_color();#print(color[0],color[1],color[2]);
    h = color[0] / 65535.0;#print("h:",h)
    s = color[1] / 65535.0;#print("s:",s)
    v = color[2] / 65535.0;#print("v:",v)
    rgb1 = hsv_to_rgb(h, s, v);#print("rgb1:",rgb1)
    c = Color(rgb=(rgb1[0], rgb1[1], rgb1[2]))
    #print("c:",c)
    app.setLabelBg("bulbcolor", c.hex_l)
    updateSliders(color)
def finder():
    """Discover LIFX bulbs on the LAN and populate the UI.

    Rebuilds the option-box list, updates the status label color
    (green = found, red = none, gray = error), records each bulb's
    label/mac/ip in lifxList, and pickles that list to PICKLE.
    """
    global bulbList
    global lan
    global gExpectedBulbs
    global config
    global lifxList
    global lifxDict
    global config
    bulbList.clear()
    bulbList.append("-Select Bulb-")
    try:
        global bulbs
        #print("finder().gExpectedBulbs:",gExpectedBulbs)
        # 0 expected bulbs means "discover whatever responds" (None for lifxlan)
        lan = lifxlan.LifxLAN(int(gExpectedBulbs) if int(gExpectedBulbs) != 0 else None)
        bulbs = lan.get_lights()
        #print(type(bulbs))
        #print(bulbs[0].label)
        if len(bulbs) < 1:
            app.errorBox("Error", "No bulbs found. Please try again. If you switched WiFi networks, please re-start the app and try again.")
            app.setLabelBg("lbl2", "red")
            app.setLabel("lbl2", "Found 0 bulbs")
            return
        else:
            app.setLabelBg("lbl2", mygreen)
            app.hideLabel("f1")
            app.setLabel("lbl2", "Found " + str(len(bulbs)) + " bulbs")
            app.setCheckBox("Select All")
            #app.setSpinBox("Expected Bulbs", str(len(bulbs)))
            del lifxList[:]
            for bulb in bulbs:
                #print(".get_label()",bulb.get_label()) # this gets the actual label
                #print(".label:",bulb.label) # this returns None
                label = bulb.get_label()
                ip = bulb.ip_addr
                mac = bulb.mac_addr
                #print (label,ip,mac)
                lifxDict['label'] = label
                lifxDict['mac'] = mac
                lifxDict['ip'] = ip
                lifxList.append(lifxDict.copy())
                bulbList.append(label)
            app.changeOptionBox("LIFX Bulbs", bulbList, callFunction=False)
            app.showButton ( "Pick Color" )
            #print(lifxList)
            #config['bulbs'] = lifxList
            pkl.dump(lifxList, open(PICKLE, "wb" )) #this pickles
            #exit(0)
            #config.write()
    except Exception as e:
        print ("Ignoring error:", str(e))
        app.setLabelBg("lbl2", "gray")
        app.setLabel("lbl2", "Found 0 bulbs")
        app.errorBox("Error", str(e) + "\n\nPlease try again. If you keep getting this error, check/toggle your WiFi, ensure that 'Expected Bulbs' is either 0 or the number of bulbs you have and finally, try restarting the app")
    # config['bulbs'] = bulbs
    # config.write()
    print ("finder() Ended")
def press(name):
    """Central button dispatcher; ``name`` is the appJar button that fired.

    Handles: Find Bulbs, All Off/On/White/Random, Execute (waveform),
    Secondary Color, Pick Color, and the Light (power toggle) button.
    """
    global bulbs
    global details
    global gSelectAll
    global lan
    global gwaveformcolor
    global selected_bulb
    #print(name, "button pressed")
    if (name == "Find Bulbs"):
        finder()
    elif (name == "All Off"):
        if len(bulbs) < 1:
            return
        lan.set_power_all_lights(False, rapid=True)
    elif (name == "All Random"):
        if len(bulbs) < 1:
            return
        selected = (app.getOptionBox("LIFX Bulbs"))
        # Random hue per bulb; saturation floor of 40000 keeps colors vivid.
        for bulb in bulbs:
            hue = (randint(0, 65535))
            sat = (randint(40000, 65535))
            bulb.set_color([hue, sat, 65535, 3500], duration=0, rapid=True)
            # Keep the preview/sliders in sync with the selected bulb only.
            if (bulb.label == selected):
                h = hue / 65535.0;#print("h:",h)
                s = sat / 65535.0;#print("s:",s)
                v = 1;#print("v:",v)
                rgb1 = hsv_to_rgb(h, s, v);#print("rgb1:",rgb1)
                c = Color(rgb=(rgb1[0], rgb1[1], rgb1[2]))
                app.setLabelBg("bulbcolor", c.hex_l)
                updateSliders([hue,sat,65535,3500])
    elif (name == "All On"):
        if len(bulbs) < 1:
            return
        lan.set_power_all_lights(True, rapid=True)
    elif (name == "All White"):
        if len(bulbs) < 1:
            return
        lan.set_color_all_lights([0,0,65535,3500], duration=0, rapid=True)
        updateSliders([0,0,65535,3500])
        app.setLabelBg("bulbcolor", "#FFFFFF")
    elif (name == "Execute"):
        # Map the radio-button label onto lifxlan's numeric waveform codes.
        waveform = app.getRadioButton("waveform")
        config['waveform'] = waveform
        if waveform == "Saw":
            waveform = 0
        elif waveform == "Sine":
            waveform = 1
        elif waveform == "HalfSine":
            waveform = 2
        elif waveform == "Triangle":
            waveform = 3
        elif waveform == "Pulse (Strobe)":
            waveform = 4
        #print ("waveform:",waveform)
        is_transient = app.getCheckBox("Transient")
        config['transient'] = is_transient
        # lifxlan expects 0/1, not bool
        if (is_transient):
            is_transient = 1
        else:
            is_transient = 0
        #print("is_transient:",is_transient)
        #pickedColor = app.getLabelBg("lblwaveformcolor")
        #print("gwaveformcolor:",gwaveformcolor)
        config['secondary_color'] = gwaveformcolor
        c = Color(str(gwaveformcolor))
        hsv = rgb_to_hsv(c.red, c.green, c.blue)
        #print("hsv:",hsv)
        bulbHSBK = [hsv[0] * 65535.0,hsv[1] * 65535.0,hsv[2] * 65535.0,3500]
        #print (bulbHSBK)
        period = app.getEntry("Period(ms)")
        cycles = app.getEntry(CYCLES)
        duty_cycle = app.getEntry("Duty Cycle")
        config['period'] = period
        config['cycles'] = cycles
        config['duty_cycle'] = duty_cycle
        config.write()
        #print("period:",period)
        #print("cycles:",cycles)
        #print("duty_cycle:",duty_cycle)
        if gSelectAll:
            lan.set_waveform_all_lights(is_transient, bulbHSBK, period, cycles, duty_cycle, waveform, [1])
        elif selected_bulb:
            #print("sending color",hsv)
            selected_bulb.set_waveform(is_transient, bulbHSBK, period, cycles, duty_cycle, waveform)
        else:
            app.errorBox("Error", "Error. No bulb was selected. Please select a bulb from the pull-down menu (or tick the 'Select All' checkbox) and try again.")
            return
    elif (name == "Secondary Color"):
        # Just remember the picked color; 'Execute' actually sends it.
        pickedColor = app.colourBox(colour="#FF0000")
        app.setLabelBg("lblwaveformcolor", pickedColor)
        gwaveformcolor = pickedColor
    elif (name == "Pick Color"):
        pickedColor = app.colourBox(colour="#FFFFFF")
        app.setLabelBg("bulbcolor", pickedColor)
        #print("pickedColor:",pickedColor)
        if pickedColor == None:
            return
        c = Color(str(pickedColor))
        hsv = rgb_to_hsv(c.red, c.green, c.blue)
        #print("hsv:",hsv)
        bulbHSBK = [hsv[0] * 65535.0,hsv[1] * 65535.0,hsv[2] * 65535.0,3500]
        #print ("bulbHSBK:",bulbHSBK)
        if gSelectAll:
            lan.set_color_all_lights(bulbHSBK, duration=0, rapid=False)
        elif selected_bulb:
            #print("sending color",hsv)
            selected_bulb.set_color(bulbHSBK, duration=0, rapid=False)
        else:
            app.errorBox("Error", "Error. No bulb was selected. Please select a bulb from the pull-down menu (or tick the 'Select All' checkbox) and try again.")
            return
        updateSliders(bulbHSBK)
    elif (name == "Light"):
        #print("selected: ",selected_bulb.label)
        #print("Power is Currently: {}".format(selected_bulb.power_level))
        # Probing power_level doubles as a "was a bulb selected?" check.
        try:
            onOff = selected_bulb.power_level;
        except Exception as e:
            print ("Ignoring error:", str(e))
            app.errorBox("Error", str(e) + "\n\nTry selecting a bulb from the list first.")
            return
        #selected_bulb.set_power(not selected_bulb.get_power(), duration=0, rapid=True)
        # Power state is tracked via the cached `details` text, then patched
        # in place so the details area stays consistent without a re-query.
        if "Power: Off" in details:
            selected_bulb.set_power(65535, duration=0, rapid=False)
            try:
                app.setButtonImage("Light", resource_path("bulb_on.gif"));#print("PowerOn");
            except Exception as e:
                print ("Ignoring error:", str(e))
            details = details.replace("Power: Off", "Power: On");
            app.clearTextArea("Result")
            app.setTextArea("Result", details)
        else:
            selected_bulb.set_power(0, duration=0, rapid=False)
            try:
                app.setButtonImage("Light", resource_path("bulb_off.gif"));#print("PowerOff");
            except Exception as e:
                print ("Ignoring error:", str(e))
            details = details.replace("Power: On", "Power: Off"); #print("details:\n",details)
            app.clearTextArea("Result")
            app.setTextArea("Result", details)
        app.setButton ( "Light", "Toggle " + (app.getOptionBox("LIFX Bulbs")) )
        app.showButton("Light")
        #listChanged()
def rainbow_press(name):
    """'All Rainbow' button: re-discover bulbs, run the rainbow, restore.

    Snapshots every bulb's color and power first, plays the fast rainbow
    cycle, then puts the original state back.
    """
    global gExpectedBulbs
    global bulbs
    global lan
    #print ("len(bulbs):",len(bulbs))
    try:
        print("Discovering lights...")
        # Fresh discovery each time, in case the network changed.
        lan = lifxlan.LifxLAN(int(gExpectedBulbs) if int(gExpectedBulbs) != 0 else None)
        if lan is None:
            print("Error finding bulbs")
            return
        bulbs = lan.get_lights()
        if len(bulbs) < 1:
            print("No bulbs found. Exiting.")
            return
        #print("lan:",lan,"type(lan):",type(lan))
        # Snapshot current state so it can be restored afterwards.
        original_colors = lan.get_color_all_lights()
        original_powers = lan.get_power_all_lights()
        print("Turning on all lights...")
        lan.set_power_all_lights(True)
        sleep(1)
        print("Flashy fast rainbow")
        rainbow(lan, 0.4)
        #print("Smooth slow rainbow")
        #rainbow(lan, 1, smooth=True)
        print("Restoring original color to all lights...")
        for light in original_colors:
            light.set_color(original_colors[light])
        sleep(1)
        print("Restoring original power to all lights...")
        for light in original_powers:
            light.set_power(original_powers[light])
    except Exception as e:
        print ("Ignoring error:", str(e))
def rainbow(lan, duration_secs=0.5, smooth=False):
    """Cycle every light through the eight rainbow colors three times.

    :param lan: LifxLAN instance controlling the bulbs
    :param duration_secs: seconds to dwell on each color
    :param smooth: fade over the full dwell time instead of a quick 500 ms snap
    """
    palette = [RED, ORANGE, YELLOW, GREEN, CYAN, BLUE, PURPLE, PINK]
    fade_ms = duration_secs * 1000 if smooth else 500
    # Fast cycles use fire-and-forget (rapid) packets.
    use_rapid = duration_secs < 1
    for _ in range(3):
        for shade in palette:
            lan.set_color_all_lights(shade, fade_ms, use_rapid)
            sleep(duration_secs)
def maxPressed(name):
    """Sync the Max Saturation / Max Brightness checkboxes into globals + config."""
    global maxSaturation
    global maxBrightness
    if name == MAX_SATURATION:
        maxSaturation = app.getCheckBox(MAX_SATURATION)
        print(name, " is ", maxSaturation)
        config['maxSaturation'] = maxSaturation
    elif name == MAX_BRIGHTNESS:
        maxBrightness = app.getCheckBox(MAX_BRIGHTNESS)
        print(name, " is ", maxBrightness)
        config['maxBrightness'] = maxBrightness
    # Persist to the .ini regardless of which box changed.
    config.write()
def followDesktop():
    """Background loop: match the bulbs' color to the captured screen region.

    Runs on app.thread() while ``is_follow`` is True. Each iteration grabs
    the region ``r`` (left, top, width, height), averages a 1/10-downsampled
    copy into a dominant RGB color, converts to HSBK, and pushes it to the
    selected bulb(s), rate-limited to ~10 updates/sec.
    """
    global gSelectAll
    global lan
    global is_follow
    global selected_bulb
    global r
    global maxSaturation
    global maxBrightness
    screen_width = app.winfo_screenwidth()
    screen_height = app.winfo_screenheight()
    print("screen_width:", screen_width, " screen_height:", screen_height)
    print("Follow:", is_follow)
    # NOTE(review): duration comes straight from app.getEntry - presumably
    # numeric/None from appJar's numeric entry; confirm set_color accepts it.
    duration = app.getEntry(TRANSITION_TIME)
    is_evening = app.getCheckBox("Evening Mode")
    config['transtime'] = duration
    config['is_evening'] = is_evening
    config.write()
    print("r:", r)
    print("Starting Loop")
    left = r[0] # The x-offset of where your crop box starts
    top = r[1] # The y-offset of where your crop box starts
    width = r[2] # The width of crop box
    height = r[3] # The height of crop box
    box = (left, top, left + width, top + height)
    if (is_follow):
        # Hide the controls that must not change mid-run.
        app.hideEntry(TRANSITION_TIME)
        app.hideOptionBox(DESKTOP_MODE)
        app.showLabel(REGION_COLOR)
        app.hideCheckBox("Evening Mode")
        sct = mss()
    while (is_follow):
        start = time.time()
        try:
            # fast screenshot with mss module
            sct_img = sct.grab(box)
            image = Image.frombytes('RGB', sct_img.size, sct_img.rgb)
        except Exception as e:
            print ("Ignoring error:", str(e))
        try:
            # downsample to 1/10th and calculate average RGB color
            pixels = np.array(image, dtype=np.float32)
            pixels = pixels[::10,::10,:]
            pixels = np.transpose(pixels)
            dominant_color = [np.mean(channel) for channel in pixels]
            c = Color(rgb=(dominant_color[0]/255, dominant_color[1]/255, dominant_color[2]/255))
            app.setLabelBg(REGION_COLOR, c.hex_l)
            # get HSVK color from RGB color
            # during evenings, kelvin is 3500 (default value returned above)
            # during the daytime, saturated colors are still 3500 K,
            # but the whiter the color, the cooler, up to 5000 K
            (h, s, v, k) = RGBtoHSBK(dominant_color)
            if not is_evening:
                k = int(5000 - (s/65535 * 1500))
            # optional boosts: snap near-gray colors to full saturation,
            # and/or always drive full brightness
            if (maxSaturation) and (s > 6553):
                s = 65535
            if (maxBrightness) and (True):
                v = 65535
            bulbHSBK = [h, s, v, k]
            try:
                if gSelectAll:
                    lan.set_color_all_lights(bulbHSBK, duration=duration, rapid=True)
                elif selected_bulb:
                    selected_bulb.set_color(bulbHSBK, duration=duration, rapid=True)
                else:
                    app.errorBox("Error", "Error. No bulb was selected. Please select a bulb from the pull-down menu (or tick the 'Select All' checkbox) and try again.")
                    app.setCheckBox("FOLLOW_DESKTOP", False)
                    is_follow = False
                    return
            except Exception as e:
                print ("Ignoring error: ", str(e))
        except Exception as e:
            print("Ignoring error: ", str(e))
        # rate limit to prevent from spamming bulbs
        # the theoretical max speed that the bulbs can handle is one packet
        # every 0.05 seconds, but empirically I found that 0.1 sec looked better
        max_speed_sec = 0.1
        elapsed_time = time.time() - start
        wait_time = max_speed_sec - elapsed_time
        if wait_time > 0:
            sleep(wait_time)
        #print(elapsed_time, time.time()-start)
    print("Exiting loop")
def followDesktopPressed(name):
    """Checkbox callback: start (or stop) the Follow Desktop feature.

    'Whole Screen' mode captures the full display; 'Rectangular Region'
    takes a screenshot, lets the user drag an ROI in an OpenCV window, and
    stores it in the global ``r``. The capture loop itself runs in a
    background thread (followDesktop).
    """
    global is_follow
    global r
    global selectedMode
    is_follow = app.getCheckBox(FOLLOW_DESKTOP)
    # Re-show the controls that followDesktop() hides while running.
    app.showEntry(TRANSITION_TIME)
    app.showOptionBox(DESKTOP_MODE)
    app.showCheckBox("Evening Mode")
    app.hideLabel(REGION_COLOR)
    if (is_follow):
        print("Pressed:", name, " Follow:", is_follow)
        if (selectedMode == "Whole Screen"):
            print("Doing Whole Screen processing")
            screen_width = app.winfo_screenwidth()
            screen_height = app.winfo_screenheight()
            r = (0, 0, screen_width, screen_height)
        else:
            print("Doing Partial Screen processing")
            # Make our own window invisible so it isn't in the screenshot.
            app.setTransparency(0)
            app.infoBox("Select Region", "A new window entitled \"Screenshot\" will pop up. Drag a rectangle around the region of interest and press ENTER . This region's dominant color will be sent to the bulbs to match. To Cancel, press c .", parent=None)
            myos = system()
            image = ImageGrab.grab()
            if (myos == 'Linux') or (myos == 'Darwin'):
                print("Mac OS detected.")
                open_cv_image = cv2.cvtColor(np.array(image), cv2.COLOR_RGB2BGR)
            elif (myos == 'Windows'):
                print("Windows OS detected.")
                open_cv_image = np.array(image)
            # Convert RGB to BGR
            im = open_cv_image[:,:,::-1].copy()
            if (myos == 'Linux') or (myos == 'Darwin'):
                # Shrink to 90% so the selector window fits on screen.
                screen_width = app.winfo_screenwidth()
                screen_height = app.winfo_screenheight()
                im = cv2.resize(im, (int(screen_width * 0.9), int(screen_height * 0.9)))
                cv2.namedWindow("Screenshot", cv2.WINDOW_AUTOSIZE)
                cv2.moveWindow("Screenshot", 0, 0)
                cv2.imshow("Screenshot", im)
            elif (myos == 'Windows'):
                cv2.namedWindow("Screenshot", cv2.WINDOW_NORMAL)
            r = cv2.selectROI("Screenshot", im, False)
            #cv2.waitKey()
            print ("r type:", type(r))
            print("r is", r)
            # selectROI returns all zeros when the user cancels
            if not any(r):
                print("No region selected. Exiting")
                cv2.destroyAllWindows()
                app.setCheckBox(FOLLOW_DESKTOP, False)
                is_follow = False
                app.setTransparency(1)
                return
            #cv2.waitKey(0)
            cv2.destroyAllWindows()
            app.setTransparency(1)
        app.thread(followDesktop)
# -------------------- main window + bulb discovery ("Find") frame --------------------
bulbList = ["-None- "]
app = App("LIFX Controller")
#app = gui("LIFX Controller")
app.setStretch("both")
app.setResizable(True)
#app.setFont(12)
app.setFont(size=12, family="Arial")
app.setSticky("new")
app.startLabelFrame("", 0, 0)
app.setSticky("new")
app.startLabelFrame("Find", 0, 0)
app.setSticky("new")
app.setPadding(1)
app.addFlashLabel("f1", "Start here --->", 0, 0)
app.addButton("Find Bulbs", press, 0, 1)
expected_range = list(range(1, 20))  # NOTE(review): unused leftover, kept as-is
app.addLabelSpinBox ( "Expected Bulbs", list(reversed(range(20))), 0, 2 )
app.setSpinBox("Expected Bulbs", EXPECTED_BULBS)
# BUG FIX: this read-back was assigned to the misspelled name 'gExpecteBulbs',
# a dead variable - the real global never saw the initial spin-box value.
gExpectedBulbs = app.getSpinBox("Expected Bulbs")
app.setSpinBoxChangeFunction("Expected Bulbs", expectedPressed)
app.setSpinBoxWidth("Expected Bulbs", 2)
app.setSpinBoxTooltip("Expected Bulbs", EXPECTED_TIP)
app.setLabelTooltip("Expected Bulbs", EXPECTED_TIP)
app.addLabel("lbl2", " ", 1, 0)
app.setLabelBg("lbl2", "white")
app.addNamedCheckBox("Select All Bulbs", "Select All", 1, 2)
app.setCheckBoxChangeFunction("Select All", selectAllPressed)
app.addOptionBox("LIFX Bulbs", bulbList, 1, 1)
app.setOptionBoxChangeFunction("LIFX Bulbs", listChanged)
app.setSticky("n")
try:
    app.addImageButton("Light", press, resource_path("bulb_off.gif"), 2, 2)
except Exception as e:
    # Missing .gif just falls back to a text-only button.
    print ("Ignoring error:", str(e))
    #app.errorBox("Error", str(e)+"\n\nTry selecting a bulb from the list first.")
    #return
app.setButton( "Light", "Toggle Selected" )
#app.setButtonHeight ( "Light", 40 )
#app.hideButton("Light")
app.stopLabelFrame()
#-------------------------------------------------------------------------------
# "Scenes" frame: three name entries, each with Save/Restore buttons that all
# route through Scene() / SceneNameChanged().
app.startLabelFrame("Scenes", 0, 1)
app.setSticky("news")
app.addEntry("Scene 1", 0, 0)
app.setEntryChangeFunction("Scene 1", SceneNameChanged)
app.addNamedButton("Save", "Save Scene 1", Scene, 0, 1)
app.addNamedButton("Restore", "Restore Scene 1", Scene, 0, 2)
app.addEntry("Scene 2", 1, 0)
app.setEntryChangeFunction("Scene 2", SceneNameChanged)
app.addNamedButton("Save", "Save Scene 2", Scene, 1, 1)
app.addNamedButton("Restore", "Restore Scene 2", Scene, 1, 2)
app.addEntry("Scene 3", 2, 0)
app.setEntryChangeFunction("Scene 3", SceneNameChanged)
app.addNamedButton("Save", "Save Scene 3", Scene, 2, 1)
app.addNamedButton("Restore", "Restore Scene 3", Scene, 2, 2)
app.stopLabelFrame()
#-------------------------------------------------------------------------------
#app.setButtonImage("picker", resource_path("colorpicker.gif"), align=None)
###
# "All LAN Bulbs" frame: whole-network actions dispatched via press().
app.setSticky("ne")
app.startLabelFrame("All LAN Bulbs", 0, 2)
app.setSticky("new")
app.addButton("All Off", press, 2, 2)
app.addButton("All On", press, 3, 2)
app.addButton("All White", press, 4, 2)
app.addButton("All Rainbow", rainbow_press, 5, 2)
app.addButton("All Random", press, 6, 2)
#app.addButton("All Waveform", rainbow_press,6,2)
app.stopLabelFrame()
#-------------------------------------------
# "HSBK Values" frame: one label + spin box + slider per channel, all wired
# to updateHSB() so they stay in sync and drive the bulbs live.
app.setSticky("sew")
app.startLabelFrame("HSBK Values", 1, 0)
app.setSticky("news")
app.setPadding(5, 5)
app.addButton("Pick Color", press, 3, 3)
#app.hideButton ( "Pick Color" )
app.addLabel("hueLab", "Hue (H):", 0, 0)
app.addLabel("satLab", "Saturation (S):", 1, 0)
app.addLabel("briLab", "Brightness (B):", 2, 0)
app.addLabel("kelLab", "Kelvin (K) Warmth:", 3, 0)
app.setLabelAlign("hueLab", "left")
app.setLabelAlign("satLab", "left")
app.setLabelAlign("briLab", "left")
app.setLabelAlign("kelLab", "left")
# reversed() so the spin box's "up" arrow increases the value
app.addSpinBox("hueSpin", list(reversed(range(65536))), 0, 1)
app.addSpinBox("satSpin", list(reversed(range(65536))), 1, 1)
app.addSpinBox("briSpin", list(reversed(range(65536))), 2, 1)
app.addSpinBox("kelSpin", list(reversed(range(2500, 9001, 1))), 3, 1)
app.setSpinBox("hueSpin", 0)
app.setSpinBox("satSpin", 0)
app.setSpinBox("briSpin", 0)
app.setSpinBox("kelSpin", 3500)
app.setSpinBoxWidth("hueSpin", 5)
app.setSpinBoxWidth("satSpin", 5)
app.setSpinBoxWidth("briSpin", 5)
app.setSpinBoxWidth("kelSpin", 5)
app.setSpinBoxChangeFunction("hueSpin", updateHSB)
app.setSpinBoxChangeFunction("satSpin", updateHSB)
app.setSpinBoxChangeFunction("briSpin", updateHSB)
app.setSpinBoxChangeFunction("kelSpin", updateHSB)
app.addScale("hueScale", 0, 2)
app.addScale("satScale", 1, 2)
app.addScale("briScale", 2, 2)
app.addScale("kelScale", 3, 2)
app.setScaleRange("hueScale", 0, 65535)
app.setScaleRange("satScale", 0, 65535)
app.setScaleRange("briScale", 0, 65535)
app.setScaleRange("kelScale", 2500, 9000)
app.setScaleChangeFunction("hueScale", updateHSB)
app.setScaleChangeFunction("satScale", updateHSB)
app.setScaleChangeFunction("briScale", updateHSB)
app.setScaleChangeFunction("kelScale", updateHSB)
# color-preview swatch, repainted by updateHSB()/listChanged()/press()
app.startLabelFrame("Bulb Color", 0, 3, 3, 3)
app.setSticky("news")
app.addLabel("bulbcolor", "", 0, 3, 3, 3)
app.setLabel("bulbcolor", " ")
app.setLabelHeight("bulbcolor", 5)
app.setLabelWidth("bulbcolor", 10)
app.setLabelBg("bulbcolor", "gray")
app.stopLabelFrame()
app.stopLabelFrame()
#-------------------------------------------
# "Waveform" frame: shape, transient flag, secondary color swatch and
# period/cycles/duty parameters, sent to the bulbs by press("Execute").
app.startLabelFrame("Waveform", 1, 1, 5, 1)
#app.setFrameWidth("Waveform",20)
#app.setSticky("news")
app.setSticky("w")
app.addRadioButton("waveform", "Saw")
app.addRadioButton("waveform", "Sine")
app.addRadioButton("waveform", "HalfSine")
app.addRadioButton("waveform", "Triangle")
app.addRadioButton("waveform", "Pulse (Strobe)")
app.setSticky("e")
app.addCheckBox("Transient", 0, 2)
app.setCheckBox("Transient")
app.addButton("Secondary Color", press, 1, 1)
app.addLabel("lblwaveformcolor", " ", 1, 2)
app.setLabelBg("lblwaveformcolor", "#FF0000")
app.setLabelWidth("lblwaveformcolor", 20)
app.addLabelEntry("Period(ms)", 2, 2)
app.setEntryWidth("Period(ms)", 6)
app.setEntry("Period(ms)", "500")
app.addLabelEntry(CYCLES, 3, 2)
app.setEntryWidth(CYCLES, 6)
app.setEntry(CYCLES, "5")
app.addLabelEntry("Duty Cycle", 4, 2)
app.setEntryWidth("Duty Cycle", 6)
app.setEntry("Duty Cycle", "0")
app.setEntryTooltip("Duty Cycle", DUTY_CYCLE_TIP)
app.setLabelTooltip("Duty Cycle", DUTY_CYCLE_TIP)
app.setEntryTooltip("Cycles", CYCLES_TIP)
app.setLabelTooltip(CYCLES, CYCLES_TIP)
app.setEntryTooltip("Period(ms)", PERIOD_TIP)
app.setLabelTooltip("Period(ms)", PERIOD_TIP)
app.setCheckBoxTooltip("Transient", TRANSIENT_TIP)
app.setSticky("ew")
app.addButton("Execute", press, 5, 0, colspan=3)
app.setButtonBg("Execute", "cyan")
app.stopLabelFrame()
#-------------------------------------------
app.stopLabelFrame()
#----------------------------------------------------
# "Bulb Details" frame: read-only dump of the selected bulb's state,
# filled in by listChanged() / press("Light").
#app.setSticky("news")
app.startLabelFrame("Bulb Details", 5, 0)
app.setSticky("ew")
app.addScrolledTextArea("Result", 0, 0)
#app.setTextAreaWidth("Result", 45)
app.setTextAreaHeight("Result", 25)
app.setTextArea("Result", test_string)
app.stopLabelFrame()
#-----------------------------------------------------
#-------------------------------------------
# "Follow Desktop" frame: toggle + capture-mode picker + transition time,
# plus the dominant-color preview label used while the follow loop runs.
app.startLabelFrame(FOLLOW_DESKTOP, 2, 0)
#app.setSticky("n")
modeList = ["-Select Region- "]
modeList.append("Whole Screen")
modeList.append("Rectangular Region")
app.setSticky("w")
app.addCheckBox(FOLLOW_DESKTOP, 0, 0)
app.setCheckBoxChangeFunction(FOLLOW_DESKTOP, followDesktopPressed)
app.addOptionBox(DESKTOP_MODE, modeList, 0, 1)
app.setOptionBoxChangeFunction(DESKTOP_MODE, modeChanged)
app.setOptionBox(DESKTOP_MODE, "Whole Screen", callFunction=False)
app.addLabelEntry(TRANSITION_TIME, 0, 2)
app.setEntryWidth(TRANSITION_TIME, 6)
app.setEntry(TRANSITION_TIME, TRANSITION_TIME_DEFAULT)
#app.startLabelFrame("Region Color", 0, 3)
app.addLabel(REGION_COLOR, "", 1, 0, colspan=5)
app.setLabel(REGION_COLOR, " Desktop Region's Dominant Color")
app.setLabelHeight(REGION_COLOR, 1)
app.setLabelBg(REGION_COLOR, "gray")
app.hideLabel(REGION_COLOR)
app.setSticky("e")
app.addCheckBox(MAX_SATURATION, 0, 3)
app.addCheckBox(MAX_BRIGHTNESS, 0, 4)
app.setCheckBoxChangeFunction(MAX_SATURATION, maxPressed)
app.setCheckBoxChangeFunction(MAX_BRIGHTNESS, maxPressed)
app.addCheckBox("Evening Mode",0,5)
#app.hideCheckBox(MAX_SATURATION)
#app.hideCheckBox(MAX_BRIGHTNESS)
app.setEntryTooltip(TRANSITION_TIME, TRANSITION_TIME_TIP)
app.setLabelTooltip(TRANSITION_TIME, TRANSITION_TIME_TIP)
app.setCheckBoxTooltip(FOLLOW_DESKTOP, FOLLOW_DESKTOP_TIP)
app.setOptionBoxTooltip(DESKTOP_MODE, DESKTOP_MODE_TIP)
app.stopLabelFrame()
#-------------------------------------------
# --- First-run config bootstrap: create lights.ini with default settings ---
if not os.path.exists(CONFIG):
    print("Creating .ini file")
    open(CONFIG, 'w').close()
    config = ConfigObj(CONFIG)
    config['expectedbulbs'] = 0
    config['Scene 1'] = "Scene 1"
    config['Scene 2'] = "Scene 2"
    config['Scene 3'] = "Scene 3"
    config['transtime'] = 200
    config['waveform'] = 'Saw'
    config['transient'] = True
    config['period'] = 500
    config['cycles'] = 5
    config['duty_cycle'] = 0
    config['secondary_color'] = "#FF0000"
    config['maxSaturation'] = False
    config['maxBrightness'] = False
    config['is_evening'] = False
    config.write()
#print(".ini file exists")
# Re-read the config; NOTE: ConfigObj returns every value as a string,
# hence the =='True' comparisons below for the boolean settings.
config = ConfigObj(CONFIG)
print("config:", config)
if 'maxSaturation' in config:
    maxSaturation = (config['maxSaturation']=='True')
    app.setCheckBox(MAX_SATURATION,ticked=(config['maxSaturation']=='True'),callFunction=False)
if 'maxBrightness' in config:
    maxBrightness = (config['maxBrightness']=='True')
    app.setCheckBox(MAX_BRIGHTNESS,ticked=(config['maxBrightness']=='True'),callFunction=False)
if 'is_evening' in config:
    app.setCheckBox("Evening Mode",ticked=(config['is_evening']=='True'),callFunction=False)
if 'waveform' in config:
    app.setRadioButton("waveform",config['waveform'])
if 'transient' in config:
    # NOTE(review): this passes the raw string (e.g. "True"/"False"), which is
    # always truthy — confirm appJar's setCheckBox handles this as intended.
    app.setCheckBox("Transient",config['transient'])
if 'period' in config:
    app.setEntry("Period(ms)",config['period'])
if 'cycles' in config:
    app.setEntry(CYCLES,config['cycles'])
if 'duty_cycle' in config:
    app.setEntry("Duty Cycle",config['duty_cycle'])
if 'secondary_color' in config:
    app.setLabelBg("lblwaveformcolor", config['secondary_color'])
if 'expectedbulbs' in config:
    app.setSpinBox("Expected Bulbs", config['expectedbulbs'])
if 'transtime' in config:
    app.setEntry(TRANSITION_TIME, config['transtime'])
if 'Scene 1' in config:
    app.setEntry("Scene 1", config["Scene 1"], callFunction=False)
if 'Scene 2' in config:
    app.setEntry("Scene 2", config["Scene 2"], callFunction=False)
if 'Scene 3' in config:
    app.setEntry("Scene 3", config["Scene 3"], callFunction=False)
#print("config['bulbs']:",config['bulbs'])
#print("type(config['bulbs']):",type(config['bulbs']))
# --- Recall previously-discovered bulbs from the pickle cache so the UI is
# usable without a fresh network scan. SECURITY NOTE: pickle.load executes
# arbitrary code if the cache file is tampered with — local file only.
if os.path.exists(PICKLE):
    bulbPickle = pkl.load(open(PICKLE, "rb")) #this reads the pickle
    #print (bulbPickle)
    bulbList.clear()
    bulbList.append("-Select Bulb-")
    for i, bulb in enumerate(bulbPickle):
        #print ("mac:",bulb['mac']);
        light = lifxlan.Light(bulb['mac'], bulb['ip'])
        light.label = bulb['label']
        bulbs.append(light)
        bulbList.append(bulb['label'])
    if len(bulbs) > 0:
        app.clearOptionBox("LIFX Bulbs", callFunction=False)
        app.changeOptionBox("LIFX Bulbs", bulbList, callFunction=False)
        app.setLabelBg("lbl2", mygreen)
        app.hideLabel("f1")
        app.setLabel("lbl2", "Recalled " + str(len(bulbs)) + " bulbs")
        app.setCheckBox("Select All")
#light = Light("12:34:56:78:9a:bc", "192.168.1.42")
#print("bulbs:",bulbs)
# Default LAN handle (no expected-bulb count); enter the Tk main loop.
lan = lifxlan.LifxLAN()
app.go()
# (dataset-join residue, not Python code: "| 34.548077 | 347 | 0.618193 |")
import sys
if sys.version_info < (3, 3):
sys.stdout.write("Sorry, This module requires Python 3.3 (or higher), not Python 2.x. You are using Python {0}.{1}\n".format(sys.version_info[0],sys.version_info[1]))
sys.exit(1)
from appJar import gui
import os
import time
import binascii
import lifxlan
import colorsys
from colour import Color
import math
import sys
from time import sleep
from lifxlan import BLUE, CYAN, GREEN, ORANGE, PINK, PURPLE, RED, YELLOW
from configobj import ConfigObj
import pickle as pkl
from random import randint
from platform import system
from PIL import Image
import appJar as aJ
import numpy as np
import cv2
from scipy.stats import itemfreq
from mss import mss
# Detect the host OS once; it drives the screenshot backend and color names.
myos = system()
if (myos == 'Windows') or (myos == 'Darwin'):
    from PIL import ImageGrab
elif (myos == 'Linux'):
    import pyscreenshot as ImageGrab
# Tk color name for "success" green differs between platforms.
if (myos == 'Windows'):
    mygreen = 'lime'
elif (myos == 'Darwin') or (myos == 'Linux') :
    mygreen = 'green'
def resource_path(relative_path):
    """Return the absolute path of a bundled resource file.

    Works both in development and when frozen by PyInstaller, which unpacks
    data files under ``sys._MEIPASS``. The development fallback is the
    directory containing this script on every platform; the old Windows
    branch used ``os.path.abspath(".")``, which silently depended on the
    process's current working directory.
    """
    base_path = getattr(sys, '_MEIPASS', os.path.dirname(os.path.abspath(__file__)))
    return os.path.join(base_path, relative_path)
# --- Tunables, tooltip texts, resource paths and widget-name constants ---
DECIMATE = 1
TRANSIENT_TIP = "If selected, return to the original color after the specified number of cycles. If not selected, set light to specified color"
PERIOD_TIP = "Period is the length of one cycle in milliseconds"
CYCLES_TIP = "Cycles is the number of times to repeat the waveform"
DUTY_CYCLE_TIP = "Duty Cycle is an integer between -32768 and 32767. Its effect is most obvious with the Pulse waveform. Set Duty Cycle to 0 to spend an equal amount of time on the original color and the new color. Set Duty Cycle to positive to spend more time on the original color. Set Duty Cycle to negative to spend more time on the new color"
EXPECTED_TIP = "Select 0 to find all available bulbs. Select any number to look for exactly that number of bulbs"
TRANSITION_TIME_TIP = "The time (in ms) that a color transition takes"
FOLLOW_DESKTOP_TIP = "Make your bulbs' color match your desktop"
DESKTOP_MODE_TIP = "Select between following the whole desktop screen or just a small portion of it (useful for letterbox movies)"
EXPECTED_BULBS = 0
TRANSITION_TIME_DEFAULT = 400
# On-disk state: settings file plus pickled bulb cache and scene snapshots.
CONFIG = resource_path("lights.ini")
PICKLE = resource_path("lifxList.pkl")
SCENE1_C = resource_path("scene1_c.pkl")
SCENE1_P = resource_path("scene1_p.pkl")
SCENE2_C = resource_path("scene2_c.pkl")
SCENE2_P = resource_path("scene2_p.pkl")
SCENE3_C = resource_path("scene3_c.pkl")
SCENE3_P = resource_path("scene3_p.pkl")
# appJar widget names used from multiple places.
CYCLES = "Cycles"
TRANSITION_TIME = "Transition Time(ms)"
FOLLOW_DESKTOP = "Follow Desktop"
DESKTOP_MODE = "Desktop Mode"
REGION_COLOR = "regioncolor"
MAX_SATURATION = "Max Saturation"
MAX_BRIGHTNESS = "Max Brightness"
# --- Mutable module-level state shared by the callbacks below ---
alreadyDone = False      # re-entrancy guard for updateHSB
config = {}              # replaced with a ConfigObj at startup
bulbs = []               # lifxlan.Light objects currently known
selected_bulb = 0        # Light selected in the option box (0 = none)
details = str(0)         # text dump of the selected bulb's state
gSelectAll = False       # 'Select All' checkbox mirror
lan = 0                  # lifxlan.LifxLAN handle (0 until discovery)
gExpectedBulbs = EXPECTED_BULBS
lifxList = []
lifxDict = {}
gwaveformcolor = "#FF0000"
is_follow = False        # Follow Desktop loop run flag
test_string = """
"""
original_colors1 = {}
original_powers1 = {}
original_colors2 = {}
original_powers2 = {}
original_colors3 = {}
original_powers3 = {}
r = None                 # screen region (left, top, width, height)
selectedMode = "Whole Screen"
maxSaturation = False
maxBrightness = False
class App(aJ.gui):
    """Thin subclass of appJar's gui exposing the Tk root's screen size."""
    def __init__(self, *args, **kwargs):
        aJ.gui.__init__(self, *args, **kwargs)
    def winfo_screenheight(self):
        # shortcut to height
        # alternatively return self.topLevel.winfo_screenheight() since topLevel is Tk (root) instance!
        return self.appWindow.winfo_screenheight()
    def winfo_screenwidth(self):
        # shortcut to width
        # alternatively return self.topLevel.winfo_screenwidth() since topLevel is Tk (root) instance!
        return self.appWindow.winfo_screenwidth()
def SceneNameChanged(name):
    """Persist a scene's new display name as soon as its entry box changes."""
    new_label = app.getEntry(name)
    config[name] = new_label
    config.write()
def Scene(name):
    """Save or restore one of the three lighting scenes.

    *name* is the appJar button identifier, e.g. ``'Save Scene 2'`` or
    ``'Restore Scene 2'``. A save snapshots every light's color and power to
    the scene's pickle pair; a restore loads them (falling back to whatever
    is already in the ``original_colors<N>``/``original_powers<N>`` globals
    when the pickles are missing) and replays them onto the lights.

    This replaces three near-identical copy/pasted save+restore branches
    with one parameterised path, and closes the pickle file handles (the
    old code leaked them via ``pkl.dump(x, open(p, "wb"))``).
    """
    print(name, "button pressed")
    if len(bulbs) < 1:
        app.errorBox("Error", "Error. No bulbs were found yet. Please click the 'Find Bulbs' button and try again.")
        return
    # Scene index -> (colors pickle, powers pickle).
    paths = {'1': (SCENE1_C, SCENE1_P), '2': (SCENE2_C, SCENE2_P), '3': (SCENE3_C, SCENE3_P)}
    idx = name.split()[-1]
    cpath, ppath = paths[idx]
    # The snapshots live in module globals named original_colorsN/powersN.
    ckey = 'original_colors' + idx
    pkey = 'original_powers' + idx
    try:
        if name.startswith('Save'):
            print("Saving Scene " + idx)
            globals()[ckey] = lan.get_color_all_lights()
            globals()[pkey] = lan.get_power_all_lights()
            with open(cpath, "wb") as fh:
                pkl.dump(globals()[ckey], fh)
            with open(ppath, "wb") as fh:
                pkl.dump(globals()[pkey], fh)
        else:
            print("Restoring Scene " + idx)
            if os.path.exists(cpath) and os.path.exists(ppath):
                with open(cpath, "rb") as fh:
                    globals()[ckey] = pkl.load(fh)
                with open(ppath, "rb") as fh:
                    globals()[pkey] = pkl.load(fh)
            colors = globals()[ckey]
            powers = globals()[pkey]
            if len(colors) == 0 or len(powers) == 0:
                print("Nothing saved yet.")
                return
            print("Restoring original color to all lights...")
            for light in colors:
                light.set_color(colors[light])
            sleep(1)
            print("Restoring original power to all lights...")
            for light in powers:
                light.set_power(powers[light])
    except Exception as e:
        print ("Ignoring error: ", str(e))
        app.errorBox("Error", str(e) + "\n\n Scene Operation failed. This feature is buggy and only works about 50% of the time. Sometimes, you can still save and restore a scene despite this error. If you keep getting this error and can not perform a 'Restore', try restarting the app then try again.")
        return
def updateSliders(hsbk):
    """Sync the HSBK spin boxes and sliders to *hsbk* (a 4-sequence),
    suppressing callbacks so updateHSB does not fire recursively."""
    for prefix, value in zip(("hue", "sat", "bri", "kel"), hsbk):
        app.setSpinBox(prefix + "Spin", int(value), callFunction=False)
        app.setScale(prefix + "Scale", int(value), callFunction=False)
def RGBtoHSBK(RGB, temperature=3500):
    """Convert an (R, G, B) triple with 0-255 channels to a LIFX HSBK tuple.

    Hue, saturation and brightness are scaled to 0-65535; *temperature*
    (Kelvin) is passed through unchanged.
    """
    high = max(RGB)
    low = min(RGB)
    span = high - low
    brightness = int((high / 255) * 65535)
    if span == 0:
        # Achromatic (gray): hue and saturation are both zero.
        return (0, 0, brightness, temperature)
    saturation = int((span / high) * 65535)
    # Per-channel distance from the maximum, normalised by the span.
    red_d, green_d, blue_d = ((high - c) / span for c in RGB)
    if RGB[0] == high:
        hue = blue_d - green_d
    elif RGB[1] == high:
        hue = 2 + red_d - blue_d
    else:
        hue = 4 + green_d - red_d
    hue = hue / 6
    if hue < 0:
        hue = hue + 1
    return (int(hue * 65535), saturation, brightness, temperature)
# function to convert the scale values to an RGB hex code
def getHSB():
    """Read the four HSBK sliders and return them as an {'H','S','B','K'} dict."""
    return {
        'H': app.getScale("hueScale"),
        'S': app.getScale("satScale"),
        'B': app.getScale("briScale"),
        'K': app.getScale("kelScale"),
    }
# funciton to update widgets
def updateHSB(name):
    """Callback shared by every HSBK slider and spin box.

    Mirrors the changed widget into its sibling (scale<->spin), repaints the
    color swatch, and pushes the new HSBK to the selected bulb(s). The widget
    *name* encodes both channel and type, e.g. "hueScale" or "satSpin".
    """
    # this stops the changes in slider/spin from constantly calling each other
    #print ("name:",name)
    global alreadyDone
    if alreadyDone:
        alreadyDone = False
        return
    else:
        alreadyDone = True
    # split the widget's name into the type & colour
    colour = name[0:3]
    widg = name[3:]
    HSB = getHSB()
    # Mirror the changed widget into its counterpart widget.
    if widg == "Scale":
        value = app.getScale(name)
        app.setSpinBox(colour + "Spin", value)
    elif widg == "Spin":
        value = app.getSpinBox(name)
        app.setScale(colour + "Scale", value)
    # Convert to 0-1 HSV to repaint the preview swatch.
    h = HSB["H"] / 65535.0;
    s = HSB["S"] / 65535.0;
    v = HSB["B"] / 65535.0;
    k = HSB["K"];
    rgb1 = hsv_to_rgb(h, s, v);
    c = Color(rgb=(rgb1[0], rgb1[1], rgb1[2]))
    app.setLabelBg("bulbcolor", c.hex_l)
    global selected_bulb
    bulbHSBK = [HSB["H"],HSB["S"],HSB["B"],k]
    # Push the color to all bulbs or just the selected one (0 = none yet).
    if gSelectAll:
        lan.set_color_all_lights(bulbHSBK, duration=0, rapid=False)
    elif selected_bulb:
        selected_bulb.set_color(bulbHSBK, duration=0, rapid=False)
def selectAllPressed(name):
    """Mirror the 'Select All' checkbox into the gSelectAll flag.

    Ticking it before any bulbs are known is refused: an error dialog is
    shown and the box is silently un-ticked again.
    """
    global gSelectAll
    if not bulbs:
        app.errorBox("Error", "Error. No bulbs were found yet. Please click the 'Find Bulbs' button and try again.")
        app.setCheckBox("Select All", ticked=False, callFunction=False)
        return
    gSelectAll = app.getCheckBox("Select All")
def expectedPressed(name):
    """Persist the 'Expected Bulbs' spin box into the config file and global."""
    global gExpectedBulbs
    count = int(app.getSpinBox("Expected Bulbs"))
    gExpectedBulbs = count
    config['expectedbulbs'] = count
    config.write()
def rgb_to_hsv(r, g, b):
    """Convert RGB to HSV.

    Hue is returned in [0, 1), saturation in [0, 1]; value keeps the input
    scale (i.e. v == max(r, g, b), not normalised). Ties on the maximum
    channel resolve in blue > green > red priority, matching the original
    dict-dispatch (last duplicate key wins).
    """
    r, g, b = float(r), float(g), float(b)
    high = max(r, g, b)
    low = min(r, g, b)
    d = high - low
    s = 0 if high == 0 else d / high
    v = high
    if high == low:
        h = 0.0
    else:
        if high == b:
            h = (r - g) / d + 4
        elif high == g:
            h = (b - r) / d + 2
        else:
            h = (g - b) / d + (6 if g < b else 0)
        h /= 6
    return h, s, v
def hsv_to_rgb(h, s, v):
    """Convert HSV (all components in [0, 1]) to an (r, g, b) triple in [0, 1]."""
    sector = math.floor(h * 6)
    frac = h * 6 - sector
    p = v * (1 - s)
    q = v * (1 - frac * s)
    t = v * (1 - (1 - frac) * s)
    # One entry per 60-degree hue sector.
    table = {
        0: (v, t, p),
        1: (q, v, p),
        2: (p, v, t),
        3: (p, q, v),
        4: (t, p, v),
        5: (v, p, q),
    }
    return table[int(sector % 6)]
def modeChanged():
    """Record the capture mode currently chosen in the option box."""
    global selectedMode
    selectedMode = app.getOptionBox("Desktop Mode")
def listChanged():
    """React to a new selection in the 'LIFX Bulbs' option box.

    Looks the chosen label up among the known bulbs, dumps the bulb's state
    into the 'Result' text area, updates the power-toggle button image and
    caption, and syncs the color swatch and HSBK sliders.
    """
    app.clearTextArea("Result");
    app.setTextArea("Result", "Loading bulb details")
    selected = (app.getOptionBox("LIFX Bulbs"))
    global bulbs
    global selected_bulb
    global details
    try:
        for bulb in bulbs:
            if (bulb.label == selected):
                selected_bulb = bulb
                details = str(selected_bulb)
                break
    except Exception as e:
        print ("Ignoring error: ", str(e))
        app.errorBox("Error", str(e))
        app.clearTextArea("Result");
        app.setTextArea("Result", str(e))
        return
    app.clearTextArea("Result")
    app.setTextArea("Result", details)
    try:
        if "Power: On" in details:
            app.setButtonImage("Light", resource_path("bulb_on.gif"))
        elif "Power: Off" in details:
            app.setButtonImage("Light", resource_path("bulb_off.gif"))
    except Exception as e:
        print ("Ignoring error:", str(e))
    app.setButton ( "Light", "Toggle " + selected )
    app.showButton("Light")
    # BUG FIX: previously queried the loop variable `bulb` (whatever bulb the
    # loop ended on) instead of the bulb the user actually selected.
    color = selected_bulb.get_color();
    h = color[0] / 65535.0;
    s = color[1] / 65535.0;
    v = color[2] / 65535.0;
    rgb1 = hsv_to_rgb(h, s, v);
    c = Color(rgb=(rgb1[0], rgb1[1], rgb1[2]))
    app.setLabelBg("bulbcolor", c.hex_l)
    updateSliders(color)
def finder():
    """Discover LIFX bulbs on the LAN, refresh the UI, and cache them to disk.

    On success the option box is repopulated, the status label turns green,
    and the (label, mac, ip) triples are pickled so the next launch can
    recall the bulbs without a scan. On failure the status label is reset
    and an error dialog is shown.
    """
    global bulbList
    global lan
    global gExpectedBulbs
    global config
    global lifxList
    global lifxDict
    bulbList.clear()
    bulbList.append("-Select Bulb-")
    try:
        global bulbs
        # 0 expected bulbs -> pass None so lifxlan discovers until timeout;
        # a concrete count lets discovery return as soon as it is reached.
        lan = lifxlan.LifxLAN(int(gExpectedBulbs) if int(gExpectedBulbs) != 0 else None)
        bulbs = lan.get_lights()
        if len(bulbs) < 1:
            app.errorBox("Error", "No bulbs found. Please try again. If you switched WiFi networks, please re-start the app and try again.")
            app.setLabelBg("lbl2", "red")
            app.setLabel("lbl2", "Found 0 bulbs")
            return
        else:
            app.setLabelBg("lbl2", mygreen)
            app.hideLabel("f1")
            app.setLabel("lbl2", "Found " + str(len(bulbs)) + " bulbs")
            app.setCheckBox("Select All")
            del lifxList[:]
            for bulb in bulbs:
                # BUG FIX: this line was a bare `l()` call, leaving `label`
                # undefined (NameError on first discovery); fetch the label
                # from the device instead.
                label = bulb.get_label()
                ip = bulb.ip_addr
                mac = bulb.mac_addr
                lifxDict['label'] = label
                lifxDict['mac'] = mac
                lifxDict['ip'] = ip
                lifxList.append(lifxDict.copy())
                bulbList.append(label)
            app.changeOptionBox("LIFX Bulbs", bulbList, callFunction=False)
            app.showButton ( "Pick Color" )
            # Close the cache file deterministically (old code leaked it).
            with open(PICKLE, "wb") as fh:
                pkl.dump(lifxList, fh)
    except Exception as e:
        print ("Ignoring error:", str(e))
        app.setLabelBg("lbl2", "gray")
        app.setLabel("lbl2", "Found 0 bulbs")
        app.errorBox("Error", str(e) + "\n\nPlease try again. If you keep getting this error, check/toggle your WiFi, ensure that 'Expected Bulbs' is either 0 or the number of bulbs you have and finally, try restarting the app")
    print ("finder() Ended")
def press(name):
    """Central button dispatcher for the main window.

    *name* is the appJar button title: bulb discovery, the all-bulb quick
    actions, the waveform 'Execute', the two color pickers, and the
    per-bulb power toggle ('Light') all land here.
    """
    global bulbs
    global details
    global gSelectAll
    global lan
    global gwaveformcolor
    global selected_bulb
    if (name == "Find Bulbs"):
        finder()
    elif (name == "All Off"):
        if len(bulbs) < 1:
            return
        lan.set_power_all_lights(False, rapid=True)
    elif (name == "All Random"):
        if len(bulbs) < 1:
            return
        selected = (app.getOptionBox("LIFX Bulbs"))
        # Give every bulb a random, fairly saturated color; if the currently
        # selected bulb is among them, mirror its new color in the UI.
        for bulb in bulbs:
            hue = (randint(0, 65535))
            sat = (randint(40000, 65535))
            bulb.set_color([hue, sat, 65535, 3500], duration=0, rapid=True)
            if (bulb.label == selected):
                h = hue / 65535.0;
                s = sat / 65535.0;
                v = 1;
                rgb1 = hsv_to_rgb(h, s, v);
                c = Color(rgb=(rgb1[0], rgb1[1], rgb1[2]))
                app.setLabelBg("bulbcolor", c.hex_l)
                updateSliders([hue,sat,65535,3500])
    elif (name == "All On"):
        if len(bulbs) < 1:
            return
        lan.set_power_all_lights(True, rapid=True)
    elif (name == "All White"):
        if len(bulbs) < 1:
            return
        lan.set_color_all_lights([0,0,65535,3500], duration=0, rapid=True)
        updateSliders([0,0,65535,3500])
        app.setLabelBg("bulbcolor", "#FFFFFF")
    elif (name == "Execute"):
        # Map the radio-button label onto lifxlan's numeric waveform code.
        waveform = app.getRadioButton("waveform")
        config['waveform'] = waveform
        if waveform == "Saw":
            waveform = 0
        elif waveform == "Sine":
            waveform = 1
        elif waveform == "HalfSine":
            waveform = 2
        elif waveform == "Triangle":
            waveform = 3
        elif waveform == "Pulse (Strobe)":
            waveform = 4
        is_transient = app.getCheckBox("Transient")
        config['transient'] = is_transient
        if (is_transient):
            is_transient = 1
        else:
            is_transient = 0
        config['secondary_color'] = gwaveformcolor
        # Convert the chosen secondary color to a LIFX HSBK list.
        c = Color(str(gwaveformcolor))
        hsv = rgb_to_hsv(c.red, c.green, c.blue)
        bulbHSBK = [hsv[0] * 65535.0,hsv[1] * 65535.0,hsv[2] * 65535.0,3500]
        # NOTE(review): these entries are raw strings from the UI; lifxlan
        # appears to accept them — confirm if waveforms ever misbehave.
        period = app.getEntry("Period(ms)")
        cycles = app.getEntry(CYCLES)
        duty_cycle = app.getEntry("Duty Cycle")
        config['period'] = period
        config['cycles'] = cycles
        config['duty_cycle'] = duty_cycle
        config.write()
        if gSelectAll:
            lan.set_waveform_all_lights(is_transient, bulbHSBK, period, cycles, duty_cycle, waveform, [1])
        elif selected_bulb:
            selected_bulb.set_waveform(is_transient, bulbHSBK, period, cycles, duty_cycle, waveform)
        else:
            app.errorBox("Error", "Error. No bulb was selected. Please select a bulb from the pull-down menu (or tick the 'Select All' checkbox) and try again.")
            return
    elif (name == "Secondary Color"):
        pickedColor = app.colourBox(colour="#FF0000")
        app.setLabelBg("lblwaveformcolor", pickedColor)
        gwaveformcolor = pickedColor
    elif (name == "Pick Color"):
        pickedColor = app.colourBox(colour="#FFFFFF")
        app.setLabelBg("bulbcolor", pickedColor)
        if pickedColor == None:
            return
        c = Color(str(pickedColor))
        hsv = rgb_to_hsv(c.red, c.green, c.blue)
        bulbHSBK = [hsv[0] * 65535.0,hsv[1] * 65535.0,hsv[2] * 65535.0,3500]
        if gSelectAll:
            lan.set_color_all_lights(bulbHSBK, duration=0, rapid=False)
        elif selected_bulb:
            selected_bulb.set_color(bulbHSBK, duration=0, rapid=False)
        else:
            app.errorBox("Error", "Error. No bulb was selected. Please select a bulb from the pull-down menu (or tick the 'Select All' checkbox) and try again.")
            return
        updateSliders(bulbHSBK)
    elif (name == "Light"):
        # Probe the selected bulb first so a missing selection fails early.
        try:
            onOff = selected_bulb.power_level;
        except Exception as e:
            print ("Ignoring error:", str(e))
            app.errorBox("Error", str(e) + "\n\nTry selecting a bulb from the list first.")
            return
        # The cached `details` text is the source of truth for on/off state.
        if "Power: Off" in details:
            selected_bulb.set_power(65535, duration=0, rapid=False)
            try:
                app.setButtonImage("Light", resource_path("bulb_on.gif"));
            except Exception as e:
                print ("Ignoring error:", str(e))
            details = details.replace("Power: Off", "Power: On");
            app.clearTextArea("Result")
            app.setTextArea("Result", details)
        else:
            selected_bulb.set_power(0, duration=0, rapid=False)
            try:
                app.setButtonImage("Light", resource_path("bulb_off.gif"));
            except Exception as e:
                print ("Ignoring error:", str(e))
            details = details.replace("Power: On", "Power: Off");
            app.clearTextArea("Result")
            app.setTextArea("Result", details)
        app.setButton ( "Light", "Toggle " + (app.getOptionBox("LIFX Bulbs")) )
        app.showButton("Light")
def rainbow_press(name):
    """Run the rainbow effect on every bulb, then restore the previous state.

    Re-discovers the LAN, snapshots each light's color and power, turns
    everything on, cycles the rainbow, and finally replays the snapshots.
    """
    global gExpectedBulbs
    global bulbs
    global lan
    try:
        print("Discovering lights...")
        lan = lifxlan.LifxLAN(int(gExpectedBulbs) if int(gExpectedBulbs) != 0 else None)
        if lan is None:
            print("Error finding bulbs")
            return
        bulbs = lan.get_lights()
        if len(bulbs) < 1:
            print("No bulbs found. Exiting.")
            return
        # Snapshot current state so it can be restored afterwards.
        original_colors = lan.get_color_all_lights()
        original_powers = lan.get_power_all_lights()
        print("Turning on all lights...")
        lan.set_power_all_lights(True)
        sleep(1)
        print("Flashy fast rainbow")
        rainbow(lan, 0.4)
        print("Restoring original color to all lights...")
        for light in original_colors:
            light.set_color(original_colors[light])
        sleep(1)
        print("Restoring original power to all lights...")
        for light in original_powers:
            light.set_power(original_powers[light])
    except Exception as e:
        print ("Ignoring error:", str(e))
def rainbow(lan, duration_secs=0.5, smooth=False):
    """Cycle every light through an eight-color rainbow, three times over.

    With *smooth* each step fades over the full dwell time; otherwise a
    fixed 500 ms transition is used. Sub-second dwell times use rapid
    (fire-and-forget) packets.
    """
    palette = [RED, ORANGE, YELLOW, GREEN, CYAN, BLUE, PURPLE, PINK]
    transition_ms = duration_secs * 1000 if smooth else 500
    rapid = duration_secs < 1
    for _ in range(3):
        for shade in palette:
            lan.set_color_all_lights(shade, transition_ms, rapid)
            sleep(duration_secs)
def maxPressed(name):
    """Persist the Max Saturation / Max Brightness checkbox that changed."""
    global maxSaturation
    global maxBrightness
    if name == MAX_SATURATION:
        maxSaturation = app.getCheckBox(MAX_SATURATION)
        print(name, " is ", maxSaturation)
        config['maxSaturation'] = maxSaturation
    elif name == MAX_BRIGHTNESS:
        maxBrightness = app.getCheckBox(MAX_BRIGHTNESS)
        print(name, " is ", maxBrightness)
        config['maxBrightness'] = maxBrightness
    config.write()
def followDesktop():
    """Background worker for Follow Desktop mode.

    While `is_follow` is set, repeatedly grabs the selected screen region
    (global `r` = (left, top, width, height)), averages it down to one
    dominant color, previews it on the REGION_COLOR label, and pushes it to
    the selected bulb(s). Throttled to at most ~10 updates per second.
    Runs on an appJar worker thread (started by followDesktopPressed).
    """
    global gSelectAll
    global lan
    global is_follow
    global selected_bulb
    global r
    global maxSaturation
    global maxBrightness
    screen_width = app.winfo_screenwidth()
    screen_height = app.winfo_screenheight()
    print("screen_width:", screen_width, " screen_height:", screen_height)
    print("Follow:", is_follow)
    # NOTE(review): getEntry returns a string; lifxlan appears to tolerate a
    # string duration — confirm before changing.
    duration = app.getEntry(TRANSITION_TIME)
    is_evening = app.getCheckBox("Evening Mode")
    config['transtime'] = duration
    config['is_evening'] = is_evening
    config.write()
    print("r:", r)
    print("Starting Loop")
    # r is (left, top, width, height); mss wants (left, top, right, bottom).
    left, top, width, height = r
    box = (left, top, left + width, top + height)
    if (is_follow):
        app.hideEntry(TRANSITION_TIME)
        app.hideOptionBox(DESKTOP_MODE)
        app.showLabel(REGION_COLOR)
        app.hideCheckBox("Evening Mode")
        sct = mss()
    while (is_follow):
        start = time.time()
        try:
            sct_img = sct.grab(box)
            image = Image.frombytes('RGB', sct_img.size, sct_img.rgb)
        except Exception as e:
            print ("Ignoring error:", str(e))
        try:
            # Downsample 10x in each dimension, then average each channel to
            # get the region's dominant color cheaply.
            pixels = np.array(image, dtype=np.float32)
            pixels = pixels[::10,::10,:]
            pixels = np.transpose(pixels)
            dominant_color = [np.mean(channel) for channel in pixels]
            c = Color(rgb=(dominant_color[0]/255, dominant_color[1]/255, dominant_color[2]/255))
            app.setLabelBg(REGION_COLOR, c.hex_l)
            (h, s, v, k) = RGBtoHSBK(dominant_color)
            if not is_evening:
                # Cooler white for saturated colors, warmer for near-grays.
                k = int(5000 - (s/65535 * 1500))
            if (maxSaturation) and (s > 6553):
                s = 65535
            if maxBrightness:
                v = 65535
            bulbHSBK = [h, s, v, k]
            try:
                if gSelectAll:
                    lan.set_color_all_lights(bulbHSBK, duration=duration, rapid=True)
                elif selected_bulb:
                    selected_bulb.set_color(bulbHSBK, duration=duration, rapid=True)
                else:
                    app.errorBox("Error", "Error. No bulb was selected. Please select a bulb from the pull-down menu (or tick the 'Select All' checkbox) and try again.")
                    # BUG FIX: was app.setCheckBox("FOLLOW_DESKTOP", False) — a
                    # literal string naming a widget that does not exist; the
                    # checkbox is registered under the FOLLOW_DESKTOP constant.
                    app.setCheckBox(FOLLOW_DESKTOP, False)
                    is_follow = False
                    return
            except Exception as e:
                print ("Ignoring error: ", str(e))
        except Exception as e:
            print("Ignoring error: ", str(e))
        # Throttle: never push faster than one update per 0.1 s.
        max_speed_sec = 0.1
        elapsed_time = time.time() - start
        wait_time = max_speed_sec - elapsed_time
        if wait_time > 0:
            sleep(wait_time)
    print("Exiting loop")
def followDesktopPressed(name):
    """Handle the Follow Desktop checkbox.

    When ticked, determines the screen region to track — either the whole
    screen, or an interactive OpenCV rectangle selection over a fresh
    screenshot — stores it in the global `r`, then (un)ticked or not,
    launches the followDesktop worker thread (the worker exits immediately
    when `is_follow` is false).
    """
    global is_follow
    global r
    global selectedMode
    is_follow = app.getCheckBox(FOLLOW_DESKTOP)
    app.showEntry(TRANSITION_TIME)
    app.showOptionBox(DESKTOP_MODE)
    app.showCheckBox("Evening Mode")
    app.hideLabel(REGION_COLOR)
    if (is_follow):
        print("Pressed:", name, " Follow:", is_follow)
        if (selectedMode == "Whole Screen"):
            print("Doing Whole Screen processing")
            screen_width = app.winfo_screenwidth()
            screen_height = app.winfo_screenheight()
            r = (0, 0, screen_width, screen_height)
        else:
            print("Doing Partial Screen processing")
            # Hide this window so the screenshot shows only the desktop.
            app.setTransparency(0)
            app.infoBox("Select Region", "A new window entitled \"Screenshot\" will pop up. Drag a rectangle around the region of interest and press ENTER . This region's dominant color will be sent to the bulbs to match. To Cancel, press c .", parent=None)
            myos = system()
            image = ImageGrab.grab()
            if (myos == 'Linux') or (myos == 'Darwin'):
                print("Mac OS detected.")
                open_cv_image = cv2.cvtColor(np.array(image), cv2.COLOR_RGB2BGR)
            elif (myos == 'Windows'):
                print("Windows OS detected.")
                open_cv_image = np.array(image)
            # Convert RGB to BGR
            im = open_cv_image[:,:,::-1].copy()
            if (myos == 'Linux') or (myos == 'Darwin'):
                # Shrink slightly so the selection window fits on screen.
                screen_width = app.winfo_screenwidth()
                screen_height = app.winfo_screenheight()
                im = cv2.resize(im, (int(screen_width * 0.9), int(screen_height * 0.9)))
                cv2.namedWindow("Screenshot", cv2.WINDOW_AUTOSIZE)
                cv2.moveWindow("Screenshot", 0, 0)
                cv2.imshow("Screenshot", im)
            elif (myos == 'Windows'):
                cv2.namedWindow("Screenshot", cv2.WINDOW_NORMAL)
            # r becomes (left, top, width, height); all zeros = cancelled.
            r = cv2.selectROI("Screenshot", im, False)
            #cv2.waitKey()
            print ("r type:", type(r))
            print("r is", r)
            if not any(r):
                print("No region selected. Exiting")
                cv2.destroyAllWindows()
                app.setCheckBox(FOLLOW_DESKTOP, False)
                is_follow = False
                app.setTransparency(1)
                return
            #cv2.waitKey(0)
            cv2.destroyAllWindows()
            app.setTransparency(1)
    app.thread(followDesktop)
bulbList = ["-None- "]
app = App("LIFX Controller")
#app = gui("LIFX Controller")
app.setStretch("both")
app.setResizable(True)
#app.setFont(12)
app.setFont(size=12, family="Arial")
app.setSticky("new")
app.startLabelFrame("", 0, 0)
app.setSticky("new")
app.startLabelFrame("Find", 0, 0)
app.setSticky("new")
app.setPadding(1)
app.addFlashLabel("f1", "Start here --->", 0, 0)
app.addButton("Find Bulbs", press, 0, 1)
expected_range = list(range(1, 20))
app.addLabelSpinBox ( "Expected Bulbs", list(reversed(range(20))), 0, 2 )
app.setSpinBox("Expected Bulbs", EXPECTED_BULBS)
gExpecteBulbs = app.getSpinBox("Expected Bulbs")
app.setSpinBoxChangeFunction("Expected Bulbs", expectedPressed)
app.setSpinBoxWidth("Expected Bulbs", 2)
app.setSpinBoxTooltip("Expected Bulbs", EXPECTED_TIP)
app.setLabelTooltip("Expected Bulbs", EXPECTED_TIP)
app.addLabel("lbl2", " ", 1, 0)
app.setLabelBg("lbl2", "white")
app.addNamedCheckBox("Select All Bulbs", "Select All", 1, 2)
app.setCheckBoxChangeFunction("Select All", selectAllPressed)
app.addOptionBox("LIFX Bulbs", bulbList, 1, 1)
app.setOptionBoxChangeFunction("LIFX Bulbs", listChanged)
app.setSticky("n")
try:
app.addImageButton("Light", press, resource_path("bulb_off.gif"), 2, 2)
except Exception as e:
print ("Ignoring error:", str(e))
#app.errorBox("Error", str(e)+"\n\nTry selecting a bulb from the list first.")
#return
app.setButton( "Light", "Toggle Selected" )
#app.setButtonHeight ( "Light", 40 )
#app.hideButton("Light")
app.stopLabelFrame()
#-------------------------------------------------------------------------------
app.startLabelFrame("Scenes", 0, 1)
app.setSticky("news")
app.addEntry("Scene 1", 0, 0)
app.setEntryChangeFunction("Scene 1", SceneNameChanged)
app.addNamedButton("Save", "Save Scene 1", Scene, 0, 1)
app.addNamedButton("Restore", "Restore Scene 1", Scene, 0, 2)
app.addEntry("Scene 2", 1, 0)
app.setEntryChangeFunction("Scene 2", SceneNameChanged)
app.addNamedButton("Save", "Save Scene 2", Scene, 1, 1)
app.addNamedButton("Restore", "Restore Scene 2", Scene, 1, 2)
app.addEntry("Scene 3", 2, 0)
app.setEntryChangeFunction("Scene 3", SceneNameChanged)
app.addNamedButton("Save", "Save Scene 3", Scene, 2, 1)
app.addNamedButton("Restore", "Restore Scene 3", Scene, 2, 2)
app.stopLabelFrame()
#-------------------------------------------------------------------------------
#app.setButtonImage("picker", resource_path("colorpicker.gif"), align=None)
###
app.setSticky("ne")
app.startLabelFrame("All LAN Bulbs", 0, 2)
app.setSticky("new")
app.addButton("All Off", press, 2, 2)
app.addButton("All On", press, 3, 2)
app.addButton("All White", press, 4, 2)
app.addButton("All Rainbow", rainbow_press, 5, 2)
app.addButton("All Random", press, 6, 2)
#app.addButton("All Waveform", rainbow_press,6,2)
app.stopLabelFrame()
#-------------------------------------------
app.setSticky("sew")
app.startLabelFrame("HSBK Values", 1, 0)
app.setSticky("news")
app.setPadding(5, 5)
app.addButton("Pick Color", press, 3, 3)
#app.hideButton ( "Pick Color" )
app.addLabel("hueLab", "Hue (H):", 0, 0)
app.addLabel("satLab", "Saturation (S):", 1, 0)
app.addLabel("briLab", "Brightness (B):", 2, 0)
app.addLabel("kelLab", "Kelvin (K) Warmth:", 3, 0)
app.setLabelAlign("hueLab", "left")
app.setLabelAlign("satLab", "left")
app.setLabelAlign("briLab", "left")
app.setLabelAlign("kelLab", "left")
app.addSpinBox("hueSpin", list(reversed(range(65536))), 0, 1)
app.addSpinBox("satSpin", list(reversed(range(65536))), 1, 1)
app.addSpinBox("briSpin", list(reversed(range(65536))), 2, 1)
app.addSpinBox("kelSpin", list(reversed(range(2500, 9001, 1))), 3, 1)
app.setSpinBox("hueSpin", 0)
app.setSpinBox("satSpin", 0)
app.setSpinBox("briSpin", 0)
app.setSpinBox("kelSpin", 3500)
app.setSpinBoxWidth("hueSpin", 5)
app.setSpinBoxWidth("satSpin", 5)
app.setSpinBoxWidth("briSpin", 5)
app.setSpinBoxWidth("kelSpin", 5)
app.setSpinBoxChangeFunction("hueSpin", updateHSB)
app.setSpinBoxChangeFunction("satSpin", updateHSB)
app.setSpinBoxChangeFunction("briSpin", updateHSB)
app.setSpinBoxChangeFunction("kelSpin", updateHSB)
app.addScale("hueScale", 0, 2)
app.addScale("satScale", 1, 2)
app.addScale("briScale", 2, 2)
app.addScale("kelScale", 3, 2)
app.setScaleRange("hueScale", 0, 65535)
app.setScaleRange("satScale", 0, 65535)
app.setScaleRange("briScale", 0, 65535)
app.setScaleRange("kelScale", 2500, 9000)
app.setScaleChangeFunction("hueScale", updateHSB)
app.setScaleChangeFunction("satScale", updateHSB)
app.setScaleChangeFunction("briScale", updateHSB)
app.setScaleChangeFunction("kelScale", updateHSB)
app.startLabelFrame("Bulb Color", 0, 3, 3, 3)
app.setSticky("news")
app.addLabel("bulbcolor", "", 0, 3, 3, 3)
app.setLabel("bulbcolor", " ")
app.setLabelHeight("bulbcolor", 5)
app.setLabelWidth("bulbcolor", 10)
app.setLabelBg("bulbcolor", "gray")
app.stopLabelFrame()
app.stopLabelFrame()
#-------------------------------------------
app.startLabelFrame("Waveform", 1, 1, 5, 1)
#app.setFrameWidth("Waveform",20)
#app.setSticky("news")
app.setSticky("w")
app.addRadioButton("waveform", "Saw")
app.addRadioButton("waveform", "Sine")
app.addRadioButton("waveform", "HalfSine")
app.addRadioButton("waveform", "Triangle")
app.addRadioButton("waveform", "Pulse (Strobe)")
app.setSticky("e")
app.addCheckBox("Transient", 0, 2)
app.setCheckBox("Transient")
app.addButton("Secondary Color", press, 1, 1)
app.addLabel("lblwaveformcolor", " ", 1, 2)
app.setLabelBg("lblwaveformcolor", "#FF0000")
app.setLabelWidth("lblwaveformcolor", 20)
app.addLabelEntry("Period(ms)", 2, 2)
app.setEntryWidth("Period(ms)", 6)
app.setEntry("Period(ms)", "500")
app.addLabelEntry(CYCLES, 3, 2)
app.setEntryWidth(CYCLES, 6)
app.setEntry(CYCLES, "5")
app.addLabelEntry("Duty Cycle", 4, 2)
app.setEntryWidth("Duty Cycle", 6)
app.setEntry("Duty Cycle", "0")
app.setEntryTooltip("Duty Cycle", DUTY_CYCLE_TIP)
app.setLabelTooltip("Duty Cycle", DUTY_CYCLE_TIP)
app.setEntryTooltip("Cycles", CYCLES_TIP)
app.setLabelTooltip(CYCLES, CYCLES_TIP)
app.setEntryTooltip("Period(ms)", PERIOD_TIP)
app.setLabelTooltip("Period(ms)", PERIOD_TIP)
app.setCheckBoxTooltip("Transient", TRANSIENT_TIP)
app.setSticky("ew")
app.addButton("Execute", press, 5, 0, colspan=3)
app.setButtonBg("Execute", "cyan")
app.stopLabelFrame()
#-------------------------------------------
app.stopLabelFrame()
#----------------------------------------------------
#app.setSticky("news")
app.startLabelFrame("Bulb Details", 5, 0)
app.setSticky("ew")
app.addScrolledTextArea("Result", 0, 0)
#app.setTextAreaWidth("Result", 45)
app.setTextAreaHeight("Result", 25)
app.setTextArea("Result", test_string)
app.stopLabelFrame()
#-----------------------------------------------------
#-------------------------------------------
app.startLabelFrame(FOLLOW_DESKTOP, 2, 0)
#app.setSticky("n")
modeList = ["-Select Region- "]
modeList.append("Whole Screen")
modeList.append("Rectangular Region")
app.setSticky("w")
app.addCheckBox(FOLLOW_DESKTOP, 0, 0)
app.setCheckBoxChangeFunction(FOLLOW_DESKTOP, followDesktopPressed)
app.addOptionBox(DESKTOP_MODE, modeList, 0, 1)
app.setOptionBoxChangeFunction(DESKTOP_MODE, modeChanged)
app.setOptionBox(DESKTOP_MODE, "Whole Screen", callFunction=False)
app.addLabelEntry(TRANSITION_TIME, 0, 2)
app.setEntryWidth(TRANSITION_TIME, 6)
app.setEntry(TRANSITION_TIME, TRANSITION_TIME_DEFAULT)
#app.startLabelFrame("Region Color", 0, 3)
app.addLabel(REGION_COLOR, "", 1, 0, colspan=5)
app.setLabel(REGION_COLOR, " Desktop Region's Dominant Color")
app.setLabelHeight(REGION_COLOR, 1)
app.setLabelBg(REGION_COLOR, "gray")
app.hideLabel(REGION_COLOR)
app.setSticky("e")
app.addCheckBox(MAX_SATURATION, 0, 3)
app.addCheckBox(MAX_BRIGHTNESS, 0, 4)
app.setCheckBoxChangeFunction(MAX_SATURATION, maxPressed)
app.setCheckBoxChangeFunction(MAX_BRIGHTNESS, maxPressed)
app.addCheckBox("Evening Mode",0,5)
app.setEntryTooltip(TRANSITION_TIME, TRANSITION_TIME_TIP)
app.setLabelTooltip(TRANSITION_TIME, TRANSITION_TIME_TIP)
app.setCheckBoxTooltip(FOLLOW_DESKTOP, FOLLOW_DESKTOP_TIP)
app.setOptionBoxTooltip(DESKTOP_MODE, DESKTOP_MODE_TIP)
app.stopLabelFrame()
if not os.path.exists(CONFIG):
print("Creating .ini file")
open(CONFIG, 'w').close()
config = ConfigObj(CONFIG)
config['expectedbulbs'] = 0
config['Scene 1'] = "Scene 1"
config['Scene 2'] = "Scene 2"
config['Scene 3'] = "Scene 3"
config['transtime'] = 200
config['waveform'] = 'Saw'
config['transient'] = True
config['period'] = 500
config['cycles'] = 5
config['duty_cycle'] = 0
config['secondary_color'] = "#FF0000"
config['maxSaturation'] = False
config['maxBrightness'] = False
config['is_evening'] = False
config.write()
config = ConfigObj(CONFIG)
print("config:", config)
if 'maxSaturation' in config:
maxSaturation = (config['maxSaturation']=='True')
app.setCheckBox(MAX_SATURATION,ticked=(config['maxSaturation']=='True'),callFunction=False)
if 'maxBrightness' in config:
maxBrightness = (config['maxBrightness']=='True')
app.setCheckBox(MAX_BRIGHTNESS,ticked=(config['maxBrightness']=='True'),callFunction=False)
if 'is_evening' in config:
app.setCheckBox("Evening Mode",ticked=(config['is_evening']=='True'),callFunction=False)
if 'waveform' in config:
app.setRadioButton("waveform",config['waveform'])
if 'transient' in config:
app.setCheckBox("Transient",config['transient'])
if 'period' in config:
app.setEntry("Period(ms)",config['period'])
if 'cycles' in config:
app.setEntry(CYCLES,config['cycles'])
if 'duty_cycle' in config:
app.setEntry("Duty Cycle",config['duty_cycle'])
if 'secondary_color' in config:
app.setLabelBg("lblwaveformcolor", config['secondary_color'])
if 'expectedbulbs' in config:
app.setSpinBox("Expected Bulbs", config['expectedbulbs'])
if 'transtime' in config:
app.setEntry(TRANSITION_TIME, config['transtime'])
if 'Scene 1' in config:
app.setEntry("Scene 1", config["Scene 1"], callFunction=False)
if 'Scene 2' in config:
app.setEntry("Scene 2", config["Scene 2"], callFunction=False)
if 'Scene 3' in config:
app.setEntry("Scene 3", config["Scene 3"], callFunction=False)
if os.path.exists(PICKLE):
bulbPickle = pkl.load(open(PICKLE, "rb"))
bulbList.clear()
bulbList.append("-Select Bulb-")
for i, bulb in enumerate(bulbPickle):
light = lifxlan.Light(bulb['mac'], bulb['ip'])
light.label = bulb['label']
bulbs.append(light)
bulbList.append(bulb['label'])
if len(bulbs) > 0:
app.clearOptionBox("LIFX Bulbs", callFunction=False)
app.changeOptionBox("LIFX Bulbs", bulbList, callFunction=False)
app.setLabelBg("lbl2", mygreen)
app.hideLabel("f1")
app.setLabel("lbl2", "Recalled " + str(len(bulbs)) + " bulbs")
app.setCheckBox("Select All")
lan = lifxlan.LifxLAN()
app.go()
| true | true |
1c3b0fd15cdb829b31c47e897d0ef87b98cab386 | 3,472 | py | Python | IoTSocketServer/iot_logger.py | senceryazici/iot-socket-application-nodemcu | 41449fbfd345bca40ea140011a33b34fe00a3029 | [
"MIT"
] | null | null | null | IoTSocketServer/iot_logger.py | senceryazici/iot-socket-application-nodemcu | 41449fbfd345bca40ea140011a33b34fe00a3029 | [
"MIT"
] | null | null | null | IoTSocketServer/iot_logger.py | senceryazici/iot-socket-application-nodemcu | 41449fbfd345bca40ea140011a33b34fe00a3029 | [
"MIT"
] | null | null | null | import logging
import time
BLACK, RED, GREEN, YELLOW, BLUE, MAGENTA, CYAN, WHITE = range(8)
# The background is set with 40 plus the number of the color, and the foreground with 30
# These are the sequences need to get colored ouput
RESET_SEQ = "\033[0m"
COLOR_SEQ = "\033[1;%dm"
BOLD_SEQ = "\033[1m"
def formatter_message(message, use_color=True):
if use_color:
message = message.replace("$RESET", RESET_SEQ).replace("$BOLD", BOLD_SEQ)
else:
message = message.replace("$RESET", "").replace("$BOLD", "")
return message
COLORS = {
'WARNING': YELLOW,
'INFO': WHITE,
'DEBUG': BLUE,
'CRITICAL': YELLOW,
'ERROR': RED
}
class ColoredFormatter(logging.Formatter):
def __init__(self, msg, use_color=True):
logging.Formatter.__init__(self, msg)
self.use_color = use_color
def format(self, record):
levelname = record.levelname
if self.use_color and levelname in COLORS:
levelname_color = COLOR_SEQ % (30 + COLORS[levelname]) + levelname + RESET_SEQ
record.levelname = levelname_color
return logging.Formatter.format(self, record)
class IotServerLogger(logging.Logger):
FORMAT = "[$BOLD%(name)-20s$RESET][%(levelname)-18s] %(message)s ($BOLD%(filename)s$RESET:%(lineno)d)"
COLOR_FORMAT = formatter_message(FORMAT, True)
STANDARD_FORMAT = "[%(asctime)s][%(levelname)-18s] %(message)s"
def __init__(self, name, log_path):
logging.Logger.__init__(self, name, logging.DEBUG)
color_formatter = ColoredFormatter(self.COLOR_FORMAT)
console = logging.StreamHandler()
console.setFormatter(color_formatter)
time_string = time.strftime("iot-host-%d-%m-%Y-%H:%M:%S", time.localtime())
t = time.time()
millisecond = (t - int(t)) * 1000
time_string += ":" + str(millisecond)
import os
a = os.path.abspath(log_path)
print(a)
if not os.path.exists(log_path + "/log"):
os.system("mkdir -p " + log_path + "/log")
fh = logging.FileHandler(log_path + "/log/" + time_string + '.log')
file_formatter = logging.Formatter(self.STANDARD_FORMAT)
fh.setFormatter(file_formatter)
fh.setLevel(logging.DEBUG)
self.addHandler(fh)
self.addHandler(console)
return
class IotClientLogger(logging.Logger):
FORMAT = "[$BOLD%(name)-20s$RESET][%(levelname)-18s] %(message)s ($BOLD%(filename)s$RESET:%(lineno)d)"
COLOR_FORMAT = formatter_message(FORMAT, True)
STANDARD_FORMAT = "[%(asctime)s][%(levelname)-18s] %(message)s"
def __init__(self, name, log_path):
logging.Logger.__init__(self, name, logging.DEBUG)
color_formatter = ColoredFormatter(self.COLOR_FORMAT)
console = logging.StreamHandler()
console.setFormatter(color_formatter)
time_string = time.strftime(name + "-%d-%m-%Y-%H:%M:%S", time.localtime())
t = time.time()
millisecond = (t - int(t)) * 1000
time_string += ":" + str(millisecond)
import os
if not os.path.exists(log_path + "/log"):
os.system("mkdir -p " + log_path + "/log")
fh = logging.FileHandler(log_path + "/log/" + time_string + '.log')
file_formatter = logging.Formatter(self.STANDARD_FORMAT)
fh.setFormatter(file_formatter)
fh.setLevel(logging.DEBUG)
self.addHandler(fh)
self.addHandler(console)
return
| 33.066667 | 107 | 0.635657 | import logging
import time
BLACK, RED, GREEN, YELLOW, BLUE, MAGENTA, CYAN, WHITE = range(8)
RESET_SEQ = "\033[0m"
COLOR_SEQ = "\033[1;%dm"
BOLD_SEQ = "\033[1m"
def formatter_message(message, use_color=True):
if use_color:
message = message.replace("$RESET", RESET_SEQ).replace("$BOLD", BOLD_SEQ)
else:
message = message.replace("$RESET", "").replace("$BOLD", "")
return message
COLORS = {
'WARNING': YELLOW,
'INFO': WHITE,
'DEBUG': BLUE,
'CRITICAL': YELLOW,
'ERROR': RED
}
class ColoredFormatter(logging.Formatter):
def __init__(self, msg, use_color=True):
logging.Formatter.__init__(self, msg)
self.use_color = use_color
def format(self, record):
levelname = record.levelname
if self.use_color and levelname in COLORS:
levelname_color = COLOR_SEQ % (30 + COLORS[levelname]) + levelname + RESET_SEQ
record.levelname = levelname_color
return logging.Formatter.format(self, record)
class IotServerLogger(logging.Logger):
FORMAT = "[$BOLD%(name)-20s$RESET][%(levelname)-18s] %(message)s ($BOLD%(filename)s$RESET:%(lineno)d)"
COLOR_FORMAT = formatter_message(FORMAT, True)
STANDARD_FORMAT = "[%(asctime)s][%(levelname)-18s] %(message)s"
def __init__(self, name, log_path):
logging.Logger.__init__(self, name, logging.DEBUG)
color_formatter = ColoredFormatter(self.COLOR_FORMAT)
console = logging.StreamHandler()
console.setFormatter(color_formatter)
time_string = time.strftime("iot-host-%d-%m-%Y-%H:%M:%S", time.localtime())
t = time.time()
millisecond = (t - int(t)) * 1000
time_string += ":" + str(millisecond)
import os
a = os.path.abspath(log_path)
print(a)
if not os.path.exists(log_path + "/log"):
os.system("mkdir -p " + log_path + "/log")
fh = logging.FileHandler(log_path + "/log/" + time_string + '.log')
file_formatter = logging.Formatter(self.STANDARD_FORMAT)
fh.setFormatter(file_formatter)
fh.setLevel(logging.DEBUG)
self.addHandler(fh)
self.addHandler(console)
return
class IotClientLogger(logging.Logger):
FORMAT = "[$BOLD%(name)-20s$RESET][%(levelname)-18s] %(message)s ($BOLD%(filename)s$RESET:%(lineno)d)"
COLOR_FORMAT = formatter_message(FORMAT, True)
STANDARD_FORMAT = "[%(asctime)s][%(levelname)-18s] %(message)s"
def __init__(self, name, log_path):
logging.Logger.__init__(self, name, logging.DEBUG)
color_formatter = ColoredFormatter(self.COLOR_FORMAT)
console = logging.StreamHandler()
console.setFormatter(color_formatter)
time_string = time.strftime(name + "-%d-%m-%Y-%H:%M:%S", time.localtime())
t = time.time()
millisecond = (t - int(t)) * 1000
time_string += ":" + str(millisecond)
import os
if not os.path.exists(log_path + "/log"):
os.system("mkdir -p " + log_path + "/log")
fh = logging.FileHandler(log_path + "/log/" + time_string + '.log')
file_formatter = logging.Formatter(self.STANDARD_FORMAT)
fh.setFormatter(file_formatter)
fh.setLevel(logging.DEBUG)
self.addHandler(fh)
self.addHandler(console)
return
| true | true |
1c3b101548134ebcc6df1626e2c04c463e13c2e3 | 405 | py | Python | backend/quadrict_30065/wsgi.py | crowdbotics-apps/quadrict-30065 | f214ef8583b95ead2dcb36eb652a60ace7670a3b | [
"FTL",
"AML",
"RSA-MD"
] | null | null | null | backend/quadrict_30065/wsgi.py | crowdbotics-apps/quadrict-30065 | f214ef8583b95ead2dcb36eb652a60ace7670a3b | [
"FTL",
"AML",
"RSA-MD"
] | 13 | 2021-08-30T01:19:08.000Z | 2021-10-05T18:55:12.000Z | backend/quadrict_30065/wsgi.py | crowdbotics-apps/quadrict-30065 | f214ef8583b95ead2dcb36eb652a60ace7670a3b | [
"FTL",
"AML",
"RSA-MD"
] | null | null | null | """
WSGI config for quadrict_30065 project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'quadrict_30065.settings')
application = get_wsgi_application()
| 23.823529 | 78 | 0.792593 |
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'quadrict_30065.settings')
application = get_wsgi_application()
| true | true |
1c3b102f0650f6539202d916746b5ca4a49f734e | 4,773 | py | Python | 04_Support_Vector_Machines/02_Working_with_Linear_SVMs/02_linear_svm.py | hustcxl/tensorflow_cookbook | 26673fe74eb9c9eb0824e2dfdf5ef3090bf09a57 | [
"MIT"
] | 93 | 2018-05-27T08:07:02.000Z | 2022-02-28T11:18:08.000Z | 04_Support_Vector_Machines/02_Working_with_Linear_SVMs/02_linear_svm.py | tensorzhangzheng/tensorflow_cookbook | 4f57ea4ad79c8111fb29bad3da5d151858c6a050 | [
"MIT"
] | null | null | null | 04_Support_Vector_Machines/02_Working_with_Linear_SVMs/02_linear_svm.py | tensorzhangzheng/tensorflow_cookbook | 4f57ea4ad79c8111fb29bad3da5d151858c6a050 | [
"MIT"
] | 75 | 2018-06-22T08:02:03.000Z | 2022-03-10T14:38:44.000Z | # Linear Support Vector Machine: Soft Margin
# ----------------------------------
#
# This function shows how to use TensorFlow to
# create a soft margin SVM
#
# We will use the iris data, specifically:
# x1 = Sepal Length
# x2 = Petal Width
# Class 1 : I. setosa
# Class -1: not I. setosa
#
# We know here that x and y are linearly seperable
# for I. setosa classification.
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from sklearn import datasets
from tensorflow.python.framework import ops
ops.reset_default_graph()
# Set random seeds
np.random.seed(7)
tf.set_random_seed(7)
# Create graph
sess = tf.Session()
# Load the data
# iris.data = [(Sepal Length, Sepal Width, Petal Length, Petal Width)]
iris = datasets.load_iris()
x_vals = np.array([[x[0], x[3]] for x in iris.data])
y_vals = np.array([1 if y == 0 else -1 for y in iris.target])
# Split data into train/test sets
train_indices = np.random.choice(len(x_vals),
round(len(x_vals)*0.9),
replace=False)
test_indices = np.array(list(set(range(len(x_vals))) - set(train_indices)))
x_vals_train = x_vals[train_indices]
x_vals_test = x_vals[test_indices]
y_vals_train = y_vals[train_indices]
y_vals_test = y_vals[test_indices]
# Declare batch size
batch_size = 135
# Initialize placeholders
x_data = tf.placeholder(shape=[None, 2], dtype=tf.float32)
y_target = tf.placeholder(shape=[None, 1], dtype=tf.float32)
# Create variables for linear regression
A = tf.Variable(tf.random_normal(shape=[2, 1]))
b = tf.Variable(tf.random_normal(shape=[1, 1]))
# Declare model operations
model_output = tf.subtract(tf.matmul(x_data, A), b)
# Declare vector L2 'norm' function squared
l2_norm = tf.reduce_sum(tf.square(A))
# Declare loss function
# Loss = max(0, 1-pred*actual) + alpha * L2_norm(A)^2
# L2 regularization parameter, alpha
alpha = tf.constant([0.01])
# Margin term in loss
classification_term = tf.reduce_mean(tf.maximum(0., tf.subtract(1., tf.multiply(model_output, y_target))))
# Put terms together
loss = tf.add(classification_term, tf.multiply(alpha, l2_norm))
# Declare prediction function
prediction = tf.sign(model_output)
accuracy = tf.reduce_mean(tf.cast(tf.equal(prediction, y_target), tf.float32))
# Declare optimizer
my_opt = tf.train.GradientDescentOptimizer(0.01)
train_step = my_opt.minimize(loss)
# Initialize variables
init = tf.global_variables_initializer()
sess.run(init)
# Training loop
loss_vec = []
train_accuracy = []
test_accuracy = []
for i in range(500):
rand_index = np.random.choice(len(x_vals_train), size=batch_size)
rand_x = x_vals_train[rand_index]
rand_y = np.transpose([y_vals_train[rand_index]])
sess.run(train_step, feed_dict={x_data: rand_x, y_target: rand_y})
temp_loss = sess.run(loss, feed_dict={x_data: rand_x, y_target: rand_y})
loss_vec.append(temp_loss)
train_acc_temp = sess.run(accuracy, feed_dict={
x_data: x_vals_train,
y_target: np.transpose([y_vals_train])})
train_accuracy.append(train_acc_temp)
test_acc_temp = sess.run(accuracy, feed_dict={
x_data: x_vals_test,
y_target: np.transpose([y_vals_test])})
test_accuracy.append(test_acc_temp)
if (i + 1) % 100 == 0:
print('Step #{} A = {}, b = {}'.format(
str(i+1),
str(sess.run(A)),
str(sess.run(b))
))
print('Loss = ' + str(temp_loss))
# Extract coefficients
[[a1], [a2]] = sess.run(A)
[[b]] = sess.run(b)
slope = -a2/a1
y_intercept = b/a1
# Extract x1 and x2 vals
x1_vals = [d[1] for d in x_vals]
# Get best fit line
best_fit = []
for i in x1_vals:
best_fit.append(slope*i+y_intercept)
# Separate I. setosa
setosa_x = [d[1] for i, d in enumerate(x_vals) if y_vals[i] == 1]
setosa_y = [d[0] for i, d in enumerate(x_vals) if y_vals[i] == 1]
not_setosa_x = [d[1] for i, d in enumerate(x_vals) if y_vals[i] == -1]
not_setosa_y = [d[0] for i, d in enumerate(x_vals) if y_vals[i] == -1]
# Plot data and line
plt.plot(setosa_x, setosa_y, 'o', label='I. setosa')
plt.plot(not_setosa_x, not_setosa_y, 'x', label='Non-setosa')
plt.plot(x1_vals, best_fit, 'r-', label='Linear Separator', linewidth=3)
plt.ylim([0, 10])
plt.legend(loc='lower right')
plt.title('Sepal Length vs Pedal Width')
plt.xlabel('Pedal Width')
plt.ylabel('Sepal Length')
plt.show()
# Plot train/test accuracies
plt.plot(train_accuracy, 'k-', label='Training Accuracy')
plt.plot(test_accuracy, 'r--', label='Test Accuracy')
plt.title('Train and Test Set Accuracies')
plt.xlabel('Generation')
plt.ylabel('Accuracy')
plt.legend(loc='lower right')
plt.show()
# Plot loss over time
plt.plot(loss_vec, 'k-')
plt.title('Loss per Generation')
plt.xlabel('Generation')
plt.ylabel('Loss')
plt.show()
| 29.645963 | 106 | 0.691599 |
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from sklearn import datasets
from tensorflow.python.framework import ops
ops.reset_default_graph()
np.random.seed(7)
tf.set_random_seed(7)
sess = tf.Session()
iris = datasets.load_iris()
x_vals = np.array([[x[0], x[3]] for x in iris.data])
y_vals = np.array([1 if y == 0 else -1 for y in iris.target])
train_indices = np.random.choice(len(x_vals),
round(len(x_vals)*0.9),
replace=False)
test_indices = np.array(list(set(range(len(x_vals))) - set(train_indices)))
x_vals_train = x_vals[train_indices]
x_vals_test = x_vals[test_indices]
y_vals_train = y_vals[train_indices]
y_vals_test = y_vals[test_indices]
batch_size = 135
x_data = tf.placeholder(shape=[None, 2], dtype=tf.float32)
y_target = tf.placeholder(shape=[None, 1], dtype=tf.float32)
A = tf.Variable(tf.random_normal(shape=[2, 1]))
b = tf.Variable(tf.random_normal(shape=[1, 1]))
model_output = tf.subtract(tf.matmul(x_data, A), b)
l2_norm = tf.reduce_sum(tf.square(A))
alpha = tf.constant([0.01])
classification_term = tf.reduce_mean(tf.maximum(0., tf.subtract(1., tf.multiply(model_output, y_target))))
loss = tf.add(classification_term, tf.multiply(alpha, l2_norm))
prediction = tf.sign(model_output)
accuracy = tf.reduce_mean(tf.cast(tf.equal(prediction, y_target), tf.float32))
my_opt = tf.train.GradientDescentOptimizer(0.01)
train_step = my_opt.minimize(loss)
init = tf.global_variables_initializer()
sess.run(init)
loss_vec = []
train_accuracy = []
test_accuracy = []
for i in range(500):
rand_index = np.random.choice(len(x_vals_train), size=batch_size)
rand_x = x_vals_train[rand_index]
rand_y = np.transpose([y_vals_train[rand_index]])
sess.run(train_step, feed_dict={x_data: rand_x, y_target: rand_y})
temp_loss = sess.run(loss, feed_dict={x_data: rand_x, y_target: rand_y})
loss_vec.append(temp_loss)
train_acc_temp = sess.run(accuracy, feed_dict={
x_data: x_vals_train,
y_target: np.transpose([y_vals_train])})
train_accuracy.append(train_acc_temp)
test_acc_temp = sess.run(accuracy, feed_dict={
x_data: x_vals_test,
y_target: np.transpose([y_vals_test])})
test_accuracy.append(test_acc_temp)
if (i + 1) % 100 == 0:
print('Step #{} A = {}, b = {}'.format(
str(i+1),
str(sess.run(A)),
str(sess.run(b))
))
print('Loss = ' + str(temp_loss))
[[a1], [a2]] = sess.run(A)
[[b]] = sess.run(b)
slope = -a2/a1
y_intercept = b/a1
x1_vals = [d[1] for d in x_vals]
best_fit = []
for i in x1_vals:
best_fit.append(slope*i+y_intercept)
setosa_x = [d[1] for i, d in enumerate(x_vals) if y_vals[i] == 1]
setosa_y = [d[0] for i, d in enumerate(x_vals) if y_vals[i] == 1]
not_setosa_x = [d[1] for i, d in enumerate(x_vals) if y_vals[i] == -1]
not_setosa_y = [d[0] for i, d in enumerate(x_vals) if y_vals[i] == -1]
plt.plot(setosa_x, setosa_y, 'o', label='I. setosa')
plt.plot(not_setosa_x, not_setosa_y, 'x', label='Non-setosa')
plt.plot(x1_vals, best_fit, 'r-', label='Linear Separator', linewidth=3)
plt.ylim([0, 10])
plt.legend(loc='lower right')
plt.title('Sepal Length vs Pedal Width')
plt.xlabel('Pedal Width')
plt.ylabel('Sepal Length')
plt.show()
plt.plot(train_accuracy, 'k-', label='Training Accuracy')
plt.plot(test_accuracy, 'r--', label='Test Accuracy')
plt.title('Train and Test Set Accuracies')
plt.xlabel('Generation')
plt.ylabel('Accuracy')
plt.legend(loc='lower right')
plt.show()
plt.plot(loss_vec, 'k-')
plt.title('Loss per Generation')
plt.xlabel('Generation')
plt.ylabel('Loss')
plt.show()
| true | true |
1c3b10e4010c29804569e7af288359aa014a8e66 | 662 | py | Python | sila_cetoni/motioncontrol/axis/sila/axis_service/generated/axissystemcontrolservice/__init__.py | CETONI-Software/sila_cetoni_motioncontrol | d216c7f673eabe9fdba7833fd476330a5b0ef782 | [
"BSD-3-Clause"
] | 1 | 2022-03-08T13:39:52.000Z | 2022-03-08T13:39:52.000Z | sila_cetoni/motioncontrol/axis/sila/axis_service/generated/axissystemcontrolservice/__init__.py | CETONI-Software/sila_cetoni_motioncontrol | d216c7f673eabe9fdba7833fd476330a5b0ef782 | [
"BSD-3-Clause"
] | null | null | null | sila_cetoni/motioncontrol/axis/sila/axis_service/generated/axissystemcontrolservice/__init__.py | CETONI-Software/sila_cetoni_motioncontrol | d216c7f673eabe9fdba7833fd476330a5b0ef782 | [
"BSD-3-Clause"
] | null | null | null | # Generated by sila2.code_generator; sila2.__version__: 0.8.0
from .axissystemcontrolservice_base import AxisSystemControlServiceBase
from .axissystemcontrolservice_client import AxisSystemControlServiceClient
from .axissystemcontrolservice_feature import AxisSystemControlServiceFeature
from .axissystemcontrolservice_types import (
ClearFaultState_Responses,
DisableAxisSystem_Responses,
EnableAxisSystem_Responses,
)
__all__ = [
"AxisSystemControlServiceBase",
"AxisSystemControlServiceFeature",
"AxisSystemControlServiceClient",
"EnableAxisSystem_Responses",
"DisableAxisSystem_Responses",
"ClearFaultState_Responses",
]
| 34.842105 | 77 | 0.833837 |
from .axissystemcontrolservice_base import AxisSystemControlServiceBase
from .axissystemcontrolservice_client import AxisSystemControlServiceClient
from .axissystemcontrolservice_feature import AxisSystemControlServiceFeature
from .axissystemcontrolservice_types import (
ClearFaultState_Responses,
DisableAxisSystem_Responses,
EnableAxisSystem_Responses,
)
__all__ = [
"AxisSystemControlServiceBase",
"AxisSystemControlServiceFeature",
"AxisSystemControlServiceClient",
"EnableAxisSystem_Responses",
"DisableAxisSystem_Responses",
"ClearFaultState_Responses",
]
| true | true |
1c3b113d2ea69701dba48af5b1f804af8611acd0 | 416 | py | Python | filestorage/migrations/0002_auto_20200618_1223.py | AlexGolovaschenko/OwenAgriculture | 4d393da3736d0a71b1d25b720ed16af38013b682 | [
"Apache-2.0"
] | null | null | null | filestorage/migrations/0002_auto_20200618_1223.py | AlexGolovaschenko/OwenAgriculture | 4d393da3736d0a71b1d25b720ed16af38013b682 | [
"Apache-2.0"
] | 7 | 2021-03-19T03:36:56.000Z | 2022-01-13T02:44:37.000Z | filestorage/migrations/0002_auto_20200618_1223.py | AlexGolovaschenko/OwenAgriculture | 4d393da3736d0a71b1d25b720ed16af38013b682 | [
"Apache-2.0"
] | null | null | null | # Generated by Django 3.0.6 on 2020-06-18 09:23
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('filestorage', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='file',
name='file',
field=models.FileField(upload_to='filestorage/%Y/%m/%d/', verbose_name='Файл'),
),
]
| 21.894737 | 91 | 0.59375 |
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('filestorage', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='file',
name='file',
field=models.FileField(upload_to='filestorage/%Y/%m/%d/', verbose_name='Файл'),
),
]
| true | true |
1c3b11844b45ea5660db5b77a3b02ff5a2712589 | 11,063 | py | Python | numpy/__init__.py | ruppysuppy/numpy | a89f3ebaec7441f4ba5e30eb07206c2a7269778e | [
"BSD-3-Clause"
] | 2 | 2022-02-02T05:40:47.000Z | 2022-03-05T11:04:24.000Z | numpy/__init__.py | ruppysuppy/numpy | a89f3ebaec7441f4ba5e30eb07206c2a7269778e | [
"BSD-3-Clause"
] | null | null | null | numpy/__init__.py | ruppysuppy/numpy | a89f3ebaec7441f4ba5e30eb07206c2a7269778e | [
"BSD-3-Clause"
] | null | null | null | """
NumPy
=====
Provides
1. An array object of arbitrary homogeneous items
2. Fast mathematical operations over arrays
3. Linear Algebra, Fourier Transforms, Random Number Generation
How to use the documentation
----------------------------
Documentation is available in two forms: docstrings provided
with the code, and a loose standing reference guide, available from
`the NumPy homepage <https://www.scipy.org>`_.
We recommend exploring the docstrings using
`IPython <https://ipython.org>`_, an advanced Python shell with
TAB-completion and introspection capabilities. See below for further
instructions.
The docstring examples assume that `numpy` has been imported as `np`::
>>> import numpy as np
Code snippets are indicated by three greater-than signs::
>>> x = 42
>>> x = x + 1
Use the built-in ``help`` function to view a function's docstring::
>>> help(np.sort)
... # doctest: +SKIP
For some objects, ``np.info(obj)`` may provide additional help. This is
particularly true if you see the line "Help on ufunc object:" at the top
of the help() page. Ufuncs are implemented in C, not Python, for speed.
The native Python help() does not know how to view their help, but our
np.info() function does.
To search for documents containing a keyword, do::
>>> np.lookfor('keyword')
... # doctest: +SKIP
General-purpose documents like a glossary and help on the basic concepts
of numpy are available under the ``doc`` sub-module::
>>> from numpy import doc
>>> help(doc)
... # doctest: +SKIP
Available subpackages
---------------------
doc
Topical documentation on broadcasting, indexing, etc.
lib
Basic functions used by several sub-packages.
random
Core Random Tools
linalg
Core Linear Algebra Tools
fft
Core FFT routines
polynomial
Polynomial tools
testing
NumPy testing tools
f2py
Fortran to Python Interface Generator.
distutils
Enhancements to distutils with support for
Fortran compilers support and more.
Utilities
---------
test
Run numpy unittests
show_config
Show numpy build configuration
dual
Overwrite certain functions with high-performance SciPy tools.
Note: `numpy.dual` is deprecated. Use the functions from NumPy or Scipy
directly instead of importing them from `numpy.dual`.
matlib
Make everything matrices.
__version__
NumPy version string
Viewing documentation using IPython
-----------------------------------
Start IPython with the NumPy profile (``ipython -p numpy``), which will
import `numpy` under the alias `np`. Then, use the ``cpaste`` command to
paste examples into the shell. To see which functions are available in
`numpy`, type ``np.<TAB>`` (where ``<TAB>`` refers to the TAB key), or use
``np.*cos*?<ENTER>`` (where ``<ENTER>`` refers to the ENTER key) to narrow
down the list. To view the docstring for a function, use
``np.cos?<ENTER>`` (to view the docstring) and ``np.cos??<ENTER>`` (to view
the source code).
Copies vs. in-place operation
-----------------------------
Most of the functions in `numpy` return a copy of the array argument
(e.g., `np.sort`). In-place versions of these functions are often
available as array methods, i.e. ``x = np.array([1,2,3]); x.sort()``.
Exceptions to this rule are documented.
"""
import sys
import warnings
from ._globals import ModuleDeprecationWarning, VisibleDeprecationWarning
from ._globals import _NoValue
# We first need to detect if we're being called as part of the numpy setup
# procedure itself in a reliable manner.
try:
__NUMPY_SETUP__
except NameError:
__NUMPY_SETUP__ = False
if __NUMPY_SETUP__:
sys.stderr.write('Running from numpy source directory.\n')
else:
try:
from numpy.__config__ import show as show_config
except ImportError:
msg = """Error importing numpy: you should not try to import numpy from
its source directory; please exit the numpy source tree, and relaunch
your python interpreter from there."""
raise ImportError(msg)
from .version import git_revision as __git_revision__
from .version import version as __version__
__all__ = ['ModuleDeprecationWarning',
'VisibleDeprecationWarning']
# Allow distributors to run custom init code
from . import _distributor_init
from . import core
from .core import *
from . import compat
from . import lib
# NOTE: to be revisited following future namespace cleanup.
# See gh-14454 and gh-15672 for discussion.
from .lib import *
from . import linalg
from . import fft
from . import polynomial
from . import random
from . import ctypeslib
from . import ma
from . import matrixlib as _mat
from .matrixlib import *
# Make these accessible from numpy name-space
# but not imported in from numpy import *
# TODO[gh-6103]: Deprecate these
from builtins import bool, int, float, complex, object, str
from .compat import long, unicode
from .core import round, abs, max, min
# now that numpy modules are imported, can initialize limits
core.getlimits._register_known_types()
__all__.extend(['__version__', 'show_config'])
__all__.extend(core.__all__)
__all__.extend(_mat.__all__)
__all__.extend(lib.__all__)
__all__.extend(['linalg', 'fft', 'random', 'ctypeslib', 'ma'])
# These are added by `from .core import *` and `core.__all__`, but we
# overwrite them above with builtins we do _not_ want to export.
__all__.remove('long')
__all__.remove('unicode')
# Remove things that are in the numpy.lib but not in the numpy namespace
# Note that there is a test (numpy/tests/test_public_api.py:test_numpy_namespace)
# that prevents adding more things to the main namespace by accident.
# The list below will grow until the `from .lib import *` fixme above is
# taken care of
__all__.remove('Arrayterator')
del Arrayterator
# Filter out Cython harmless warnings
warnings.filterwarnings("ignore", message="numpy.dtype size changed")
warnings.filterwarnings("ignore", message="numpy.ufunc size changed")
warnings.filterwarnings("ignore", message="numpy.ndarray size changed")
# oldnumeric and numarray were removed in 1.9. In case some packages import
# but do not use them, we define them here for backward compatibility.
oldnumeric = 'removed'
numarray = 'removed'
if sys.version_info[:2] >= (3, 7):
    # Importing Tester requires importing all of UnitTest which is not a
    # cheap import Since it is mainly used in test suits, we lazy import it
    # here to save on the order of 10 ms of import time for most users
    #
    # The previous way Tester was imported also had a side effect of adding
    # the full `numpy.testing` namespace
    #
    # module level getattr is only supported in 3.7 onwards
    # https://www.python.org/dev/peps/pep-0562/
    def __getattr__(attr):
        # PEP 562 hook: resolves `numpy.testing` and `numpy.Tester` lazily,
        # deferring the expensive `numpy.testing` import until first use.
        if attr == 'testing':
            import numpy.testing as testing
            return testing
        elif attr == 'Tester':
            from .testing import Tester
            return Tester
        else:
            raise AttributeError("module {!r} has no attribute "
                                 "{!r}".format(__name__, attr))
    def __dir__():
        # Advertise the lazy attributes alongside the real module globals.
        return list(globals().keys() | {'Tester', 'testing'})
else:
    # We don't actually use this ourselves anymore, but I'm not 100% sure that
    # no-one else in the world is using it (though I hope not)
    from .testing import Tester
# Pytest testing
from numpy._pytesttester import PytestTester
test = PytestTester(__name__)
del PytestTester
def _sanity_check():
    """Smoke-test the linked BLAS with a tiny dot product.

    Some environment problems (e.g. a wrong-ABI BLAS, or mixed package
    managers) produce silently wrong results at runtime rather than import
    errors; a dot product of ones(2) with itself must equal 2.0, so any
    significant deviation is reported loudly here at import time.
    See https://github.com/numpy/numpy/issues/8577 and other
    similar bug reports.
    """
    try:
        vec = ones(2, dtype=float32)
        # ones(2) . ones(2) == 2.0 exactly; allow a small float tolerance.
        if not abs(vec.dot(vec) - 2.0) < 1e-5:
            raise AssertionError()
    except AssertionError:
        msg = ("The current Numpy installation ({!r}) fails to "
               "pass simple sanity checks. This can be caused for example "
               "by incorrect BLAS library being linked in, or by mixing "
               "package managers (pip, conda, apt, ...). Search closed "
               "numpy issues for similar problems.")
        raise RuntimeError(msg.format(__file__))
_sanity_check()
del _sanity_check
def _mac_os_check():
    """
    Quick Sanity check for Mac OS look for accelerate build bugs.
    Testing numpy polyfit calls init_dgelsd(LAPACK)
    """
    try:
        c = array([3., 2., 1.])
        x = linspace(0, 2, 5)
        y = polyval(c, x)
        # cov=True forces the LAPACK least-squares path this check targets.
        _ = polyfit(x, y, 2, cov=True)
    except ValueError:
        # NOTE(review): a ValueError is deliberately swallowed; the darwin
        # caller below only inspects the *warnings* this call emitted.
        pass
import sys
if sys.platform == "darwin":
    # Run the polyfit probe while recording warnings; a buggy Accelerate
    # backend manifests as a warning during the fit.
    with warnings.catch_warnings(record=True) as w:
        _mac_os_check()
        # Throw runtime error, if the test failed Check for warning and error_message
        error_message = ""
        if len(w) > 0:
            error_message = "{}: {}".format(w[-1].category.__name__, str(w[-1].message))
            msg = (
                "Polyfit sanity test emitted a warning, most likely due "
                "to using a buggy Accelerate backend. If you compiled "
                "yourself, more information is available at "
                "https://numpy.org/doc/stable/user/building.html#accelerated-blas-lapack-libraries "
                "Otherwise report this to the vendor "
                "that provided NumPy.\n{}\n".format(error_message))
            raise RuntimeError(msg)
del _mac_os_check
# We usually use madvise hugepages support, but on some old kernels it
# is slow and thus better avoided.
# Specifically kernel version 4.6 had a bug fix which probably fixed this:
# https://github.com/torvalds/linux/commit/7cf91a98e607c2f935dbcc177d70011e95b8faff
import os
use_hugepage = os.environ.get("NUMPY_MADVISE_HUGEPAGE", None)
if sys.platform == "linux" and use_hugepage is None:
    # Default to hugepages on Linux, except on kernels known to be slow.
    use_hugepage = 1
    kernel_version = os.uname().release.split(".")[:2]
    kernel_version = tuple(int(v) for v in kernel_version)
    if kernel_version < (4, 6):
        use_hugepage = 0
elif use_hugepage is None:
    # This is not Linux, so it should not matter, just enable anyway
    use_hugepage = 1
else:
    # Explicit user override via the NUMPY_MADVISE_HUGEPAGE env variable.
    use_hugepage = int(use_hugepage)
# Note that this will currently only make a difference on Linux
core.multiarray._set_madvise_hugepage(use_hugepage)
| 35.687097 | 104 | 0.65823 | import sys
import warnings
from ._globals import ModuleDeprecationWarning, VisibleDeprecationWarning
from ._globals import _NoValue
# procedure itself in a reliable manner.
try:
__NUMPY_SETUP__
except NameError:
__NUMPY_SETUP__ = False
if __NUMPY_SETUP__:
sys.stderr.write('Running from numpy source directory.\n')
else:
try:
from numpy.__config__ import show as show_config
except ImportError:
msg = """Error importing numpy: you should not try to import numpy from
its source directory; please exit the numpy source tree, and relaunch
your python interpreter from there."""
raise ImportError(msg)
from .version import git_revision as __git_revision__
from .version import version as __version__
__all__ = ['ModuleDeprecationWarning',
'VisibleDeprecationWarning']
# Allow distributors to run custom init code
from . import _distributor_init
from . import core
from .core import *
from . import compat
from . import lib
# NOTE: to be revisited following future namespace cleanup.
# See gh-14454 and gh-15672 for discussion.
from .lib import *
from . import linalg
from . import fft
from . import polynomial
from . import random
from . import ctypeslib
from . import ma
from . import matrixlib as _mat
from .matrixlib import *
# Make these accessible from numpy name-space
# but not imported in from numpy import *
# TODO[gh-6103]: Deprecate these
from builtins import bool, int, float, complex, object, str
from .compat import long, unicode
from .core import round, abs, max, min
# now that numpy modules are imported, can initialize limits
core.getlimits._register_known_types()
__all__.extend(['__version__', 'show_config'])
__all__.extend(core.__all__)
__all__.extend(_mat.__all__)
__all__.extend(lib.__all__)
__all__.extend(['linalg', 'fft', 'random', 'ctypeslib', 'ma'])
# These are added by `from .core import *` and `core.__all__`, but we
# overwrite them above with builtins we do _not_ want to export.
__all__.remove('long')
__all__.remove('unicode')
# Remove things that are in the numpy.lib but not in the numpy namespace
# Note that there is a test (numpy/tests/test_public_api.py:test_numpy_namespace)
# that prevents adding more things to the main namespace by accident.
# The list below will grow until the `from .lib import *` fixme above is
# taken care of
__all__.remove('Arrayterator')
del Arrayterator
# Filter out Cython harmless warnings
warnings.filterwarnings("ignore", message="numpy.dtype size changed")
warnings.filterwarnings("ignore", message="numpy.ufunc size changed")
warnings.filterwarnings("ignore", message="numpy.ndarray size changed")
# oldnumeric and numarray were removed in 1.9. In case some packages import
# but do not use them, we define them here for backward compatibility.
oldnumeric = 'removed'
numarray = 'removed'
if sys.version_info[:2] >= (3, 7):
# Importing Tester requires importing all of UnitTest which is not a
# cheap import Since it is mainly used in test suits, we lazy import it
# here to save on the order of 10 ms of import time for most users
#
# The previous way Tester was imported also had a side effect of adding
# the full `numpy.testing` namespace
#
# module level getattr is only supported in 3.7 onwards
# https://www.python.org/dev/peps/pep-0562/
def __getattr__(attr):
if attr == 'testing':
import numpy.testing as testing
return testing
elif attr == 'Tester':
from .testing import Tester
return Tester
else:
raise AttributeError("module {!r} has no attribute "
"{!r}".format(__name__, attr))
def __dir__():
return list(globals().keys() | {'Tester', 'testing'})
else:
# We don't actually use this ourselves anymore, but I'm not 100% sure that
# no-one else in the world is using it (though I hope not)
from .testing import Tester
# Pytest testing
from numpy._pytesttester import PytestTester
test = PytestTester(__name__)
del PytestTester
def _sanity_check():
"""
Quick sanity checks for common bugs caused by environment.
There are some cases e.g. with wrong BLAS ABI that cause wrong
results under specific runtime conditions that are not necessarily
achieved during test suite runs, and it is useful to catch those early.
See https://github.com/numpy/numpy/issues/8577 and other
similar bug reports.
"""
try:
x = ones(2, dtype=float32)
if not abs(x.dot(x) - 2.0) < 1e-5:
raise AssertionError()
except AssertionError:
msg = ("The current Numpy installation ({!r}) fails to "
"pass simple sanity checks. This can be caused for example "
"by incorrect BLAS library being linked in, or by mixing "
"package managers (pip, conda, apt, ...). Search closed "
"numpy issues for similar problems.")
raise RuntimeError(msg.format(__file__))
_sanity_check()
del _sanity_check
def _mac_os_check():
"""
Quick Sanity check for Mac OS look for accelerate build bugs.
Testing numpy polyfit calls init_dgelsd(LAPACK)
"""
try:
c = array([3., 2., 1.])
x = linspace(0, 2, 5)
y = polyval(c, x)
_ = polyfit(x, y, 2, cov=True)
except ValueError:
pass
import sys
if sys.platform == "darwin":
with warnings.catch_warnings(record=True) as w:
_mac_os_check()
# Throw runtime error, if the test failed Check for warning and error_message
error_message = ""
if len(w) > 0:
error_message = "{}: {}".format(w[-1].category.__name__, str(w[-1].message))
msg = (
"Polyfit sanity test emitted a warning, most likely due "
"to using a buggy Accelerate backend. If you compiled "
"yourself, more information is available at "
"https://numpy.org/doc/stable/user/building.html#accelerated-blas-lapack-libraries "
"Otherwise report this to the vendor "
"that provided NumPy.\n{}\n".format(error_message))
raise RuntimeError(msg)
del _mac_os_check
# We usually use madvise hugepages support, but on some old kernels it
# is slow and thus better avoided.
# Specifically kernel version 4.6 had a bug fix which probably fixed this:
# https://github.com/torvalds/linux/commit/7cf91a98e607c2f935dbcc177d70011e95b8faff
import os
use_hugepage = os.environ.get("NUMPY_MADVISE_HUGEPAGE", None)
if sys.platform == "linux" and use_hugepage is None:
use_hugepage = 1
kernel_version = os.uname().release.split(".")[:2]
kernel_version = tuple(int(v) for v in kernel_version)
if kernel_version < (4, 6):
use_hugepage = 0
elif use_hugepage is None:
# This is not Linux, so it should not matter, just enable anyway
use_hugepage = 1
else:
use_hugepage = int(use_hugepage)
# Note that this will currently only make a difference on Linux
core.multiarray._set_madvise_hugepage(use_hugepage)
| true | true |
1c3b11e76c61005bee3f7ce49c5ed0618c32d9a9 | 8,309 | py | Python | tests/core/contracts/test_contract_constructor.py | ConnorMac/web3.py | c7fb8c91a93ce82286922440e236721719d50a98 | [
"MIT"
] | null | null | null | tests/core/contracts/test_contract_constructor.py | ConnorMac/web3.py | c7fb8c91a93ce82286922440e236721719d50a98 | [
"MIT"
] | null | null | null | tests/core/contracts/test_contract_constructor.py | ConnorMac/web3.py | c7fb8c91a93ce82286922440e236721719d50a98 | [
"MIT"
] | null | null | null | import pytest
from eth_utils import (
decode_hex,
)
# Address passed to the address-argument constructor tests below.
TEST_ADDRESS = '0x16D9983245De15E7A9A73bC586E01FF6E08dE737'
# Values the WithConstructorArgumentsContract accessors should report back:
# the integer argument unchanged, and the bytes argument NUL-padded to
# 32 bytes (presumably a bytes32 storage slot -- confirm against the fixture).
EXPECTED_DATA_A = 1234
EXPECTED_DATA_B = (b'abcd\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
                   b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00')
def test_contract_constructor_abi_encoding_with_no_constructor_fn(MathContract, MATH_CODE):
    """With no constructor ABI entry, the deploy data is just the bytecode."""
    encoded = MathContract.constructor()._encode_data_in_transaction()
    assert encoded == MATH_CODE
def test_contract_constructor_gas_estimate_no_constructor(web3, MathContract):
    """estimateGas() on a constructor-less deploy stays close to the gas used."""
    estimated = MathContract.constructor().estimateGas()
    receipt = web3.eth.waitForTransactionReceipt(
        MathContract.constructor().transact())
    assert abs(estimated - receipt.get('gasUsed')) < 21000


def test_contract_constructor_gas_estimate_with_constructor_without_arguments(
        web3,
        SimpleConstructorContract):
    """estimateGas() on a no-argument constructor stays close to the gas used."""
    estimated = SimpleConstructorContract.constructor().estimateGas()
    receipt = web3.eth.waitForTransactionReceipt(
        SimpleConstructorContract.constructor().transact())
    assert abs(estimated - receipt.get('gasUsed')) < 21000
@pytest.mark.parametrize(
    'constructor_args,constructor_kwargs',
    (
        # The same (a=1234, b=b'abcd') constructor call, spelled four ways.
        ([1234, b'abcd'], {}),
        ([1234], {'b': b'abcd'}),
        ([], {'a': 1234, 'b': b'abcd'}),
        ([], {'b': b'abcd', 'a': 1234}),
    ),
)
def test_contract_constructor_gas_estimate_with_constructor_with_arguments(
        web3,
        WithConstructorArgumentsContract,
        constructor_args,
        constructor_kwargs):
    """Gas estimate for an argument-taking constructor matches actual usage.

    The 21000 tolerance presumably corresponds to the intrinsic base
    transaction cost -- TODO confirm.
    """
    gas_estimate = WithConstructorArgumentsContract.constructor(
        *constructor_args, **constructor_kwargs).estimateGas()
    deploy_txn = WithConstructorArgumentsContract.constructor(
        *constructor_args, **constructor_kwargs).transact()
    txn_receipt = web3.eth.waitForTransactionReceipt(deploy_txn)
    gas_used = txn_receipt.get('gasUsed')
    assert abs(gas_estimate - gas_used) < 21000
def test_contract_constructor_gas_estimate_with_constructor_with_address_argument(
        web3,
        WithConstructorAddressArgumentsContract):
    """Gas estimate for an address-argument constructor matches actual usage."""
    # NOTE(review): the literal address equals the module-level TEST_ADDRESS;
    # consider reusing the constant.
    gas_estimate = WithConstructorAddressArgumentsContract.constructor(
        "0x16D9983245De15E7A9A73bC586E01FF6E08dE737").estimateGas()
    deploy_txn = WithConstructorAddressArgumentsContract.constructor(
        "0x16D9983245De15E7A9A73bC586E01FF6E08dE737").transact()
    txn_receipt = web3.eth.waitForTransactionReceipt(deploy_txn)
    gas_used = txn_receipt.get('gasUsed')
    assert abs(gas_estimate - gas_used) < 21000
def test_contract_constructor_transact_no_constructor(web3, MathContract, MATH_RUNTIME):
    """Deploying via transact() puts the expected runtime bytecode on-chain."""
    receipt = web3.eth.waitForTransactionReceipt(MathContract.constructor().transact())
    assert receipt is not None
    deployed_at = receipt['contractAddress']
    assert deployed_at
    assert web3.eth.getCode(deployed_at) == decode_hex(MATH_RUNTIME)


def test_contract_constructor_transact_with_constructor_without_arguments(
        web3, SimpleConstructorContract, SIMPLE_CONSTRUCTOR_RUNTIME):
    """A no-argument constructor deploy also lands its runtime bytecode."""
    receipt = web3.eth.waitForTransactionReceipt(
        SimpleConstructorContract.constructor().transact())
    assert receipt is not None
    deployed_at = receipt['contractAddress']
    assert deployed_at
    assert web3.eth.getCode(deployed_at) == decode_hex(SIMPLE_CONSTRUCTOR_RUNTIME)
@pytest.mark.parametrize(
    'constructor_args,constructor_kwargs, expected_a, expected_b',
    (
        # The same (a=1234, b=b'abcd') constructor call, spelled four ways.
        ([1234, b'abcd'], {}, EXPECTED_DATA_A, EXPECTED_DATA_B),
        ([1234], {'b': b'abcd'}, EXPECTED_DATA_A, EXPECTED_DATA_B),
        ([], {'a': 1234, 'b': b'abcd'}, EXPECTED_DATA_A, EXPECTED_DATA_B),
        ([], {'b': b'abcd', 'a': 1234}, EXPECTED_DATA_A, EXPECTED_DATA_B),
    ),
)
def test_contract_constructor_transact_with_constructor_with_arguments(
        web3,
        WithConstructorArgumentsContract,
        WITH_CONSTRUCTOR_ARGUMENTS_RUNTIME,
        constructor_args,
        constructor_kwargs,
        expected_a,
        expected_b):
    """Deploy with constructor arguments and read the stored values back."""
    deploy_txn = WithConstructorArgumentsContract.constructor(
        *constructor_args, **constructor_kwargs).transact()
    txn_receipt = web3.eth.waitForTransactionReceipt(deploy_txn)
    assert txn_receipt is not None
    assert txn_receipt['contractAddress']
    contract_address = txn_receipt['contractAddress']
    blockchain_code = web3.eth.getCode(contract_address)
    assert blockchain_code == decode_hex(WITH_CONSTRUCTOR_ARGUMENTS_RUNTIME)
    # The constructor arguments should be visible through the accessors.
    assert expected_a == WithConstructorArgumentsContract(
        address=contract_address).functions.data_a().call()
    assert expected_b == WithConstructorArgumentsContract(
        address=contract_address).functions.data_b().call()
def test_contract_constructor_transact_with_constructor_with_address_arguments(
        web3, WithConstructorAddressArgumentsContract, WITH_CONSTRUCTOR_ADDRESS_RUNTIME):
    """Deploy with an address constructor argument and read it back."""
    deploy_txn = WithConstructorAddressArgumentsContract.constructor(TEST_ADDRESS).transact()
    txn_receipt = web3.eth.waitForTransactionReceipt(deploy_txn)
    assert txn_receipt is not None
    assert txn_receipt['contractAddress']
    contract_address = txn_receipt['contractAddress']
    blockchain_code = web3.eth.getCode(contract_address)
    assert blockchain_code == decode_hex(WITH_CONSTRUCTOR_ADDRESS_RUNTIME)
    assert TEST_ADDRESS == WithConstructorAddressArgumentsContract(
        address=contract_address).functions.testAddr().call()
def test_contract_constructor_build_transaction_to_field_error(MathContract):
    """buildTransaction() must reject an explicit 'to' field for a deployment."""
    constructor = MathContract.constructor()
    with pytest.raises(ValueError):
        constructor.buildTransaction({'to': '123'})
def test_contract_constructor_build_transaction_no_constructor(web3, MathContract):
    """buildTransaction() produces the same deploy data as transact()."""
    txn_hash = MathContract.constructor().transact({'from': web3.eth.accounts[0]})
    txn = web3.eth.getTransaction(txn_hash)
    nonce = web3.eth.getTransactionCount(web3.eth.coinbase)
    unsent_txn = MathContract.constructor().buildTransaction({'nonce': nonce})
    assert txn['data'] == unsent_txn['data']
    # The built transaction must also be sendable as-is.
    new_txn_hash = web3.eth.sendTransaction(unsent_txn)
    new_txn = web3.eth.getTransaction(new_txn_hash)
    assert new_txn['data'] == unsent_txn['data']
    assert new_txn['nonce'] == nonce
def test_contract_constructor_build_transaction_with_constructor_without_argument(web3,
                                                                                  MathContract):
    """buildTransaction() for a no-argument constructor deploy.

    NOTE(review): this body is identical to the no-constructor test above and
    also uses MathContract; presumably it was meant to exercise
    SimpleConstructorContract -- confirm before changing.
    """
    txn_hash = MathContract.constructor().transact({'from': web3.eth.accounts[0]})
    txn = web3.eth.getTransaction(txn_hash)
    nonce = web3.eth.getTransactionCount(web3.eth.coinbase)
    unsent_txn = MathContract.constructor().buildTransaction({'nonce': nonce})
    assert txn['data'] == unsent_txn['data']
    new_txn_hash = web3.eth.sendTransaction(unsent_txn)
    new_txn = web3.eth.getTransaction(new_txn_hash)
    assert new_txn['data'] == unsent_txn['data']
    assert new_txn['nonce'] == nonce
@pytest.mark.parametrize(
    'constructor_args,constructor_kwargs',
    (
        # The same (a=1234, b=b'abcd') constructor call, spelled four ways.
        ([1234, b'abcd'], {}),
        ([1234], {'b': b'abcd'}),
        ([], {'a': 1234, 'b': b'abcd'}),
        ([], {'b': b'abcd', 'a': 1234}),
    ),
)
def test_contract_constructor_build_transaction_with_constructor_with_argument(
        web3,
        WithConstructorArgumentsContract,
        constructor_args,
        constructor_kwargs):
    """buildTransaction() with constructor arguments matches transact() data."""
    txn_hash = WithConstructorArgumentsContract.constructor(
        *constructor_args, **constructor_kwargs).transact({'from': web3.eth.accounts[0]})
    txn = web3.eth.getTransaction(txn_hash)
    nonce = web3.eth.getTransactionCount(web3.eth.coinbase)
    unsent_txn = WithConstructorArgumentsContract.constructor(
        *constructor_args, **constructor_kwargs).buildTransaction({'nonce': nonce})
    assert txn['data'] == unsent_txn['data']
    # The built transaction must also be sendable as-is.
    new_txn_hash = web3.eth.sendTransaction(unsent_txn)
    new_txn = web3.eth.getTransaction(new_txn_hash)
    assert new_txn['data'] == unsent_txn['data']
    assert new_txn['nonce'] == nonce
| 39.379147 | 96 | 0.736912 | import pytest
from eth_utils import (
decode_hex,
)
TEST_ADDRESS = '0x16D9983245De15E7A9A73bC586E01FF6E08dE737'
EXPECTED_DATA_A = 1234
EXPECTED_DATA_B = (b'abcd\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00')
def test_contract_constructor_abi_encoding_with_no_constructor_fn(MathContract, MATH_CODE):
deploy_data = MathContract.constructor()._encode_data_in_transaction()
assert deploy_data == MATH_CODE
def test_contract_constructor_gas_estimate_no_constructor(web3, MathContract):
gas_estimate = MathContract.constructor().estimateGas()
deploy_txn = MathContract.constructor().transact()
txn_receipt = web3.eth.waitForTransactionReceipt(deploy_txn)
gas_used = txn_receipt.get('gasUsed')
assert abs(gas_estimate - gas_used) < 21000
def test_contract_constructor_gas_estimate_with_constructor_without_arguments(
web3,
SimpleConstructorContract):
gas_estimate = SimpleConstructorContract.constructor().estimateGas()
deploy_txn = SimpleConstructorContract.constructor().transact()
txn_receipt = web3.eth.waitForTransactionReceipt(deploy_txn)
gas_used = txn_receipt.get('gasUsed')
assert abs(gas_estimate - gas_used) < 21000
@pytest.mark.parametrize(
'constructor_args,constructor_kwargs',
(
([1234, b'abcd'], {}),
([1234], {'b': b'abcd'}),
([], {'a': 1234, 'b': b'abcd'}),
([], {'b': b'abcd', 'a': 1234}),
),
)
def test_contract_constructor_gas_estimate_with_constructor_with_arguments(
web3,
WithConstructorArgumentsContract,
constructor_args,
constructor_kwargs):
gas_estimate = WithConstructorArgumentsContract.constructor(
*constructor_args, **constructor_kwargs).estimateGas()
deploy_txn = WithConstructorArgumentsContract.constructor(
*constructor_args, **constructor_kwargs).transact()
txn_receipt = web3.eth.waitForTransactionReceipt(deploy_txn)
gas_used = txn_receipt.get('gasUsed')
assert abs(gas_estimate - gas_used) < 21000
def test_contract_constructor_gas_estimate_with_constructor_with_address_argument(
web3,
WithConstructorAddressArgumentsContract):
gas_estimate = WithConstructorAddressArgumentsContract.constructor(
"0x16D9983245De15E7A9A73bC586E01FF6E08dE737").estimateGas()
deploy_txn = WithConstructorAddressArgumentsContract.constructor(
"0x16D9983245De15E7A9A73bC586E01FF6E08dE737").transact()
txn_receipt = web3.eth.waitForTransactionReceipt(deploy_txn)
gas_used = txn_receipt.get('gasUsed')
assert abs(gas_estimate - gas_used) < 21000
def test_contract_constructor_transact_no_constructor(web3, MathContract, MATH_RUNTIME):
deploy_txn = MathContract.constructor().transact()
txn_receipt = web3.eth.waitForTransactionReceipt(deploy_txn)
assert txn_receipt is not None
assert txn_receipt['contractAddress']
contract_address = txn_receipt['contractAddress']
blockchain_code = web3.eth.getCode(contract_address)
assert blockchain_code == decode_hex(MATH_RUNTIME)
def test_contract_constructor_transact_with_constructor_without_arguments(
web3, SimpleConstructorContract, SIMPLE_CONSTRUCTOR_RUNTIME):
deploy_txn = SimpleConstructorContract.constructor().transact()
txn_receipt = web3.eth.waitForTransactionReceipt(deploy_txn)
assert txn_receipt is not None
assert txn_receipt['contractAddress']
contract_address = txn_receipt['contractAddress']
blockchain_code = web3.eth.getCode(contract_address)
assert blockchain_code == decode_hex(SIMPLE_CONSTRUCTOR_RUNTIME)
@pytest.mark.parametrize(
'constructor_args,constructor_kwargs, expected_a, expected_b',
(
([1234, b'abcd'], {}, EXPECTED_DATA_A, EXPECTED_DATA_B),
([1234], {'b': b'abcd'}, EXPECTED_DATA_A, EXPECTED_DATA_B),
([], {'a': 1234, 'b': b'abcd'}, EXPECTED_DATA_A, EXPECTED_DATA_B),
([], {'b': b'abcd', 'a': 1234}, EXPECTED_DATA_A, EXPECTED_DATA_B),
),
)
def test_contract_constructor_transact_with_constructor_with_arguments(
web3,
WithConstructorArgumentsContract,
WITH_CONSTRUCTOR_ARGUMENTS_RUNTIME,
constructor_args,
constructor_kwargs,
expected_a,
expected_b):
deploy_txn = WithConstructorArgumentsContract.constructor(
*constructor_args, **constructor_kwargs).transact()
txn_receipt = web3.eth.waitForTransactionReceipt(deploy_txn)
assert txn_receipt is not None
assert txn_receipt['contractAddress']
contract_address = txn_receipt['contractAddress']
blockchain_code = web3.eth.getCode(contract_address)
assert blockchain_code == decode_hex(WITH_CONSTRUCTOR_ARGUMENTS_RUNTIME)
assert expected_a == WithConstructorArgumentsContract(
address=contract_address).functions.data_a().call()
assert expected_b == WithConstructorArgumentsContract(
address=contract_address).functions.data_b().call()
def test_contract_constructor_transact_with_constructor_with_address_arguments(
web3, WithConstructorAddressArgumentsContract, WITH_CONSTRUCTOR_ADDRESS_RUNTIME):
deploy_txn = WithConstructorAddressArgumentsContract.constructor(TEST_ADDRESS).transact()
txn_receipt = web3.eth.waitForTransactionReceipt(deploy_txn)
assert txn_receipt is not None
assert txn_receipt['contractAddress']
contract_address = txn_receipt['contractAddress']
blockchain_code = web3.eth.getCode(contract_address)
assert blockchain_code == decode_hex(WITH_CONSTRUCTOR_ADDRESS_RUNTIME)
assert TEST_ADDRESS == WithConstructorAddressArgumentsContract(
address=contract_address).functions.testAddr().call()
def test_contract_constructor_build_transaction_to_field_error(MathContract):
with pytest.raises(ValueError):
MathContract.constructor().buildTransaction({'to': '123'})
def test_contract_constructor_build_transaction_no_constructor(web3, MathContract):
txn_hash = MathContract.constructor().transact({'from': web3.eth.accounts[0]})
txn = web3.eth.getTransaction(txn_hash)
nonce = web3.eth.getTransactionCount(web3.eth.coinbase)
unsent_txn = MathContract.constructor().buildTransaction({'nonce': nonce})
assert txn['data'] == unsent_txn['data']
new_txn_hash = web3.eth.sendTransaction(unsent_txn)
new_txn = web3.eth.getTransaction(new_txn_hash)
assert new_txn['data'] == unsent_txn['data']
assert new_txn['nonce'] == nonce
def test_contract_constructor_build_transaction_with_constructor_without_argument(web3,
MathContract):
txn_hash = MathContract.constructor().transact({'from': web3.eth.accounts[0]})
txn = web3.eth.getTransaction(txn_hash)
nonce = web3.eth.getTransactionCount(web3.eth.coinbase)
unsent_txn = MathContract.constructor().buildTransaction({'nonce': nonce})
assert txn['data'] == unsent_txn['data']
new_txn_hash = web3.eth.sendTransaction(unsent_txn)
new_txn = web3.eth.getTransaction(new_txn_hash)
assert new_txn['data'] == unsent_txn['data']
assert new_txn['nonce'] == nonce
@pytest.mark.parametrize(
'constructor_args,constructor_kwargs',
(
([1234, b'abcd'], {}),
([1234], {'b': b'abcd'}),
([], {'a': 1234, 'b': b'abcd'}),
([], {'b': b'abcd', 'a': 1234}),
),
)
def test_contract_constructor_build_transaction_with_constructor_with_argument(
web3,
WithConstructorArgumentsContract,
constructor_args,
constructor_kwargs):
txn_hash = WithConstructorArgumentsContract.constructor(
*constructor_args, **constructor_kwargs).transact({'from': web3.eth.accounts[0]})
txn = web3.eth.getTransaction(txn_hash)
nonce = web3.eth.getTransactionCount(web3.eth.coinbase)
unsent_txn = WithConstructorArgumentsContract.constructor(
*constructor_args, **constructor_kwargs).buildTransaction({'nonce': nonce})
assert txn['data'] == unsent_txn['data']
new_txn_hash = web3.eth.sendTransaction(unsent_txn)
new_txn = web3.eth.getTransaction(new_txn_hash)
assert new_txn['data'] == unsent_txn['data']
assert new_txn['nonce'] == nonce
| true | true |
1c3b11ea2dde509958ffa5d9fd6bbe510a6e9170 | 2,327 | py | Python | autotest/gdrivers/ngsgeoid.py | ajolma/gdal | 19d847c8519919fcd1e7e7247644d28771034317 | [
"MIT"
] | 1 | 2018-12-19T14:08:20.000Z | 2018-12-19T14:08:20.000Z | autotest/gdrivers/ngsgeoid.py | ajolma/gdal | 19d847c8519919fcd1e7e7247644d28771034317 | [
"MIT"
] | null | null | null | autotest/gdrivers/ngsgeoid.py | ajolma/gdal | 19d847c8519919fcd1e7e7247644d28771034317 | [
"MIT"
] | 1 | 2019-11-01T15:17:09.000Z | 2019-11-01T15:17:09.000Z | #!/usr/bin/env pytest
###############################################################################
# $Id$
#
# Project: GDAL/OGR Test Suite
# Purpose: Test read functionality for NGSGEOID driver.
# Author: Even Rouault <even dot rouault at mines dash paris dot org>
#
###############################################################################
# Copyright (c) 2011, Even Rouault <even dot rouault at mines-paris dot org>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
###############################################################################
import gdaltest
###############################################################################
# Test opening a little endian file
def test_ngsgeoid_1():
    """Open a little-endian NGSGEOID file; verify geotransform, checksum, SRS."""
    tst = gdaltest.GDALTest('NGSGEOID', 'g2009u01_le_truncated.bin', 1, 65534)
    # Do not `return` the helper's result: pytest deprecates non-None returns
    # from test functions; testOpen() reports failures itself.
    tst.testOpen(check_gt=(229.99166666666667, 0.016666666666670001, 0.0, 40.00833333333334, 0.0, -0.016666666666670001), check_prj='WGS84')
###############################################################################
# Test opening a big endian file
def test_ngsgeoid_2():
    """Open a big-endian NGSGEOID file; verify geotransform, checksum, SRS."""
    tst = gdaltest.GDALTest('NGSGEOID', 'g2009u01_be_truncated.bin', 1, 65534)
    # Do not `return` the helper's result: pytest deprecates non-None returns
    # from test functions; testOpen() reports failures itself.
    tst.testOpen(check_gt=(229.99166666666667, 0.016666666666670001, 0.0, 40.00833333333334, 0.0, -0.016666666666670001), check_prj='WGS84')
| 42.309091 | 147 | 0.629996 | true | true | |
1c3b125f2097520071768470165b007e704b4733 | 1,597 | py | Python | package/spack-py-functools32/package.py | ctuning/ck-spack | 307934efce1be2d4f104251275c82fbc70127105 | [
"BSD-3-Clause"
] | 1 | 2018-07-17T07:45:09.000Z | 2018-07-17T07:45:09.000Z | package/spack-py-functools32/package.py | ctuning/ck-spack | 307934efce1be2d4f104251275c82fbc70127105 | [
"BSD-3-Clause"
] | null | null | null | package/spack-py-functools32/package.py | ctuning/ck-spack | 307934efce1be2d4f104251275c82fbc70127105 | [
"BSD-3-Clause"
] | null | null | null | ##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class PyFunctools32(PythonPackage):
    """Backport of the functools module from Python 3.2.3 for use on 2.7 and
    PyPy."""
    # Locations Spack uses to present and fetch the package.
    homepage = "https://github.com/MiCHiLU/python-functools32"
    url = "https://pypi.io/packages/source/f/functools32/functools32-3.2.3-2.tar.gz"
    # Spack `version` directive: registers the release with its archive
    # checksum (presumably an md5 digest, per legacy Spack usage -- confirm).
    version('3.2.3-2', '09f24ffd9af9f6cd0f63cb9f4e23d4b2')
| 44.361111 | 89 | 0.678147 | true | true | |
1c3b127d4fb57402793e5f094a3c33d525524685 | 3,950 | py | Python | networkx/algorithms/tree/tests/test_coding.py | ChrisKeefe/networkx | 66cc88193e5be96f866adac2ef4ccdbe55bab335 | [
"BSD-3-Clause"
] | 1 | 2021-12-10T18:55:32.000Z | 2021-12-10T18:55:32.000Z | networkx/algorithms/tree/tests/test_coding.py | Reed-CompBio/networkx | c266c4b29699290333dff5440e3c9e3029ec0341 | [
"BSD-3-Clause"
] | null | null | null | networkx/algorithms/tree/tests/test_coding.py | Reed-CompBio/networkx | c266c4b29699290333dff5440e3c9e3029ec0341 | [
"BSD-3-Clause"
] | null | null | null | """Unit tests for the :mod:`~networkx.algorithms.tree.coding` module."""
from itertools import product
import pytest
import networkx as nx
from networkx.utils import nodes_equal, edges_equal
class TestPruferSequence:
    """Unit tests for the Prüfer sequence encoding and decoding
    functions.
    """
    def test_nontree(self):
        """A graph containing a cycle cannot be encoded."""
        with pytest.raises(nx.NotATree):
            G = nx.cycle_graph(3)
            nx.to_prufer_sequence(G)
    def test_null_graph(self):
        """The empty graph has no Prüfer sequence."""
        with pytest.raises(nx.NetworkXPointlessConcept):
            nx.to_prufer_sequence(nx.null_graph())
    def test_trivial_graph(self):
        """The single-node graph has no Prüfer sequence."""
        with pytest.raises(nx.NetworkXPointlessConcept):
            nx.to_prufer_sequence(nx.trivial_graph())
    def test_bad_integer_labels(self):
        """Nodes must be labeled 0..n-1; string labels raise KeyError."""
        with pytest.raises(KeyError):
            T = nx.Graph(nx.utils.pairwise("abc"))
            nx.to_prufer_sequence(T)
    def test_encoding(self):
        """Tests for encoding a tree as a Prüfer sequence using the
        iterative strategy.
        """
        # Example from Wikipedia.
        tree = nx.Graph([(0, 3), (1, 3), (2, 3), (3, 4), (4, 5)])
        sequence = nx.to_prufer_sequence(tree)
        assert sequence == [3, 3, 3, 4]
    def test_decoding(self):
        """Tests for decoding a tree from a Prüfer sequence."""
        # Example from Wikipedia.
        sequence = [3, 3, 3, 4]
        tree = nx.from_prufer_sequence(sequence)
        assert nodes_equal(list(tree), list(range(6)))
        edges = [(0, 3), (1, 3), (2, 3), (3, 4), (4, 5)]
        assert edges_equal(list(tree.edges()), edges)
    def test_decoding2(self):
        """Decode a longer known sequence from the literature."""
        # Example from "An Optimal Algorithm for Prufer Codes".
        sequence = [2, 4, 0, 1, 3, 3]
        tree = nx.from_prufer_sequence(sequence)
        assert nodes_equal(list(tree), list(range(8)))
        edges = [(0, 1), (0, 4), (1, 3), (2, 4), (2, 5), (3, 6), (3, 7)]
        assert edges_equal(list(tree.edges()), edges)
    def test_inverse(self):
        """Tests that the encoding and decoding functions are inverses."""
        # Round-trip every non-isomorphic 4-node tree...
        for T in nx.nonisomorphic_trees(4):
            T2 = nx.from_prufer_sequence(nx.to_prufer_sequence(T))
            assert nodes_equal(list(T), list(T2))
            assert edges_equal(list(T.edges()), list(T2.edges()))
        # ...and every length-2 sequence over {0,1,2,3} (trees on 4 nodes).
        for seq in product(range(4), repeat=2):
            seq2 = nx.to_prufer_sequence(nx.from_prufer_sequence(seq))
            assert list(seq) == seq2
class TestNestedTuple:
"""Unit tests for the nested tuple encoding and decoding functions."""
def test_nontree(self):
with pytest.raises(nx.NotATree):
G = nx.cycle_graph(3)
nx.to_nested_tuple(G, 0)
def test_unknown_root(self):
with pytest.raises(nx.NodeNotFound):
G = nx.path_graph(2)
nx.to_nested_tuple(G, "bogus")
def test_encoding(self):
T = nx.full_rary_tree(2, 2**3 - 1)
expected = (((), ()), ((), ()))
actual = nx.to_nested_tuple(T, 0)
assert nodes_equal(expected, actual)
def test_canonical_form(self):
T = nx.Graph()
T.add_edges_from([(0, 1), (0, 2), (0, 3)])
T.add_edges_from([(1, 4), (1, 5)])
T.add_edges_from([(3, 6), (3, 7)])
root = 0
actual = nx.to_nested_tuple(T, root, canonical_form=True)
expected = ((), ((), ()), ((), ()))
assert actual == expected
def test_decoding(self):
balanced = (((), ()), ((), ()))
expected = nx.full_rary_tree(2, 2**3 - 1)
actual = nx.from_nested_tuple(balanced)
assert nx.is_isomorphic(expected, actual)
def test_sensible_relabeling(self):
balanced = (((), ()), ((), ()))
T = nx.from_nested_tuple(balanced, sensible_relabeling=True)
edges = [(0, 1), (0, 2), (1, 3), (1, 4), (2, 5), (2, 6)]
assert nodes_equal(list(T), list(range(2**3 - 1)))
assert edges_equal(list(T.edges()), edges)
| 34.955752 | 74 | 0.587342 | from itertools import product
import pytest
import networkx as nx
from networkx.utils import nodes_equal, edges_equal
class TestPruferSequence:
def test_nontree(self):
with pytest.raises(nx.NotATree):
G = nx.cycle_graph(3)
nx.to_prufer_sequence(G)
def test_null_graph(self):
with pytest.raises(nx.NetworkXPointlessConcept):
nx.to_prufer_sequence(nx.null_graph())
def test_trivial_graph(self):
with pytest.raises(nx.NetworkXPointlessConcept):
nx.to_prufer_sequence(nx.trivial_graph())
def test_bad_integer_labels(self):
with pytest.raises(KeyError):
T = nx.Graph(nx.utils.pairwise("abc"))
nx.to_prufer_sequence(T)
def test_encoding(self):
tree = nx.Graph([(0, 3), (1, 3), (2, 3), (3, 4), (4, 5)])
sequence = nx.to_prufer_sequence(tree)
assert sequence == [3, 3, 3, 4]
def test_decoding(self):
sequence = [3, 3, 3, 4]
tree = nx.from_prufer_sequence(sequence)
assert nodes_equal(list(tree), list(range(6)))
edges = [(0, 3), (1, 3), (2, 3), (3, 4), (4, 5)]
assert edges_equal(list(tree.edges()), edges)
def test_decoding2(self):
sequence = [2, 4, 0, 1, 3, 3]
tree = nx.from_prufer_sequence(sequence)
assert nodes_equal(list(tree), list(range(8)))
edges = [(0, 1), (0, 4), (1, 3), (2, 4), (2, 5), (3, 6), (3, 7)]
assert edges_equal(list(tree.edges()), edges)
def test_inverse(self):
for T in nx.nonisomorphic_trees(4):
T2 = nx.from_prufer_sequence(nx.to_prufer_sequence(T))
assert nodes_equal(list(T), list(T2))
assert edges_equal(list(T.edges()), list(T2.edges()))
for seq in product(range(4), repeat=2):
seq2 = nx.to_prufer_sequence(nx.from_prufer_sequence(seq))
assert list(seq) == seq2
class TestNestedTuple:
def test_nontree(self):
with pytest.raises(nx.NotATree):
G = nx.cycle_graph(3)
nx.to_nested_tuple(G, 0)
def test_unknown_root(self):
with pytest.raises(nx.NodeNotFound):
G = nx.path_graph(2)
nx.to_nested_tuple(G, "bogus")
def test_encoding(self):
T = nx.full_rary_tree(2, 2**3 - 1)
expected = (((), ()), ((), ()))
actual = nx.to_nested_tuple(T, 0)
assert nodes_equal(expected, actual)
def test_canonical_form(self):
T = nx.Graph()
T.add_edges_from([(0, 1), (0, 2), (0, 3)])
T.add_edges_from([(1, 4), (1, 5)])
T.add_edges_from([(3, 6), (3, 7)])
root = 0
actual = nx.to_nested_tuple(T, root, canonical_form=True)
expected = ((), ((), ()), ((), ()))
assert actual == expected
def test_decoding(self):
balanced = (((), ()), ((), ()))
expected = nx.full_rary_tree(2, 2**3 - 1)
actual = nx.from_nested_tuple(balanced)
assert nx.is_isomorphic(expected, actual)
def test_sensible_relabeling(self):
balanced = (((), ()), ((), ()))
T = nx.from_nested_tuple(balanced, sensible_relabeling=True)
edges = [(0, 1), (0, 2), (1, 3), (1, 4), (2, 5), (2, 6)]
assert nodes_equal(list(T), list(range(2**3 - 1)))
assert edges_equal(list(T.edges()), edges)
| true | true |
1c3b1290a5f5a149f55700f326f5cd4656d70e80 | 507 | py | Python | photos/urls.py | Alvin-21/gallery | c121fb50581328cde082459c5adab27df9d57ed6 | [
"Unlicense"
] | null | null | null | photos/urls.py | Alvin-21/gallery | c121fb50581328cde082459c5adab27df9d57ed6 | [
"Unlicense"
] | null | null | null | photos/urls.py | Alvin-21/gallery | c121fb50581328cde082459c5adab27df9d57ed6 | [
"Unlicense"
] | null | null | null | from django.urls import path, re_path
from . import views
from django.conf import settings
from django.conf.urls.static import static
urlpatterns = [
re_path(r'^$', views.index, name='homepage'),
re_path(r'^image/(\d+)', views.view_image, name='image'),
re_path(r'^categories/', views.search_categories, name='categories'),
re_path(r'^cities/', views.search_locations, name='locations'),
]
if settings.DEBUG:
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT) | 36.214286 | 80 | 0.731755 | from django.urls import path, re_path
from . import views
from django.conf import settings
from django.conf.urls.static import static
urlpatterns = [
re_path(r'^$', views.index, name='homepage'),
re_path(r'^image/(\d+)', views.view_image, name='image'),
re_path(r'^categories/', views.search_categories, name='categories'),
re_path(r'^cities/', views.search_locations, name='locations'),
]
if settings.DEBUG:
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT) | true | true |
1c3b1312076676887ced2aceae880c7d66ff9ffb | 7,212 | py | Python | scripts/eda_script.py | Sukriti1312/DSCI-522_City_of_A-Stars_310 | 3cbfd1c238a86bcc4c3ddeb4d4cf83b90310e4ad | [
"MIT"
] | null | null | null | scripts/eda_script.py | Sukriti1312/DSCI-522_City_of_A-Stars_310 | 3cbfd1c238a86bcc4c3ddeb4d4cf83b90310e4ad | [
"MIT"
] | 47 | 2020-01-17T00:42:37.000Z | 2020-02-13T00:09:00.000Z | scripts/eda_script.py | Sukriti1312/DSCI-522_City_of_A-Stars_310 | 3cbfd1c238a86bcc4c3ddeb4d4cf83b90310e4ad | [
"MIT"
] | 3 | 2020-01-17T01:22:59.000Z | 2020-01-18T23:42:32.000Z | # author: A. Muhammad
# date: 2020-02-01
'''This script performs EDA on the students performance datasets
for portuguese and math students and outputs necessary tables and
figures to path provided.
Usage: eda_script.py --file_path=<file_path> --results_path=<results_path>
Example:
python scripts/eda_script.py --file_path=data/ --results_path=results/
Options:
--file_path=<file_path> Path (excluding filenames) to the csv file.
--results_path=<results_path> Path for saving plots.
'''
import pandas as pd
import numpy as np
from docopt import docopt
import altair as alt
import re
import os
opt = docopt(__doc__)
def test_function():
"""
Tests the input and output
file paths.
"""
file_path_check = re.match("([A-Za-z]+[.]{1}[A-Za-z]+)", opt["--file_path"])
out_path_check = re.match("([A-Za-z]+[.]{1}[A-Za-z]+)", opt["--results_path"])
assert file_path_check == None, "you can not have extensions in path, only directories."
assert out_path_check == None, "you can not have extensions in path, only directories."
try:
os.listdir(opt["--file_path"])
os.listdir(opt["--results_path"])
except Exception as e:
print(e)
# test function runs here
test_function()
opt = docopt(__doc__)
def main(file_path, results_path):
# read in data
df_mat = pd.read_csv(file_path + "student-mat_clean.csv")
df_por = pd.read_csv(file_path + "student-por_clean.csv")
# register the custom theme under a chosen name
alt.themes.register('mds_special', mds_special)
# enable the newly registered theme
alt.themes.enable('mds_special')
## tables
# agg table math
df_math_agg = df_mat[["romantic", "total_grade"]].groupby("romantic").agg(['count', 'mean', 'std'])
df_math_agg['total_grade'].reset_index().round(4).to_csv(results_path + "math_table.csv", index=False)
# agg table por
df_por_agg = df_por[["romantic", "total_grade"]].groupby("romantic").agg(['count', 'mean', 'std'])
df_por_agg['total_grade'].reset_index().round(4).to_csv(results_path + "por_table.csv", index=False)
## print certain findings
print("{} math students were in relationships and {} were not.".format(
df_mat['romantic'].value_counts()['yes'],
df_mat['romantic'].value_counts()['no']))
print("{} portuguese language students were in relationships and {} were not.".format(
df_por['romantic'].value_counts()['yes'],
df_por['romantic'].value_counts()['no']))
print("The average total grade for math students in relationships was: {:.2f}/60".format(
df_mat[df_mat['romantic'] == 'yes']['total_grade'].mean()))
print("The average total grade for math students not in relationships was: {:.2f}/60".format(
df_mat[df_mat['romantic'] == 'no']['total_grade'].mean()))
print("The average total grade for portuguese students in relationships was: {:.2f}/60".format(
df_por[df_por['romantic'] == 'yes']['total_grade'].mean()))
print("The average total grade for portuguese students not in relationships was: {:.2f}/60".format(
df_por[df_por['romantic'] == 'no']['total_grade'].mean()))
## make plots
p_1_1 = alt.Chart(df_mat[df_mat['romantic']=="yes"]).transform_density(
'total_grade',
as_=['total_grade', 'density'],
).mark_bar().encode(
x=alt.X("total_grade:Q", title="Total grade", bin = alt.Bin(extent=[0, 60], step=5)),
y='density:Q',
).properties(
width = 300,
height = 400,
title = "In relationship"
)
p_1_2 = alt.Chart(df_mat[df_mat['romantic']=="no"]).transform_density(
'total_grade',
as_=['total_grade', 'density'],
).mark_bar(color='orange').encode(
x=alt.X("total_grade:Q", title="Total grade", bin = alt.Bin(extent=[0, 60], step=5)),
y='density:Q',
).properties(
width = 300,
height = 400,
title = "Not in relationship"
)
P_math = p_1_1 | p_1_2
P_math.configure_title(
fontSize=14,
)
p_2_1 = alt.Chart(df_por[df_por['romantic']=="yes"]).transform_density(
'total_grade',
as_=['total_grade', 'density'],
).mark_bar().encode(
x=alt.X("total_grade:Q", title="Total grade", bin = alt.Bin(extent=[0, 60], step=5)),
y='density:Q',
).properties(
width = 300,
height = 400,
title = "In relationship"
)
p_2_2 = alt.Chart(df_por[df_por['romantic']=="no"]).transform_density(
'total_grade',
as_=['total_grade', 'density'],
).mark_bar(color='orange').encode(
x=alt.X("total_grade:Q", title="Total grade", bin = alt.Bin(extent=[0, 60], step=5)),
y='density:Q',
).properties(
width = 300,
height = 400,
title = "Not in relationship"
)
P_por = p_2_1 | p_2_2
P_por.configure_title(
fontSize=14,
)
## save plots
P_math.save(results_path + "figures/math_plot.png", webdriver='chrome')
P_por.save(results_path + "figures/por_plot.png", webdriver='chrome')
def mds_special():
"""
Applies mds_special theme to plots
created by
Firas Moosvi, instructor at UBC
Master of Data Science program.
"""
font = "Arial"
axisColor = "#000000"
gridColor = "#DEDDDD"
return {
"config": {
"title": {
"fontSize": 24,
"font": font,
"anchor": "start", # equivalent of left-aligned.
"fontColor": "#000000"
},
"background": "white",
"axisX": {
"domain": True,
#"domainColor": axisColor,
"gridColor": gridColor,
"domainWidth": 1,
"grid": False,
"labelFont": font,
"labelFontSize": 12,
"labelAngle": 0,
#"tickColor": axisColor,
"tickSize": 5, # default, including it just to show you can change it
#"titleFont": font,
"titleFontSize": 18,
"titlePadding": 10, # guessing, not specified in styleguide
"title": "X Axis Title (units)",
},
"axisY": {
"domain": False,
"grid": True,
"gridColor": gridColor,
"gridWidth": 1,
"labelFont": font,
"labelFontSize": 12,
"labelAngle": 0,
#"ticks": False, # even if you don't have a "domain" you need to turn these off.
"titleFont": font,
"titleFontSize": 18,
"titlePadding": 10, # guessing, not specified in styleguide
"title": "Y Axis Title (units)",
# titles are by default vertical left of axis so we need to hack this
#"titleAngle": 0, # horizontal
#"titleY": -10, # move it up
#"titleX": 18, # move it to the right so it aligns with the labels
},
}
}
if __name__ == "__main__":
main(opt["--file_path"], opt["--results_path"])
| 35.009709 | 106 | 0.57848 |
import pandas as pd
import numpy as np
from docopt import docopt
import altair as alt
import re
import os
opt = docopt(__doc__)
def test_function():
file_path_check = re.match("([A-Za-z]+[.]{1}[A-Za-z]+)", opt["--file_path"])
out_path_check = re.match("([A-Za-z]+[.]{1}[A-Za-z]+)", opt["--results_path"])
assert file_path_check == None, "you can not have extensions in path, only directories."
assert out_path_check == None, "you can not have extensions in path, only directories."
try:
os.listdir(opt["--file_path"])
os.listdir(opt["--results_path"])
except Exception as e:
print(e)
test_function()
opt = docopt(__doc__)
def main(file_path, results_path):
df_mat = pd.read_csv(file_path + "student-mat_clean.csv")
df_por = pd.read_csv(file_path + "student-por_clean.csv")
alt.themes.register('mds_special', mds_special)
alt.themes.enable('mds_special')
df_math_agg = df_mat[["romantic", "total_grade"]].groupby("romantic").agg(['count', 'mean', 'std'])
df_math_agg['total_grade'].reset_index().round(4).to_csv(results_path + "math_table.csv", index=False)
df_por_agg = df_por[["romantic", "total_grade"]].groupby("romantic").agg(['count', 'mean', 'std'])
df_por_agg['total_grade'].reset_index().round(4).to_csv(results_path + "por_table.csv", index=False)
ents were in relationships and {} were not.".format(
df_mat['romantic'].value_counts()['yes'],
df_mat['romantic'].value_counts()['no']))
print("{} portuguese language students were in relationships and {} were not.".format(
df_por['romantic'].value_counts()['yes'],
df_por['romantic'].value_counts()['no']))
print("The average total grade for math students in relationships was: {:.2f}/60".format(
df_mat[df_mat['romantic'] == 'yes']['total_grade'].mean()))
print("The average total grade for math students not in relationships was: {:.2f}/60".format(
df_mat[df_mat['romantic'] == 'no']['total_grade'].mean()))
print("The average total grade for portuguese students in relationships was: {:.2f}/60".format(
df_por[df_por['romantic'] == 'yes']['total_grade'].mean()))
print("The average total grade for portuguese students not in relationships was: {:.2f}/60".format(
df_por[df_por['romantic'] == 'no']['total_grade'].mean()))
alt.Chart(df_mat[df_mat['romantic']=="yes"]).transform_density(
'total_grade',
as_=['total_grade', 'density'],
).mark_bar().encode(
x=alt.X("total_grade:Q", title="Total grade", bin = alt.Bin(extent=[0, 60], step=5)),
y='density:Q',
).properties(
width = 300,
height = 400,
title = "In relationship"
)
p_1_2 = alt.Chart(df_mat[df_mat['romantic']=="no"]).transform_density(
'total_grade',
as_=['total_grade', 'density'],
).mark_bar(color='orange').encode(
x=alt.X("total_grade:Q", title="Total grade", bin = alt.Bin(extent=[0, 60], step=5)),
y='density:Q',
).properties(
width = 300,
height = 400,
title = "Not in relationship"
)
P_math = p_1_1 | p_1_2
P_math.configure_title(
fontSize=14,
)
p_2_1 = alt.Chart(df_por[df_por['romantic']=="yes"]).transform_density(
'total_grade',
as_=['total_grade', 'density'],
).mark_bar().encode(
x=alt.X("total_grade:Q", title="Total grade", bin = alt.Bin(extent=[0, 60], step=5)),
y='density:Q',
).properties(
width = 300,
height = 400,
title = "In relationship"
)
p_2_2 = alt.Chart(df_por[df_por['romantic']=="no"]).transform_density(
'total_grade',
as_=['total_grade', 'density'],
).mark_bar(color='orange').encode(
x=alt.X("total_grade:Q", title="Total grade", bin = alt.Bin(extent=[0, 60], step=5)),
y='density:Q',
).properties(
width = 300,
height = 400,
title = "Not in relationship"
)
P_por = p_2_1 | p_2_2
P_por.configure_title(
fontSize=14,
)
save(results_path + "figures/math_plot.png", webdriver='chrome')
P_por.save(results_path + "figures/por_plot.png", webdriver='chrome')
def mds_special():
font = "Arial"
axisColor = "#000000"
gridColor = "#DEDDDD"
return {
"config": {
"title": {
"fontSize": 24,
"font": font,
"anchor": "start",
"fontColor": "#000000"
},
"background": "white",
"axisX": {
"domain": True,
"gridColor": gridColor,
"domainWidth": 1,
"grid": False,
"labelFont": font,
"labelFontSize": 12,
"labelAngle": 0,
"tickSize": 5,
"titleFontSize": 18,
"titlePadding": 10,
"title": "X Axis Title (units)",
},
"axisY": {
"domain": False,
"grid": True,
"gridColor": gridColor,
"gridWidth": 1,
"labelFont": font,
"labelFontSize": 12,
"labelAngle": 0,
ize": 18,
"titlePadding": 10, # guessing, not specified in styleguide
"title": "Y Axis Title (units)",
# titles are by default vertical left of axis so we need to hack this
#"titleAngle": 0, # horizontal
#"titleY": -10, # move it up
#"titleX": 18, # move it to the right so it aligns with the labels
},
}
}
if __name__ == "__main__":
main(opt["--file_path"], opt["--results_path"])
| true | true |
1c3b13ee68807c4a9242c0e6b8c68380acb5bdd8 | 4,522 | py | Python | userbot/modules/snips.py | hanuraiga/gabot-3 | 31885f3f1f0c57548fa709f4523818925bf4e2ab | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | 1 | 2019-08-12T11:59:48.000Z | 2019-08-12T11:59:48.000Z | userbot/modules/snips.py | arraqueen/userbot | 0e405f2d6b20ceed8e5edd6e77980081d61bc5b7 | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null | userbot/modules/snips.py | arraqueen/userbot | 0e405f2d6b20ceed8e5edd6e77980081d61bc5b7 | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | 1 | 2020-03-26T10:48:43.000Z | 2020-03-26T10:48:43.000Z | # Copyright (C) 2019 The Raphielscape Company LLC.
#
# Licensed under the Raphielscape Public License, Version 1.c (the "License");
# you may not use this file except in compliance with the License.
""" Userbot module containing commands for keeping global notes. """
from userbot.events import register
from userbot import CMD_HELP, BOTLOG_CHATID
@register(outgoing=True,
pattern=r"\$\w*",
ignore_unsafe=True,
disable_errors=True)
async def on_snip(event):
""" Snips logic. """
try:
from userbot.modules.sql_helper.snips_sql import get_snip
except AttributeError:
return
name = event.text[1:]
snip = get_snip(name)
message_id_to_reply = event.message.reply_to_msg_id
if not message_id_to_reply:
message_id_to_reply = None
if snip and snip.f_mesg_id:
msg_o = await event.client.get_messages(entity=BOTLOG_CHATID,
ids=int(snip.f_mesg_id))
await event.client.send_message(event.chat_id,
msg_o.message,
reply_to=message_id_to_reply,
file=msg_o.media)
elif snip and snip.reply:
await event.client.send_message(event.chat_id,
snip.reply,
reply_to=message_id_to_reply)
@register(outgoing=True, pattern=r"^.snip (\w*)")
async def on_snip_save(event):
""" For .snip command, saves snips for future use. """
try:
from userbot.modules.sql_helper.snips_sql import add_snip
except AttributeError:
await event.edit("`Running on Non-SQL mode!`")
return
keyword = event.pattern_match.group(1)
string = event.text.partition(keyword)[2]
msg = await event.get_reply_message()
msg_id = None
if msg and msg.media and not string:
if BOTLOG_CHATID:
await event.client.send_message(
BOTLOG_CHATID, f"#SNIP\
\nKEYWORD: {keyword}\
\n\nThe following message is saved as the data for the snip, please do NOT delete it !!"
)
msg_o = await event.client.forward_messages(
entity=BOTLOG_CHATID,
messages=msg,
from_peer=event.chat_id,
silent=True)
msg_id = msg_o.id
else:
await event.edit(
"`Saving snips with media requires the BOTLOG_CHATID to be set.`"
)
return
elif event.reply_to_msg_id and not string:
rep_msg = await event.get_reply_message()
string = rep_msg.text
success = "`Snip {} successfully. Use` **${}** `anywhere to get it`"
if add_snip(keyword, string, msg_id) is False:
await event.edit(success.format('updated', keyword))
else:
await event.edit(success.format('saved', keyword))
@register(outgoing=True, pattern="^.snips$")
async def on_snip_list(event):
""" For .snips command, lists snips saved by you. """
try:
from userbot.modules.sql_helper.snips_sql import get_snips
except AttributeError:
await event.edit("`Running on Non-SQL mode!`")
return
message = "`No snips available right now.`"
all_snips = get_snips()
for a_snip in all_snips:
if message == "`No snips available right now.`":
message = "Available snips:\n"
message += f"`${a_snip.snip}`\n"
else:
message += f"`${a_snip.snip}`\n"
await event.edit(message)
@register(outgoing=True, pattern=r"^.remsnip (\w*)")
async def on_snip_delete(event):
""" For .remsnip command, deletes a snip. """
try:
from userbot.modules.sql_helper.snips_sql import remove_snip
except AttributeError:
await event.edit("`Running on Non-SQL mode!`")
return
name = event.pattern_match.group(1)
if remove_snip(name) is True:
await event.edit(f"`Successfully deleted snip:` **{name}**")
else:
await event.edit(f"`Couldn't find snip:` **{name}**")
CMD_HELP.update({
"snips":
"\
$<snip_name>\
\nUsage: Gets the specified snip, anywhere.\
\n\n.snip <name> <data> or reply to a message with .snip <name>\
\nUsage: Saves the message as a snip (global note) with the name. (Works with pics, docs, and stickers too!)\
\n\n.snips\
\nUsage: Gets all saved snips.\
\n\n.remsnip <snip_name>\
\nUsage: Deletes the specified snip.\
"
})
| 35.328125 | 109 | 0.610128 |
from userbot.events import register
from userbot import CMD_HELP, BOTLOG_CHATID
@register(outgoing=True,
pattern=r"\$\w*",
ignore_unsafe=True,
disable_errors=True)
async def on_snip(event):
try:
from userbot.modules.sql_helper.snips_sql import get_snip
except AttributeError:
return
name = event.text[1:]
snip = get_snip(name)
message_id_to_reply = event.message.reply_to_msg_id
if not message_id_to_reply:
message_id_to_reply = None
if snip and snip.f_mesg_id:
msg_o = await event.client.get_messages(entity=BOTLOG_CHATID,
ids=int(snip.f_mesg_id))
await event.client.send_message(event.chat_id,
msg_o.message,
reply_to=message_id_to_reply,
file=msg_o.media)
elif snip and snip.reply:
await event.client.send_message(event.chat_id,
snip.reply,
reply_to=message_id_to_reply)
@register(outgoing=True, pattern=r"^.snip (\w*)")
async def on_snip_save(event):
try:
from userbot.modules.sql_helper.snips_sql import add_snip
except AttributeError:
await event.edit("`Running on Non-SQL mode!`")
return
keyword = event.pattern_match.group(1)
string = event.text.partition(keyword)[2]
msg = await event.get_reply_message()
msg_id = None
if msg and msg.media and not string:
if BOTLOG_CHATID:
await event.client.send_message(
BOTLOG_CHATID, f"#SNIP\
\nKEYWORD: {keyword}\
\n\nThe following message is saved as the data for the snip, please do NOT delete it !!"
)
msg_o = await event.client.forward_messages(
entity=BOTLOG_CHATID,
messages=msg,
from_peer=event.chat_id,
silent=True)
msg_id = msg_o.id
else:
await event.edit(
"`Saving snips with media requires the BOTLOG_CHATID to be set.`"
)
return
elif event.reply_to_msg_id and not string:
rep_msg = await event.get_reply_message()
string = rep_msg.text
success = "`Snip {} successfully. Use` **${}** `anywhere to get it`"
if add_snip(keyword, string, msg_id) is False:
await event.edit(success.format('updated', keyword))
else:
await event.edit(success.format('saved', keyword))
@register(outgoing=True, pattern="^.snips$")
async def on_snip_list(event):
try:
from userbot.modules.sql_helper.snips_sql import get_snips
except AttributeError:
await event.edit("`Running on Non-SQL mode!`")
return
message = "`No snips available right now.`"
all_snips = get_snips()
for a_snip in all_snips:
if message == "`No snips available right now.`":
message = "Available snips:\n"
message += f"`${a_snip.snip}`\n"
else:
message += f"`${a_snip.snip}`\n"
await event.edit(message)
@register(outgoing=True, pattern=r"^.remsnip (\w*)")
async def on_snip_delete(event):
try:
from userbot.modules.sql_helper.snips_sql import remove_snip
except AttributeError:
await event.edit("`Running on Non-SQL mode!`")
return
name = event.pattern_match.group(1)
if remove_snip(name) is True:
await event.edit(f"`Successfully deleted snip:` **{name}**")
else:
await event.edit(f"`Couldn't find snip:` **{name}**")
CMD_HELP.update({
"snips":
"\
$<snip_name>\
\nUsage: Gets the specified snip, anywhere.\
\n\n.snip <name> <data> or reply to a message with .snip <name>\
\nUsage: Saves the message as a snip (global note) with the name. (Works with pics, docs, and stickers too!)\
\n\n.snips\
\nUsage: Gets all saved snips.\
\n\n.remsnip <snip_name>\
\nUsage: Deletes the specified snip.\
"
})
| true | true |
1c3b152240d277003e63796921f3238bacc0ddb1 | 109 | py | Python | addmoneyMatt.py | rkelly07/dkekeg | bfface42939c825641d2c5135d91d628ea079ad7 | [
"MIT"
] | null | null | null | addmoneyMatt.py | rkelly07/dkekeg | bfface42939c825641d2c5135d91d628ea079ad7 | [
"MIT"
] | null | null | null | addmoneyMatt.py | rkelly07/dkekeg | bfface42939c825641d2c5135d91d628ea079ad7 | [
"MIT"
] | null | null | null | import DBAccessor
dbaccessor = DBAccessor.DBAccessor()
dbaccessor.updateBalance("172 165 182 203",1000.00)
| 18.166667 | 51 | 0.798165 | import DBAccessor
dbaccessor = DBAccessor.DBAccessor()
dbaccessor.updateBalance("172 165 182 203",1000.00)
| true | true |
1c3b15dce145b18cc68dcfe927d12d0d41f8ac84 | 9,921 | py | Python | piecrust/configuration.py | airbornemint/PieCrust2 | bd8e44a1a3ba646a9ebfbb4d4f1fa01a1daa3beb | [
"Apache-2.0"
] | null | null | null | piecrust/configuration.py | airbornemint/PieCrust2 | bd8e44a1a3ba646a9ebfbb4d4f1fa01a1daa3beb | [
"Apache-2.0"
] | null | null | null | piecrust/configuration.py | airbornemint/PieCrust2 | bd8e44a1a3ba646a9ebfbb4d4f1fa01a1daa3beb | [
"Apache-2.0"
] | null | null | null | import re
import logging
import collections
import collections.abc
import yaml
from yaml.constructor import ConstructorError
try:
from yaml import CSafeLoader as SafeLoader
except ImportError:
from yaml import SafeLoader
logger = logging.getLogger(__name__)
default_allowed_types = (dict, list, tuple, float, int, bool, str)
class ConfigurationError(Exception):
pass
class Configuration(collections.abc.MutableMapping):
def __init__(self, values=None, validate=True):
if values is not None:
self.setAll(values, validate=validate)
else:
self._values = None
def __getitem__(self, key):
self._ensureLoaded()
try:
return get_dict_value(self._values, key)
except KeyError:
raise KeyError("No such item: %s" % key)
def __setitem__(self, key, value):
self._ensureLoaded()
value = self._validateValue(key, value)
set_dict_value(self._values, key, value)
def __delitem__(self, key):
raise NotImplementedError()
def __iter__(self):
self._ensureLoaded()
return iter(self._values)
def __len__(self):
self._ensureLoaded()
return len(self._values)
def has(self, key):
return key in self
def set(self, key, value):
self[key] = value
def setAll(self, values, validate=False):
if validate:
values = self._validateAll(values)
self._values = values
def getAll(self):
self._ensureLoaded()
return self._values
def merge(self, other):
self._ensureLoaded()
if isinstance(other, dict):
other_values = other
elif isinstance(other, Configuration):
other_values = other._values
else:
raise Exception(
"Unsupported value type to merge: %s" % type(other))
merge_dicts(self._values, other_values,
validator=self._validateValue)
def validateTypes(self, allowed_types=default_allowed_types):
self._validateDictTypesRecursive(self._values, allowed_types)
def _validateDictTypesRecursive(self, d, allowed_types):
for k, v in d.items():
if not isinstance(k, str):
raise ConfigurationError("Key '%s' is not a string." % k)
self._validateTypeRecursive(v, allowed_types)
def _validateListTypesRecursive(self, l, allowed_types):
for v in l:
self._validateTypeRecursive(v, allowed_types)
def _validateTypeRecursive(self, v, allowed_types):
if v is None:
return
if not isinstance(v, allowed_types):
raise ConfigurationError(
"Value '%s' is of forbidden type: %s" % (v, type(v)))
if isinstance(v, dict):
self._validateDictTypesRecursive(v, allowed_types)
elif isinstance(v, list):
self._validateListTypesRecursive(v, allowed_types)
def _ensureLoaded(self):
if self._values is None:
self._load()
def _load(self):
self._values = self._validateAll({})
def _validateAll(self, values):
return values
def _validateValue(self, key_path, value):
return value
def get_dict_value(d, key):
bits = key.split('/')
cur = d
for b in bits:
cur = cur[b]
return cur
def get_dict_values(*args):
for d, key in args:
try:
return get_dict_value(d, key)
except KeyError:
continue
raise KeyError()
def try_get_dict_value(d, key, *, default=None):
try:
return get_dict_value(d, key)
except KeyError:
return default
def try_get_dict_values(*args, default=None):
for d, key in args:
try:
return get_dict_value(d, key)
except KeyError:
continue
return default
def set_dict_value(d, key, value):
bits = key.split('/')
bitslen = len(bits)
cur = d
for i, b in enumerate(bits):
if i == bitslen - 1:
cur[b] = value
else:
if b not in cur:
cur[b] = {}
cur = cur[b]
MERGE_NEW_VALUES = 0
MERGE_OVERWRITE_VALUES = 1
MERGE_PREPEND_LISTS = 2
MERGE_APPEND_LISTS = 4
MERGE_ALL = MERGE_OVERWRITE_VALUES | MERGE_PREPEND_LISTS
def merge_dicts(source, merging, *args,
validator=None, mode=MERGE_ALL):
_recurse_merge_dicts(source, merging, None, validator, mode)
for other in args:
_recurse_merge_dicts(source, other, None, validator, mode)
return source
def _recurse_merge_dicts(local_cur, incoming_cur, parent_path,
validator, mode):
for k, v in incoming_cur.items():
key_path = k
if parent_path is not None:
key_path = parent_path + '/' + k
local_v = local_cur.get(k)
if local_v is not None:
if isinstance(v, dict) and isinstance(local_v, dict):
_recurse_merge_dicts(local_v, v, key_path,
validator, mode)
elif isinstance(v, list) and isinstance(local_v, list):
if mode & MERGE_PREPEND_LISTS:
local_cur[k] = v + local_v
elif mode & MERGE_APPEND_LISTS:
local_cur[k] = local_v + v
else:
if mode & MERGE_OVERWRITE_VALUES:
if validator is not None:
v = validator(key_path, v)
local_cur[k] = v
else:
if ((mode & (MERGE_PREPEND_LISTS | MERGE_APPEND_LISTS)) or
not isinstance(v, list)):
if validator is not None:
v = validator(key_path, v)
local_cur[k] = v
def visit_dict(subject, visitor):
_recurse_visit_dict(subject, None, visitor)
def _recurse_visit_dict(cur, parent_path, visitor):
for k, v in cur.items():
key_path = k
if parent_path is not None:
key_path = parent_path + '/' + k
visitor(key_path, v, cur, k)
if isinstance(v, dict):
_recurse_visit_dict(v, key_path, visitor)
header_regex = re.compile(
r'(---\s*\n)(?P<header>(.*\n)*?)^(---\s*\n)', re.MULTILINE)
def parse_config_header(text):
m = header_regex.match(text)
if m is not None:
header = str(m.group('header'))
config = yaml.load(header, Loader=ConfigurationLoader)
offset = m.end()
else:
config = {}
offset = 0
return config, offset
class ConfigurationLoader(SafeLoader):
    """ A YAML loader that loads mappings into ordered dictionaries.

    Also installs a `sexagesimal` constructor so a scalar like `12:30` is
    loaded as a number of seconds (45000), not YAML 1.1's base-60 integer.
    """
    def __init__(self, *args, **kwargs):
        super(ConfigurationLoader, self).__init__(*args, **kwargs)

        self.add_constructor('tag:yaml.org,2002:map',
                type(self).construct_yaml_map)
        self.add_constructor('tag:yaml.org,2002:omap',
                type(self).construct_yaml_map)
        self.add_constructor('tag:yaml.org,2002:sexagesimal',
                type(self).construct_yaml_time)

    def construct_yaml_map(self, node):
        # Yield the (still empty) dict first so anchored/recursive nodes
        # can reference it, then populate it.
        data = collections.OrderedDict()
        yield data
        value = self.construct_mapping(node)
        data.update(value)

    def construct_mapping(self, node, deep=False):
        if not isinstance(node, yaml.MappingNode):
            raise ConstructorError(None, None,
                    "expected a mapping node, but found %s" % node.id,
                    node.start_mark)
        mapping = collections.OrderedDict()
        for key_node, value_node in node.value:
            key = self.construct_object(key_node, deep=deep)
            # BUGFIX: the `Hashable` ABC lives in `collections.abc`; the
            # `collections.Hashable` alias was removed in Python 3.10.
            if not isinstance(key, collections.abc.Hashable):
                raise ConstructorError("while constructing a mapping", node.start_mark,
                        "found unhashable key", key_node.start_mark)
            value = self.construct_object(value_node, deep=deep)
            mapping[key] = value
        return mapping

    # Accepts H:MM, H:MM:SS and H:MM:SS.fraction (1- or 2-digit hour).
    time_regexp = re.compile(
            r'''^(?P<hour>[0-9][0-9]?)
                :(?P<minute>[0-9][0-9])
                (:(?P<second>[0-9][0-9])
                (\.(?P<fraction>[0-9]+))?)?$''', re.X)

    def construct_yaml_time(self, node):
        self.construct_scalar(node)
        match = self.time_regexp.match(node.value)
        values = match.groupdict()
        hour = int(values['hour'])
        minute = int(values['minute'])
        second = 0
        if values['second']:
            second = int(values['second'])
        usec = 0
        if values['fraction']:
            usec = float('0.' + values['fraction'])
        # Total seconds, with an optional fractional part.
        return second + minute * 60 + hour * 60 * 60 + usec
# Register the implicit resolver so bare `HH:MM[:SS[.frac]]` scalars get
# the `sexagesimal` tag and are built by `construct_yaml_time`.
ConfigurationLoader.add_implicit_resolver(
        'tag:yaml.org,2002:sexagesimal',
        re.compile(r'''^[0-9][0-9]?:[0-9][0-9]
                    (:[0-9][0-9](\.[0-9]+)?)?$''', re.X),
        list('0123456789'))

# We need to add our `sexagesimal` resolver before the `int` one, which
# already supports sexagesimal notation in YAML 1.1 (but not 1.2). However,
# because we know we pretty much always want it for representing time, we
# need a simple `12:30` to mean 45000, not 750. So that's why we override
# the default behaviour.
for ch in '0123456789':  # a str is already an iterable of its characters
    ch_resolvers = ConfigurationLoader.yaml_implicit_resolvers[ch]
    # `add_implicit_resolver` appended our resolver last; move it first
    # so it takes precedence over the built-in `int` resolver.
    ch_resolvers.insert(0, ch_resolvers.pop())
class ConfigurationDumper(yaml.SafeDumper):
    """A YAML dumper aware of `OrderedDict` (see `represent_ordered_dict`)."""
    def represent_ordered_dict(self, mapping):
        # Not a typo: we're using `map` and not `omap` because we don't want
        # ugly type tags printed in the generated YAML markup, and because
        # we always load maps into `OrderedDicts` anyway.
        return self.represent_mapping('tag:yaml.org,2002:map', mapping)
# Serialize OrderedDicts as plain YAML maps via the representer above.
ConfigurationDumper.add_representer(collections.OrderedDict,
                                    ConfigurationDumper.represent_ordered_dict)
| 30.62037 | 87 | 0.602056 | import re
import logging
import collections
import collections.abc
import yaml
from yaml.constructor import ConstructorError
try:
from yaml import CSafeLoader as SafeLoader
except ImportError:
from yaml import SafeLoader
logger = logging.getLogger(__name__)
default_allowed_types = (dict, list, tuple, float, int, bool, str)
class ConfigurationError(Exception):
pass
class Configuration(collections.abc.MutableMapping):
def __init__(self, values=None, validate=True):
if values is not None:
self.setAll(values, validate=validate)
else:
self._values = None
def __getitem__(self, key):
self._ensureLoaded()
try:
return get_dict_value(self._values, key)
except KeyError:
raise KeyError("No such item: %s" % key)
def __setitem__(self, key, value):
self._ensureLoaded()
value = self._validateValue(key, value)
set_dict_value(self._values, key, value)
def __delitem__(self, key):
raise NotImplementedError()
def __iter__(self):
self._ensureLoaded()
return iter(self._values)
def __len__(self):
self._ensureLoaded()
return len(self._values)
def has(self, key):
return key in self
def set(self, key, value):
self[key] = value
def setAll(self, values, validate=False):
if validate:
values = self._validateAll(values)
self._values = values
def getAll(self):
self._ensureLoaded()
return self._values
def merge(self, other):
self._ensureLoaded()
if isinstance(other, dict):
other_values = other
elif isinstance(other, Configuration):
other_values = other._values
else:
raise Exception(
"Unsupported value type to merge: %s" % type(other))
merge_dicts(self._values, other_values,
validator=self._validateValue)
def validateTypes(self, allowed_types=default_allowed_types):
self._validateDictTypesRecursive(self._values, allowed_types)
def _validateDictTypesRecursive(self, d, allowed_types):
for k, v in d.items():
if not isinstance(k, str):
raise ConfigurationError("Key '%s' is not a string." % k)
self._validateTypeRecursive(v, allowed_types)
def _validateListTypesRecursive(self, l, allowed_types):
for v in l:
self._validateTypeRecursive(v, allowed_types)
def _validateTypeRecursive(self, v, allowed_types):
if v is None:
return
if not isinstance(v, allowed_types):
raise ConfigurationError(
"Value '%s' is of forbidden type: %s" % (v, type(v)))
if isinstance(v, dict):
self._validateDictTypesRecursive(v, allowed_types)
elif isinstance(v, list):
self._validateListTypesRecursive(v, allowed_types)
def _ensureLoaded(self):
if self._values is None:
self._load()
def _load(self):
self._values = self._validateAll({})
def _validateAll(self, values):
return values
def _validateValue(self, key_path, value):
return value
def get_dict_value(d, key):
bits = key.split('/')
cur = d
for b in bits:
cur = cur[b]
return cur
def get_dict_values(*args):
for d, key in args:
try:
return get_dict_value(d, key)
except KeyError:
continue
raise KeyError()
def try_get_dict_value(d, key, *, default=None):
try:
return get_dict_value(d, key)
except KeyError:
return default
def try_get_dict_values(*args, default=None):
for d, key in args:
try:
return get_dict_value(d, key)
except KeyError:
continue
return default
def set_dict_value(d, key, value):
bits = key.split('/')
bitslen = len(bits)
cur = d
for i, b in enumerate(bits):
if i == bitslen - 1:
cur[b] = value
else:
if b not in cur:
cur[b] = {}
cur = cur[b]
MERGE_NEW_VALUES = 0
MERGE_OVERWRITE_VALUES = 1
MERGE_PREPEND_LISTS = 2
MERGE_APPEND_LISTS = 4
MERGE_ALL = MERGE_OVERWRITE_VALUES | MERGE_PREPEND_LISTS
def merge_dicts(source, merging, *args,
validator=None, mode=MERGE_ALL):
_recurse_merge_dicts(source, merging, None, validator, mode)
for other in args:
_recurse_merge_dicts(source, other, None, validator, mode)
return source
def _recurse_merge_dicts(local_cur, incoming_cur, parent_path,
validator, mode):
for k, v in incoming_cur.items():
key_path = k
if parent_path is not None:
key_path = parent_path + '/' + k
local_v = local_cur.get(k)
if local_v is not None:
if isinstance(v, dict) and isinstance(local_v, dict):
_recurse_merge_dicts(local_v, v, key_path,
validator, mode)
elif isinstance(v, list) and isinstance(local_v, list):
if mode & MERGE_PREPEND_LISTS:
local_cur[k] = v + local_v
elif mode & MERGE_APPEND_LISTS:
local_cur[k] = local_v + v
else:
if mode & MERGE_OVERWRITE_VALUES:
if validator is not None:
v = validator(key_path, v)
local_cur[k] = v
else:
if ((mode & (MERGE_PREPEND_LISTS | MERGE_APPEND_LISTS)) or
not isinstance(v, list)):
if validator is not None:
v = validator(key_path, v)
local_cur[k] = v
def visit_dict(subject, visitor):
_recurse_visit_dict(subject, None, visitor)
def _recurse_visit_dict(cur, parent_path, visitor):
for k, v in cur.items():
key_path = k
if parent_path is not None:
key_path = parent_path + '/' + k
visitor(key_path, v, cur, k)
if isinstance(v, dict):
_recurse_visit_dict(v, key_path, visitor)
header_regex = re.compile(
r'(---\s*\n)(?P<header>(.*\n)*?)^(---\s*\n)', re.MULTILINE)
def parse_config_header(text):
m = header_regex.match(text)
if m is not None:
header = str(m.group('header'))
config = yaml.load(header, Loader=ConfigurationLoader)
offset = m.end()
else:
config = {}
offset = 0
return config, offset
class ConfigurationLoader(SafeLoader):
def __init__(self, *args, **kwargs):
super(ConfigurationLoader, self).__init__(*args, **kwargs)
self.add_constructor('tag:yaml.org,2002:map',
type(self).construct_yaml_map)
self.add_constructor('tag:yaml.org,2002:omap',
type(self).construct_yaml_map)
self.add_constructor('tag:yaml.org,2002:sexagesimal',
type(self).construct_yaml_time)
def construct_yaml_map(self, node):
data = collections.OrderedDict()
yield data
value = self.construct_mapping(node)
data.update(value)
def construct_mapping(self, node, deep=False):
if not isinstance(node, yaml.MappingNode):
raise ConstructorError(None, None,
"expected a mapping node, but found %s" % node.id,
node.start_mark)
mapping = collections.OrderedDict()
for key_node, value_node in node.value:
key = self.construct_object(key_node, deep=deep)
if not isinstance(key, collections.Hashable):
raise ConstructorError("while constructing a mapping", node.start_mark,
"found unhashable key", key_node.start_mark)
value = self.construct_object(value_node, deep=deep)
mapping[key] = value
return mapping
time_regexp = re.compile(
r'''^(?P<hour>[0-9][0-9]?)
:(?P<minute>[0-9][0-9])
(:(?P<second>[0-9][0-9])
(\.(?P<fraction>[0-9]+))?)?$''', re.X)
def construct_yaml_time(self, node):
self.construct_scalar(node)
match = self.time_regexp.match(node.value)
values = match.groupdict()
hour = int(values['hour'])
minute = int(values['minute'])
second = 0
if values['second']:
second = int(values['second'])
usec = 0
if values['fraction']:
usec = float('0.' + values['fraction'])
return second + minute * 60 + hour * 60 * 60 + usec
ConfigurationLoader.add_implicit_resolver(
'tag:yaml.org,2002:sexagesimal',
re.compile(r'''^[0-9][0-9]?:[0-9][0-9]
(:[0-9][0-9](\.[0-9]+)?)?$''', re.X),
list('0123456789'))
# the default behaviour.
for ch in list('0123456789'):
ch_resolvers = ConfigurationLoader.yaml_implicit_resolvers[ch]
ch_resolvers.insert(0, ch_resolvers.pop())
class ConfigurationDumper(yaml.SafeDumper):
def represent_ordered_dict(self, data):
# Not a typo: we're using `map` and not `omap` because we don't want
# ugly type tags printed in the generated YAML markup, and because
# we always load maps into `OrderedDicts` anyway.
return self.represent_mapping('tag:yaml.org,2002:map', data)
ConfigurationDumper.add_representer(collections.OrderedDict,
ConfigurationDumper.represent_ordered_dict)
| true | true |
1c3b15ea2713c8f46b54b18b22d0365b0ad1f460 | 62,964 | py | Python | sympy/integrals/manualintegrate.py | ricopicone/sympy | de27c97214d540247a35c8215c7920e9a46b54ed | [
"BSD-3-Clause"
] | 2 | 2019-02-05T19:20:24.000Z | 2019-04-23T13:24:38.000Z | sympy/integrals/manualintegrate.py | ricopicone/sympy | de27c97214d540247a35c8215c7920e9a46b54ed | [
"BSD-3-Clause"
] | null | null | null | sympy/integrals/manualintegrate.py | ricopicone/sympy | de27c97214d540247a35c8215c7920e9a46b54ed | [
"BSD-3-Clause"
] | 1 | 2019-10-15T10:55:42.000Z | 2019-10-15T10:55:42.000Z | """Integration method that emulates by-hand techniques.
This module also provides functionality to get the steps used to evaluate a
particular integral, in the ``integral_steps`` function. This will return
nested namedtuples representing the integration rules used. The
``manualintegrate`` function computes the integral using those steps given
an integrand; given the steps, ``_manualintegrate`` will evaluate them.
The integrator can be extended with new heuristics and evaluation
techniques. To do so, write a function that accepts an ``IntegralInfo``
object and returns either a namedtuple representing a rule or
``None``. Then, write another function that accepts the namedtuple's fields
and returns the antiderivative, and decorate it with
``@evaluates(namedtuple_type)``. If the new technique requires a new
match, add the key and call to the antiderivative function to integral_steps.
To enable simple substitutions, add the match to find_substitutions.
"""
from __future__ import print_function, division
from collections import namedtuple, defaultdict
import sympy
from sympy.core.compatibility import reduce, Mapping, iterable
from sympy.core.containers import Dict
from sympy.core.logic import fuzzy_not
from sympy.functions.elementary.trigonometric import TrigonometricFunction
from sympy.functions.special.polynomials import OrthogonalPolynomial
from sympy.functions.elementary.piecewise import Piecewise
from sympy.strategies.core import switch, do_one, null_safe, condition
from sympy.core.relational import Eq, Ne
from sympy.polys.polytools import degree
from sympy.ntheory.factor_ import divisors
from sympy.utilities.misc import debug
ZERO = sympy.S.Zero
def Rule(name, props=""):
# GOTCHA: namedtuple class name not considered!
def __eq__(self, other):
return self.__class__ == other.__class__ and tuple.__eq__(self, other)
__neq__ = lambda self, other: not __eq__(self, other)
cls = namedtuple(name, props + " context symbol")
cls.__eq__ = __eq__
cls.__ne__ = __neq__
return cls
ConstantRule = Rule("ConstantRule", "constant")
ConstantTimesRule = Rule("ConstantTimesRule", "constant other substep")
PowerRule = Rule("PowerRule", "base exp")
AddRule = Rule("AddRule", "substeps")
URule = Rule("URule", "u_var u_func constant substep")
PartsRule = Rule("PartsRule", "u dv v_step second_step")
CyclicPartsRule = Rule("CyclicPartsRule", "parts_rules coefficient")
TrigRule = Rule("TrigRule", "func arg")
ExpRule = Rule("ExpRule", "base exp")
ReciprocalRule = Rule("ReciprocalRule", "func")
ArcsinRule = Rule("ArcsinRule")
InverseHyperbolicRule = Rule("InverseHyperbolicRule", "func")
AlternativeRule = Rule("AlternativeRule", "alternatives")
DontKnowRule = Rule("DontKnowRule")
DerivativeRule = Rule("DerivativeRule")
RewriteRule = Rule("RewriteRule", "rewritten substep")
PiecewiseRule = Rule("PiecewiseRule", "subfunctions")
HeavisideRule = Rule("HeavisideRule", "harg ibnd substep")
TrigSubstitutionRule = Rule("TrigSubstitutionRule",
"theta func rewritten substep restriction")
ArctanRule = Rule("ArctanRule", "a b c")
ArccothRule = Rule("ArccothRule", "a b c")
ArctanhRule = Rule("ArctanhRule", "a b c")
JacobiRule = Rule("JacobiRule", "n a b")
GegenbauerRule = Rule("GegenbauerRule", "n a")
ChebyshevTRule = Rule("ChebyshevTRule", "n")
ChebyshevURule = Rule("ChebyshevURule", "n")
LegendreRule = Rule("LegendreRule", "n")
HermiteRule = Rule("HermiteRule", "n")
LaguerreRule = Rule("LaguerreRule", "n")
AssocLaguerreRule = Rule("AssocLaguerreRule", "n a")
CiRule = Rule("CiRule", "a b")
ChiRule = Rule("ChiRule", "a b")
EiRule = Rule("EiRule", "a b")
SiRule = Rule("SiRule", "a b")
ShiRule = Rule("ShiRule", "a b")
ErfRule = Rule("ErfRule", "a b c")
FresnelCRule = Rule("FresnelCRule", "a b c")
FresnelSRule = Rule("FresnelSRule", "a b c")
LiRule = Rule("LiRule", "a b")
PolylogRule = Rule("PolylogRule", "a b")
UpperGammaRule = Rule("UpperGammaRule", "a e")
EllipticFRule = Rule("EllipticFRule", "a d")
EllipticERule = Rule("EllipticERule", "a d")
IntegralInfo = namedtuple('IntegralInfo', 'integrand symbol')
evaluators = {}
def evaluates(rule):
def _evaluates(func):
func.rule = rule
evaluators[rule] = func
return func
return _evaluates
def contains_dont_know(rule):
if isinstance(rule, DontKnowRule):
return True
else:
for val in rule:
if isinstance(val, tuple):
if contains_dont_know(val):
return True
elif isinstance(val, list):
if any(contains_dont_know(i) for i in val):
return True
return False
def manual_diff(f, symbol):
"""Derivative of f in form expected by find_substitutions
SymPy's derivatives for some trig functions (like cot) aren't in a form
that works well with finding substitutions; this replaces the
derivatives for those particular forms with something that works better.
"""
if f.args:
arg = f.args[0]
if isinstance(f, sympy.tan):
return arg.diff(symbol) * sympy.sec(arg)**2
elif isinstance(f, sympy.cot):
return -arg.diff(symbol) * sympy.csc(arg)**2
elif isinstance(f, sympy.sec):
return arg.diff(symbol) * sympy.sec(arg) * sympy.tan(arg)
elif isinstance(f, sympy.csc):
return -arg.diff(symbol) * sympy.csc(arg) * sympy.cot(arg)
elif isinstance(f, sympy.Add):
return sum([manual_diff(arg, symbol) for arg in f.args])
elif isinstance(f, sympy.Mul):
if len(f.args) == 2 and isinstance(f.args[0], sympy.Number):
return f.args[0] * manual_diff(f.args[1], symbol)
return f.diff(symbol)
def manual_subs(expr, *args):
"""
A wrapper for `expr.subs(*args)` with additional logic for substitution
of invertible functions.
"""
if len(args) == 1:
sequence = args[0]
if isinstance(sequence, (Dict, Mapping)):
sequence = sequence.items()
elif not iterable(sequence):
raise ValueError("Expected an iterable of (old, new) pairs")
elif len(args) == 2:
sequence = [args]
else:
raise ValueError("subs accepts either 1 or 2 arguments")
new_subs = []
for old, new in sequence:
if isinstance(old, sympy.log):
# If log(x) = y, then exp(a*log(x)) = exp(a*y)
# that is, x**a = exp(a*y). Replace nontrivial powers of x
# before subs turns them into `exp(y)**a`, but
# do not replace x itself yet, to avoid `log(exp(y))`.
x0 = old.args[0]
expr = expr.replace(lambda x: x.is_Pow and x.base == x0,
lambda x: sympy.exp(x.exp*new))
new_subs.append((x0, sympy.exp(new)))
return expr.subs(list(sequence) + new_subs)
# Method based on that on SIN, described in "Symbolic Integration: The
# Stormy Decade"
def find_substitutions(integrand, symbol, u_var):
results = []
def test_subterm(u, u_diff):
if u_diff == 0:
return False
substituted = integrand / u_diff
if symbol not in substituted.free_symbols:
# replaced everything already
return False
debug("substituted: {}, u: {}, u_var: {}".format(substituted, u, u_var))
substituted = manual_subs(substituted, u, u_var).cancel()
if symbol not in substituted.free_symbols:
# avoid increasing the degree of a rational function
if integrand.is_rational_function(symbol) and substituted.is_rational_function(u_var):
deg_before = max([degree(t, symbol) for t in integrand.as_numer_denom()])
deg_after = max([degree(t, u_var) for t in substituted.as_numer_denom()])
if deg_after > deg_before:
return False
return substituted.as_independent(u_var, as_Add=False)
# special treatment for substitutions u = (a*x+b)**(1/n)
if (isinstance(u, sympy.Pow) and (1/u.exp).is_Integer and
sympy.Abs(u.exp) < 1):
a = sympy.Wild('a', exclude=[symbol])
b = sympy.Wild('b', exclude=[symbol])
match = u.base.match(a*symbol + b)
if match:
a, b = [match.get(i, ZERO) for i in (a, b)]
if a != 0 and b != 0:
substituted = substituted.subs(symbol,
(u_var**(1/u.exp) - b)/a)
return substituted.as_independent(u_var, as_Add=False)
return False
def possible_subterms(term):
if isinstance(term, (TrigonometricFunction,
sympy.asin, sympy.acos, sympy.atan,
sympy.exp, sympy.log, sympy.Heaviside)):
return [term.args[0]]
elif isinstance(term, (sympy.chebyshevt, sympy.chebyshevu,
sympy.legendre, sympy.hermite, sympy.laguerre)):
return [term.args[1]]
elif isinstance(term, (sympy.gegenbauer, sympy.assoc_laguerre)):
return [term.args[2]]
elif isinstance(term, sympy.jacobi):
return [term.args[3]]
elif isinstance(term, sympy.Mul):
r = []
for u in term.args:
r.append(u)
r.extend(possible_subterms(u))
return r
elif isinstance(term, sympy.Pow):
r = []
if term.args[1].is_constant(symbol):
r.append(term.args[0])
elif term.args[0].is_constant(symbol):
r.append(term.args[1])
if term.args[1].is_Integer:
r.extend([term.args[0]**d for d in divisors(term.args[1])
if 1 < d < abs(term.args[1])])
if term.args[0].is_Add:
r.extend([t for t in possible_subterms(term.args[0])
if t.is_Pow])
return r
elif isinstance(term, sympy.Add):
r = []
for arg in term.args:
r.append(arg)
r.extend(possible_subterms(arg))
return r
return []
for u in possible_subterms(integrand):
if u == symbol:
continue
u_diff = manual_diff(u, symbol)
new_integrand = test_subterm(u, u_diff)
if new_integrand is not False:
constant, new_integrand = new_integrand
if new_integrand == integrand.subs(symbol, u_var):
continue
substitution = (u, constant, new_integrand)
if substitution not in results:
results.append(substitution)
return results
def rewriter(condition, rewrite):
"""Strategy that rewrites an integrand."""
def _rewriter(integral):
integrand, symbol = integral
debug("Integral: {} is rewritten with {} on symbol: {}".format(integrand, rewrite, symbol))
if condition(*integral):
rewritten = rewrite(*integral)
if rewritten != integrand:
substep = integral_steps(rewritten, symbol)
if not isinstance(substep, DontKnowRule) and substep:
return RewriteRule(
rewritten,
substep,
integrand, symbol)
return _rewriter
def proxy_rewriter(condition, rewrite):
"""Strategy that rewrites an integrand based on some other criteria."""
def _proxy_rewriter(criteria):
criteria, integral = criteria
integrand, symbol = integral
debug("Integral: {} is rewritten with {} on symbol: {} and criteria: {}".format(integrand, rewrite, symbol, criteria))
args = criteria + list(integral)
if condition(*args):
rewritten = rewrite(*args)
if rewritten != integrand:
return RewriteRule(
rewritten,
integral_steps(rewritten, symbol),
integrand, symbol)
return _proxy_rewriter
def multiplexer(conditions):
"""Apply the rule that matches the condition, else None"""
def multiplexer_rl(expr):
for key, rule in conditions.items():
if key(expr):
return rule(expr)
return multiplexer_rl
def alternatives(*rules):
"""Strategy that makes an AlternativeRule out of multiple possible results."""
def _alternatives(integral):
alts = []
count = 0
debug("List of Alternative Rules")
for rule in rules:
count = count + 1
debug("Rule {}: {}".format(count, rule))
result = rule(integral)
if (result and not isinstance(result, DontKnowRule) and
result != integral and result not in alts):
alts.append(result)
if len(alts) == 1:
return alts[0]
elif alts:
doable = [rule for rule in alts if not contains_dont_know(rule)]
if doable:
return AlternativeRule(doable, *integral)
else:
return AlternativeRule(alts, *integral)
return _alternatives
def constant_rule(integral):
integrand, symbol = integral
return ConstantRule(integral.integrand, *integral)
def power_rule(integral):
integrand, symbol = integral
base, exp = integrand.as_base_exp()
if symbol not in exp.free_symbols and isinstance(base, sympy.Symbol):
if sympy.simplify(exp + 1) == 0:
return ReciprocalRule(base, integrand, symbol)
return PowerRule(base, exp, integrand, symbol)
elif symbol not in base.free_symbols and isinstance(exp, sympy.Symbol):
rule = ExpRule(base, exp, integrand, symbol)
if fuzzy_not(sympy.log(base).is_zero):
return rule
elif sympy.log(base).is_zero:
return ConstantRule(1, 1, symbol)
return PiecewiseRule([
(rule, sympy.Ne(sympy.log(base), 0)),
(ConstantRule(1, 1, symbol), True)
], integrand, symbol)
def exp_rule(integral):
integrand, symbol = integral
if isinstance(integrand.args[0], sympy.Symbol):
return ExpRule(sympy.E, integrand.args[0], integrand, symbol)
def orthogonal_poly_rule(integral):
orthogonal_poly_classes = {
sympy.jacobi: JacobiRule,
sympy.gegenbauer: GegenbauerRule,
sympy.chebyshevt: ChebyshevTRule,
sympy.chebyshevu: ChebyshevURule,
sympy.legendre: LegendreRule,
sympy.hermite: HermiteRule,
sympy.laguerre: LaguerreRule,
sympy.assoc_laguerre: AssocLaguerreRule
}
orthogonal_poly_var_index = {
sympy.jacobi: 3,
sympy.gegenbauer: 2,
sympy.assoc_laguerre: 2
}
integrand, symbol = integral
for klass in orthogonal_poly_classes:
if isinstance(integrand, klass):
var_index = orthogonal_poly_var_index.get(klass, 1)
if (integrand.args[var_index] is symbol and not
any(v.has(symbol) for v in integrand.args[:var_index])):
args = integrand.args[:var_index] + (integrand, symbol)
return orthogonal_poly_classes[klass](*args)
def special_function_rule(integral):
integrand, symbol = integral
a = sympy.Wild('a', exclude=[symbol], properties=[lambda x: not x.is_zero])
b = sympy.Wild('b', exclude=[symbol])
c = sympy.Wild('c', exclude=[symbol])
d = sympy.Wild('d', exclude=[symbol], properties=[lambda x: not x.is_zero])
e = sympy.Wild('e', exclude=[symbol], properties=[
lambda x: not (x.is_nonnegative and x.is_integer)])
wilds = (a, b, c, d, e)
# patterns consist of a SymPy class, a wildcard expr, an optional
# condition coded as a lambda (when Wild properties are not enough),
# followed by an applicable rule
patterns = (
(sympy.Mul, sympy.exp(a*symbol + b)/symbol, None, EiRule),
(sympy.Mul, sympy.cos(a*symbol + b)/symbol, None, CiRule),
(sympy.Mul, sympy.cosh(a*symbol + b)/symbol, None, ChiRule),
(sympy.Mul, sympy.sin(a*symbol + b)/symbol, None, SiRule),
(sympy.Mul, sympy.sinh(a*symbol + b)/symbol, None, ShiRule),
(sympy.Pow, 1/sympy.log(a*symbol + b), None, LiRule),
(sympy.exp, sympy.exp(a*symbol**2 + b*symbol + c), None, ErfRule),
(sympy.sin, sympy.sin(a*symbol**2 + b*symbol + c), None, FresnelSRule),
(sympy.cos, sympy.cos(a*symbol**2 + b*symbol + c), None, FresnelCRule),
(sympy.Mul, symbol**e*sympy.exp(a*symbol), None, UpperGammaRule),
(sympy.Mul, sympy.polylog(b, a*symbol)/symbol, None, PolylogRule),
(sympy.Pow, 1/sympy.sqrt(a - d*sympy.sin(symbol)**2),
lambda a, d: a != d, EllipticFRule),
(sympy.Pow, sympy.sqrt(a - d*sympy.sin(symbol)**2),
lambda a, d: a != d, EllipticERule),
)
for p in patterns:
if isinstance(integrand, p[0]):
match = integrand.match(p[1])
if match:
wild_vals = tuple(match.get(w) for w in wilds
if match.get(w) is not None)
if p[2] is None or p[2](*wild_vals):
args = wild_vals + (integrand, symbol)
return p[3](*args)
def inverse_trig_rule(integral):
integrand, symbol = integral
base, exp = integrand.as_base_exp()
a = sympy.Wild('a', exclude=[symbol])
b = sympy.Wild('b', exclude=[symbol])
match = base.match(a + b*symbol**2)
if not match:
return
def negative(x):
return x.is_negative or x.could_extract_minus_sign()
def ArcsinhRule(integrand, symbol):
return InverseHyperbolicRule(sympy.asinh, integrand, symbol)
def ArccoshRule(integrand, symbol):
return InverseHyperbolicRule(sympy.acosh, integrand, symbol)
def make_inverse_trig(RuleClass, base_exp, a, sign_a, b, sign_b):
u_var = sympy.Dummy("u")
current_base = base
current_symbol = symbol
constant = u_func = u_constant = substep = None
factored = integrand
if a != 1:
constant = a**base_exp
current_base = sign_a + sign_b * (b/a) * current_symbol**2
factored = current_base ** base_exp
if (b/a) != 1:
u_func = sympy.sqrt(b/a) * symbol
u_constant = sympy.sqrt(a/b)
current_symbol = u_var
current_base = sign_a + sign_b * current_symbol**2
substep = RuleClass(current_base ** base_exp, current_symbol)
if u_func is not None:
if u_constant != 1 and substep is not None:
substep = ConstantTimesRule(
u_constant, current_base ** base_exp, substep,
u_constant * current_base ** base_exp, symbol)
substep = URule(u_var, u_func, u_constant, substep, factored, symbol)
if constant is not None and substep is not None:
substep = ConstantTimesRule(constant, factored, substep, integrand, symbol)
return substep
a, b = [match.get(i, ZERO) for i in (a, b)]
# list of (rule, base_exp, a, sign_a, b, sign_b, condition)
possibilities = []
if sympy.simplify(2*exp + 1) == 0:
possibilities.append((ArcsinRule, exp, a, 1, -b, -1, sympy.And(a > 0, b < 0)))
possibilities.append((ArcsinhRule, exp, a, 1, b, 1, sympy.And(a > 0, b > 0)))
possibilities.append((ArccoshRule, exp, -a, -1, b, 1, sympy.And(a < 0, b > 0)))
possibilities = [p for p in possibilities if p[-1] is not sympy.false]
if a.is_number and b.is_number:
possibility = [p for p in possibilities if p[-1] is sympy.true]
if len(possibility) == 1:
return make_inverse_trig(*possibility[0][:-1])
elif possibilities:
return PiecewiseRule(
[(make_inverse_trig(*p[:-1]), p[-1]) for p in possibilities],
integrand, symbol)
def add_rule(integral):
integrand, symbol = integral
results = [integral_steps(g, symbol)
for g in integrand.as_ordered_terms()]
return None if None in results else AddRule(results, integrand, symbol)
def mul_rule(integral):
integrand, symbol = integral
# Constant times function case
coeff, f = integrand.as_independent(symbol)
next_step = integral_steps(f, symbol)
if coeff != 1 and next_step is not None:
return ConstantTimesRule(
coeff, f,
next_step,
integrand, symbol)
def _parts_rule(integrand, symbol):
# LIATE rule:
# log, inverse trig, algebraic, trigonometric, exponential
def pull_out_algebraic(integrand):
integrand = integrand.cancel().together()
# iterating over Piecewise args would not work here
algebraic = ([] if isinstance(integrand, sympy.Piecewise)
else [arg for arg in integrand.args if arg.is_algebraic_expr(symbol)])
if algebraic:
u = sympy.Mul(*algebraic)
dv = (integrand / u).cancel()
return u, dv
def pull_out_u(*functions):
def pull_out_u_rl(integrand):
if any([integrand.has(f) for f in functions]):
args = [arg for arg in integrand.args
if any(isinstance(arg, cls) for cls in functions)]
if args:
u = reduce(lambda a,b: a*b, args)
dv = integrand / u
return u, dv
return pull_out_u_rl
liate_rules = [pull_out_u(sympy.log), pull_out_u(sympy.atan, sympy.asin, sympy.acos),
pull_out_algebraic, pull_out_u(sympy.sin, sympy.cos),
pull_out_u(sympy.exp)]
dummy = sympy.Dummy("temporary")
# we can integrate log(x) and atan(x) by setting dv = 1
if isinstance(integrand, (sympy.log, sympy.atan, sympy.asin, sympy.acos)):
integrand = dummy * integrand
for index, rule in enumerate(liate_rules):
result = rule(integrand)
if result:
u, dv = result
# Don't pick u to be a constant if possible
if symbol not in u.free_symbols and not u.has(dummy):
return
u = u.subs(dummy, 1)
dv = dv.subs(dummy, 1)
# Don't pick a non-polynomial algebraic to be differentiated
if rule == pull_out_algebraic and not u.is_polynomial(symbol):
return
# Don't trade one logarithm for another
if isinstance(u, sympy.log):
rec_dv = 1/dv
if (rec_dv.is_polynomial(symbol) and
degree(rec_dv, symbol) == 1):
return
# Can integrate a polynomial times OrthogonalPolynomial
if rule == pull_out_algebraic and isinstance(dv, OrthogonalPolynomial):
v_step = integral_steps(dv, symbol)
if contains_dont_know(v_step):
return
else:
du = u.diff(symbol)
v = _manualintegrate(v_step)
return u, dv, v, du, v_step
# make sure dv is amenable to integration
accept = False
if index < 2: # log and inverse trig are usually worth trying
accept = True
elif (rule == pull_out_algebraic and dv.args and
all(isinstance(a, (sympy.sin, sympy.cos, sympy.exp))
for a in dv.args)):
accept = True
else:
for rule in liate_rules[index + 1:]:
r = rule(integrand)
if r and r[0].subs(dummy, 1).equals(dv):
accept = True
break
if accept:
du = u.diff(symbol)
v_step = integral_steps(sympy.simplify(dv), symbol)
if not contains_dont_know(v_step):
v = _manualintegrate(v_step)
return u, dv, v, du, v_step
def parts_rule(integral):
integrand, symbol = integral
constant, integrand = integrand.as_coeff_Mul()
result = _parts_rule(integrand, symbol)
steps = []
if result:
u, dv, v, du, v_step = result
debug("u : {}, dv : {}, v : {}, du : {}, v_step: {}".format(u, dv, v, du, v_step))
steps.append(result)
if isinstance(v, sympy.Integral):
return
# Set a limit on the number of times u can be used
if isinstance(u, (sympy.sin, sympy.cos, sympy.exp, sympy.sinh, sympy.cosh)):
cachekey = u.xreplace({symbol: _cache_dummy})
if _parts_u_cache[cachekey] > 2:
return
_parts_u_cache[cachekey] += 1
# Try cyclic integration by parts a few times
for _ in range(4):
debug("Cyclic integration {} with v: {}, du: {}, integrand: {}".format(_, v, du, integrand))
coefficient = ((v * du) / integrand).cancel()
if coefficient == 1:
break
if symbol not in coefficient.free_symbols:
rule = CyclicPartsRule(
[PartsRule(u, dv, v_step, None, None, None)
for (u, dv, v, du, v_step) in steps],
(-1) ** len(steps) * coefficient,
integrand, symbol
)
if (constant != 1) and rule:
rule = ConstantTimesRule(constant, integrand, rule,
constant * integrand, symbol)
return rule
# _parts_rule is sensitive to constants, factor it out
next_constant, next_integrand = (v * du).as_coeff_Mul()
result = _parts_rule(next_integrand, symbol)
if result:
u, dv, v, du, v_step = result
u *= next_constant
du *= next_constant
steps.append((u, dv, v, du, v_step))
else:
break
def make_second_step(steps, integrand):
if steps:
u, dv, v, du, v_step = steps[0]
return PartsRule(u, dv, v_step,
make_second_step(steps[1:], v * du),
integrand, symbol)
else:
steps = integral_steps(integrand, symbol)
if steps:
return steps
else:
return DontKnowRule(integrand, symbol)
if steps:
u, dv, v, du, v_step = steps[0]
rule = PartsRule(u, dv, v_step,
make_second_step(steps[1:], v * du),
integrand, symbol)
if (constant != 1) and rule:
rule = ConstantTimesRule(constant, integrand, rule,
constant * integrand, symbol)
return rule
def trig_rule(integral):
integrand, symbol = integral
if isinstance(integrand, sympy.sin) or isinstance(integrand, sympy.cos):
arg = integrand.args[0]
if not isinstance(arg, sympy.Symbol):
return # perhaps a substitution can deal with it
if isinstance(integrand, sympy.sin):
func = 'sin'
else:
func = 'cos'
return TrigRule(func, arg, integrand, symbol)
if integrand == sympy.sec(symbol)**2:
return TrigRule('sec**2', symbol, integrand, symbol)
elif integrand == sympy.csc(symbol)**2:
return TrigRule('csc**2', symbol, integrand, symbol)
if isinstance(integrand, sympy.tan):
rewritten = sympy.sin(*integrand.args) / sympy.cos(*integrand.args)
elif isinstance(integrand, sympy.cot):
rewritten = sympy.cos(*integrand.args) / sympy.sin(*integrand.args)
elif isinstance(integrand, sympy.sec):
arg = integrand.args[0]
rewritten = ((sympy.sec(arg)**2 + sympy.tan(arg) * sympy.sec(arg)) /
(sympy.sec(arg) + sympy.tan(arg)))
elif isinstance(integrand, sympy.csc):
arg = integrand.args[0]
rewritten = ((sympy.csc(arg)**2 + sympy.cot(arg) * sympy.csc(arg)) /
(sympy.csc(arg) + sympy.cot(arg)))
else:
return
return RewriteRule(
rewritten,
integral_steps(rewritten, symbol),
integrand, symbol
)
def trig_product_rule(integral):
    """Match constant multiples of sec(x)*tan(x) or -csc(x)*cot(x)."""
    integrand, symbol = integral
    candidates = (
        ('sec*tan', sympy.sec(symbol) * sympy.tan(symbol)),
        ('csc*cot', -sympy.csc(symbol) * sympy.cot(symbol)),
    )
    for name, product in candidates:
        coeff = integrand / product
        if symbol in coeff.free_symbols:
            # Not a constant multiple of this product; try the next one.
            continue
        rule = TrigRule(name, symbol, product, symbol)
        if coeff != 1 and rule:
            rule = ConstantTimesRule(coeff, product, rule, integrand, symbol)
        return rule
def quadratic_denom_rule(integral):
    """Integrate rational functions whose denominator is quadratic.

    Three patterns are tried in turn:
    * a/(b*x**2 + c)             -> arctan/arctanh/arccoth table entries,
    * a/(b*x**2 + c*x + d)       -> complete the square, then substitute,
    * (a*x + b)/(c*x**2 + d*x + e) -> split off a multiple of the
      denominator's derivative (log part) plus a remainder.
    """
    integrand, symbol = integral
    a = sympy.Wild('a', exclude=[symbol])
    b = sympy.Wild('b', exclude=[symbol])
    c = sympy.Wild('c', exclude=[symbol])
    match = integrand.match(a / (b * symbol ** 2 + c))
    if match:
        a, b, c = match[a], match[b], match[c]
        if b.is_extended_real and c.is_extended_real:
            # The sign of c/b selects atan (positive) vs. the two
            # inverse-hyperbolic branches (negative).
            return PiecewiseRule([(ArctanRule(a, b, c, integrand, symbol), sympy.Gt(c / b, 0)),
                                  (ArccothRule(a, b, c, integrand, symbol), sympy.And(sympy.Gt(symbol ** 2, -c / b), sympy.Lt(c / b, 0))),
                                  (ArctanhRule(a, b, c, integrand, symbol), sympy.And(sympy.Lt(symbol ** 2, -c / b), sympy.Lt(c / b, 0))),
            ], integrand, symbol)
        else:
            return ArctanRule(a, b, c, integrand, symbol)
    d = sympy.Wild('d', exclude=[symbol])
    match2 = integrand.match(a / (b * symbol ** 2 + c * symbol + d))
    if match2:
        b, c = match2[b], match2[c]
        if b.is_zero:
            return
        # Complete the square: substitute u = x + c/(2b).
        u = sympy.Dummy('u')
        u_func = symbol + c/(2*b)
        integrand2 = integrand.subs(symbol, u - c / (2*b))
        next_step = integral_steps(integrand2, u)
        if next_step:
            return URule(u, u_func, None, next_step, integrand2, symbol)
        else:
            return
    e = sympy.Wild('e', exclude=[symbol])
    match3 = integrand.match((a* symbol + b) / (c * symbol ** 2 + d * symbol + e))
    if match3:
        a, b, c, d, e = match3[a], match3[b], match3[c], match3[d], match3[e]
        if c.is_zero:
            return
        denominator = c * symbol**2 + d * symbol + e
        # Write the numerator as const*(denominator)' + numer2; the first
        # piece integrates to const*log(denominator) via u = denominator.
        const =  a/(2*c)
        numer1 = (2*c*symbol+d)
        numer2 = - const*d + b
        u = sympy.Dummy('u')
        step1 = URule(u,
                      denominator,
                      const,
                      integral_steps(u**(-1), u),
                      integrand,
                      symbol)
        if const != 1:
            step1 = ConstantTimesRule(const,
                                      numer1/denominator,
                                      step1,
                                      const*numer1/denominator,
                                      symbol)
        if numer2.is_zero:
            return step1
        # Integrate the leftover constant-over-quadratic part separately.
        step2 = integral_steps(numer2/denominator, symbol)
        substeps = AddRule([step1, step2], integrand, symbol)
        rewriten = const*numer1/denominator+numer2/denominator
        return RewriteRule(rewriten, substeps, integrand, symbol)
    return
def root_mul_rule(integral):
    """Substitute u = sqrt(a*x + b) in integrands of the form sqrt(a*x + b)*c.

    Bails out when the remaining factor contains another linear square root,
    since a single substitution could not eliminate both.
    """
    integrand, symbol = integral
    a = sympy.Wild('a', exclude=[symbol])
    b = sympy.Wild('b', exclude=[symbol])
    c = sympy.Wild('c')
    match = integrand.match(sympy.sqrt(a * symbol + b) * c)
    if not match:
        return
    a, b, c = match[a], match[b], match[c]
    d = sympy.Wild('d', exclude=[symbol])
    e = sympy.Wild('e', exclude=[symbol])
    f = sympy.Wild('f')
    recursion_test = c.match(sympy.sqrt(d * symbol + e) * f)
    if recursion_test:
        return
    u = sympy.Dummy('u')
    u_func = sympy.sqrt(a * symbol + b)
    # Rewrite in terms of u: x = (u**2 - b)/a and dx = (2*u/a) du.
    integrand = integrand.subs(u_func, u)
    integrand = integrand.subs(symbol, (u**2 - b) / a)
    integrand = integrand * 2 * u / a
    next_step = integral_steps(integrand, u)
    if next_step:
        # NOTE(review): `integrand` has been rebound to the substituted
        # expression in u, so the URule's context is in terms of u rather
        # than the original integrand — confirm this is intended.
        return URule(u, u_func, None, next_step, integrand, symbol)
@sympy.cacheit
def make_wilds(symbol):
    """Wildcards shared by the trig-power patterns: coefficients a, b and
    Integer exponents m, n (all excluding the integration variable)."""
    a = sympy.Wild('a', exclude=[symbol])
    b = sympy.Wild('b', exclude=[symbol])
    m = sympy.Wild('m', exclude=[symbol], properties=[lambda n: isinstance(n, sympy.Integer)])
    n = sympy.Wild('n', exclude=[symbol], properties=[lambda n: isinstance(n, sympy.Integer)])
    return a, b, m, n
@sympy.cacheit
def sincos_pattern(symbol):
    # Pattern: sin(a*x)**m * cos(b*x)**n
    a, b, m, n = make_wilds(symbol)
    pattern = sympy.sin(a*symbol)**m * sympy.cos(b*symbol)**n
    return pattern, a, b, m, n
@sympy.cacheit
def tansec_pattern(symbol):
    # Pattern: tan(a*x)**m * sec(b*x)**n
    a, b, m, n = make_wilds(symbol)
    pattern = sympy.tan(a*symbol)**m * sympy.sec(b*symbol)**n
    return pattern, a, b, m, n
@sympy.cacheit
def cotcsc_pattern(symbol):
    # Pattern: cot(a*x)**m * csc(b*x)**n
    a, b, m, n = make_wilds(symbol)
    pattern = sympy.cot(a*symbol)**m * sympy.csc(b*symbol)**n
    return pattern, a, b, m, n
@sympy.cacheit
def heaviside_pattern(symbol):
    # Pattern: Heaviside(m*x + b) * g, with g an arbitrary remaining factor.
    m = sympy.Wild('m', exclude=[symbol])
    b = sympy.Wild('b', exclude=[symbol])
    g = sympy.Wild('g')
    pattern = sympy.Heaviside(m*symbol + b) * g
    return pattern, m, b, g
def uncurry(func):
    """Return a wrapper that takes one iterable and splats it into *func*."""
    def uncurry_rl(packed):
        return func(*packed)
    return uncurry_rl
def trig_rewriter(rewrite):
    """Wrap *rewrite* (a callable of a, b, m, n, integrand, symbol) into a
    rule that emits a RewriteRule only when the rewrite changed something."""
    def trig_rewriter_rl(args):
        a, b, m, n, integrand, symbol = args
        rewritten = rewrite(a, b, m, n, integrand, symbol)
        if rewritten != integrand:
            # Returns None implicitly when no progress was made.
            return RewriteRule(
                rewritten,
                integral_steps(rewritten, symbol),
                integrand, symbol)
    return trig_rewriter_rl
# Parity-based rewrites for powers/products of trig functions.  Each
# *_condition inspects the matched exponents (m, n); the paired rewriter
# applies the corresponding identity so the result integrates by table rules
# or substitution.
# sin**m * cos**n, both exponents even: half-angle formulas.
sincos_botheven_condition = uncurry(
    lambda a, b, m, n, i, s: m.is_even and n.is_even and
    m.is_nonnegative and n.is_nonnegative)
sincos_botheven = trig_rewriter(
    lambda a, b, m, n, i, symbol: ( (((1 - sympy.cos(2*a*symbol)) / 2) ** (m / 2)) *
                                    (((1 + sympy.cos(2*b*symbol)) / 2) ** (n / 2)) ))
# Odd power of sin: peel one sin factor off and use sin**2 = 1 - cos**2.
sincos_sinodd_condition = uncurry(lambda a, b, m, n, i, s: m.is_odd and m >= 3)
sincos_sinodd = trig_rewriter(
    lambda a, b, m, n, i, symbol: ( (1 - sympy.cos(a*symbol)**2)**((m - 1) / 2) *
                                    sympy.sin(a*symbol) *
                                    sympy.cos(b*symbol) ** n))
# Odd power of cos: peel one cos factor off and use cos**2 = 1 - sin**2.
sincos_cosodd_condition = uncurry(lambda a, b, m, n, i, s: n.is_odd and n >= 3)
sincos_cosodd = trig_rewriter(
    lambda a, b, m, n, i, symbol: ( (1 - sympy.sin(b*symbol)**2)**((n - 1) / 2) *
                                    sympy.cos(b*symbol) *
                                    sympy.sin(a*symbol) ** m))
# Even power of sec >= 4: keep sec**2 (derivative of tan) and convert the
# rest using sec**2 = 1 + tan**2.
tansec_seceven_condition = uncurry(lambda a, b, m, n, i, s: n.is_even and n >= 4)
tansec_seceven = trig_rewriter(
    lambda a, b, m, n, i, symbol: ( (1 + sympy.tan(b*symbol)**2) ** (n/2 - 1) *
                                    sympy.sec(b*symbol)**2 *
                                    sympy.tan(a*symbol) ** m ))
# Odd power of tan: peel one tan factor off via tan**2 = sec**2 - 1.
tansec_tanodd_condition = uncurry(lambda a, b, m, n, i, s: m.is_odd)
tansec_tanodd = trig_rewriter(
    lambda a, b, m, n, i, symbol: ( (sympy.sec(a*symbol)**2 - 1) ** ((m - 1) / 2) *
                                    sympy.tan(a*symbol) *
                                    sympy.sec(b*symbol) ** n ))
# Bare tan**2: rewrite as sec**2 - 1.
tan_tansquared_condition = uncurry(lambda a, b, m, n, i, s: m == 2 and n == 0)
tan_tansquared = trig_rewriter(
    lambda a, b, m, n, i, symbol: ( sympy.sec(a*symbol)**2 - 1))
# Mirror rules for cot/csc using csc**2 = 1 + cot**2.
cotcsc_csceven_condition = uncurry(lambda a, b, m, n, i, s: n.is_even and n >= 4)
cotcsc_csceven = trig_rewriter(
    lambda a, b, m, n, i, symbol: ( (1 + sympy.cot(b*symbol)**2) ** (n/2 - 1) *
                                    sympy.csc(b*symbol)**2 *
                                    sympy.cot(a*symbol) ** m ))
cotcsc_cotodd_condition = uncurry(lambda a, b, m, n, i, s: m.is_odd)
cotcsc_cotodd = trig_rewriter(
    lambda a, b, m, n, i, symbol: ( (sympy.csc(a*symbol)**2 - 1) ** ((m - 1) / 2) *
                                    sympy.cot(a*symbol) *
                                    sympy.csc(b*symbol) ** n ))
def trig_sincos_rule(integral):
    """Handle sin(a*x)**m * cos(b*x)**n via the parity-based rewriters."""
    integrand, symbol = integral
    if any(integrand.has(f) for f in (sympy.sin, sympy.cos)):
        pattern, a, b, m, n = sincos_pattern(symbol)
        match = integrand.match(pattern)
        if not match:
            return
        # Dispatch to the first rewriter whose condition holds; unmatched
        # wildcards default to ZERO.
        return multiplexer({
            sincos_botheven_condition: sincos_botheven,
            sincos_sinodd_condition: sincos_sinodd,
            sincos_cosodd_condition: sincos_cosodd
        })(tuple(
            [match.get(i, ZERO) for i in (a, b, m, n)] +
            [integrand, symbol]))
def trig_tansec_rule(integral):
    """Handle tan(a*x)**m * sec(b*x)**n, normalizing 1/cos to sec first."""
    integrand, symbol = integral
    integrand = integrand.subs({
        1 / sympy.cos(symbol): sympy.sec(symbol)
    })
    if any(integrand.has(f) for f in (sympy.tan, sympy.sec)):
        pattern, a, b, m, n = tansec_pattern(symbol)
        match = integrand.match(pattern)
        if not match:
            return
        # Dispatch to the first rewriter whose condition holds.
        return multiplexer({
            tansec_tanodd_condition: tansec_tanodd,
            tansec_seceven_condition: tansec_seceven,
            tan_tansquared_condition: tan_tansquared
        })(tuple(
            [match.get(i, ZERO) for i in (a, b, m, n)] +
            [integrand, symbol]))
def trig_cotcsc_rule(integral):
    """Handle cot(a*x)**m * csc(b*x)**n, normalizing reciprocal forms first."""
    integrand, symbol = integral
    integrand = integrand.subs({
        1 / sympy.sin(symbol): sympy.csc(symbol),
        1 / sympy.tan(symbol): sympy.cot(symbol),
        sympy.cos(symbol) / sympy.tan(symbol): sympy.cot(symbol)
    })
    if any(integrand.has(f) for f in (sympy.cot, sympy.csc)):
        pattern, a, b, m, n = cotcsc_pattern(symbol)
        match = integrand.match(pattern)
        if not match:
            return
        # Dispatch to the first rewriter whose condition holds.
        return multiplexer({
            cotcsc_cotodd_condition: cotcsc_cotodd,
            cotcsc_csceven_condition: cotcsc_csceven
        })(tuple(
            [match.get(i, ZERO) for i in (a, b, m, n)] +
            [integrand, symbol]))
def trig_sindouble_rule(integral):
    """Eliminate sin(2x) factors via sin(2x) = 2*sin(x)*cos(x)."""
    integrand, symbol = integral
    rest = sympy.Wild('a', exclude=[sympy.sin(2*symbol)])
    if not integrand.match(sympy.sin(2*symbol)*rest):
        return
    # Multiply by 2*sin*cos/sin(2x) (identically 1) to remove the
    # double-angle factor, then integrate the rewritten expression.
    identity = 2*sympy.sin(symbol)*sympy.cos(symbol)/sympy.sin(2*symbol)
    return integral_steps(integrand * identity, symbol)
def trig_powers_products_rule(integral):
    """Try each trig power/product strategy in order; first success wins."""
    return do_one(null_safe(trig_sincos_rule),
                  null_safe(trig_tansec_rule),
                  null_safe(trig_cotcsc_rule),
                  null_safe(trig_sindouble_rule))(integral)
def trig_substitution_rule(integral):
    """Attempt a trigonometric substitution on subexpressions a + b*x**2.

    Depending on the signs of a and b, substitutes x = tan/sin/sec of a new
    dummy angle theta, records the domain restriction, and recurses on the
    theta-integral.
    """
    integrand, symbol = integral
    A = sympy.Wild('a', exclude=[0, symbol])
    B = sympy.Wild('b', exclude=[0, symbol])
    theta = sympy.Dummy("theta")
    target_pattern = A + B*symbol**2
    matches = integrand.find(target_pattern)
    for expr in matches:
        match = expr.match(target_pattern)
        a = match.get(A, ZERO)
        b = match.get(B, ZERO)
        a_positive = ((a.is_number and a > 0) or a.is_positive)
        b_positive = ((b.is_number and b > 0) or b.is_positive)
        a_negative = ((a.is_number and a < 0) or a.is_negative)
        b_negative = ((b.is_number and b < 0) or b.is_negative)
        x_func = None
        if a_positive and b_positive:
            # a**2 + b*x**2. Assume sec(theta) > 0, -pi/2 < theta < pi/2
            x_func = (sympy.sqrt(a)/sympy.sqrt(b)) * sympy.tan(theta)
            # Do not restrict the domain: tan(theta) takes on any real
            # value on the interval -pi/2 < theta < pi/2 so x takes on
            # any value
            restriction = True
        elif a_positive and b_negative:
            # a**2 - b*x**2. Assume cos(theta) > 0, -pi/2 < theta < pi/2
            constant = sympy.sqrt(a)/sympy.sqrt(-b)
            x_func = constant * sympy.sin(theta)
            restriction = sympy.And(symbol > -constant, symbol < constant)
        elif a_negative and b_positive:
            # b*x**2 - a**2. Assume sin(theta) > 0, 0 < theta < pi
            constant = sympy.sqrt(-a)/sympy.sqrt(b)
            x_func = constant * sympy.sec(theta)
            restriction = sympy.And(symbol > -constant, symbol < constant)
        if x_func:
            # Manually simplify sqrt(trig(theta)**2) to trig(theta)
            # Valid due to assumed domain restriction
            substitutions = {}
            for f in [sympy.sin, sympy.cos, sympy.tan,
                      sympy.sec, sympy.csc, sympy.cot]:
                substitutions[sympy.sqrt(f(theta)**2)] = f(theta)
                substitutions[sympy.sqrt(f(theta)**(-2))] = 1/f(theta)
            replaced = integrand.subs(symbol, x_func).trigsimp()
            replaced = manual_subs(replaced, substitutions)
            if not replaced.has(symbol):
                # x fully eliminated; account for dx = x'(theta) dtheta.
                replaced *= manual_diff(x_func, theta)
                replaced = replaced.trigsimp()
                secants = replaced.find(1/sympy.cos(theta))
                if secants:
                    replaced = replaced.xreplace({
                        1/sympy.cos(theta): sympy.sec(theta)
                    })
                substep = integral_steps(replaced, theta)
                if not contains_dont_know(substep):
                    return TrigSubstitutionRule(
                        theta, x_func, replaced, substep, restriction,
                        integrand, symbol)
def heaviside_rule(integral):
    """Integrate Heaviside(m*x + b)*g(x) by integrating g and shifting."""
    integrand, symbol = integral
    pattern, m, b, g = heaviside_pattern(symbol)
    match = integrand.match(pattern)
    if match and 0 != match[g]:
        # f = Heaviside(m*x + b)*g
        v_step = integral_steps(match[g], symbol)
        result = _manualintegrate(v_step)
        m, b = match[m], match[b]
        # -b/m is where the Heaviside switches on; eval_heaviside uses it
        # to keep the antiderivative continuous there.
        return HeavisideRule(m*symbol + b, -b/m, result, integrand, symbol)
def substitution_rule(integral):
    """Try every u-substitution found by find_substitutions.

    Multiple workable substitutions yield an AlternativeRule; if none are
    found but the integrand contains exp, u = exp(x) is attempted directly.
    """
    integrand, symbol = integral
    u_var = sympy.Dummy("u")
    substitutions = find_substitutions(integrand, symbol, u_var)
    count = 0
    if substitutions:
        debug("List of Substitution Rules")
        ways = []
        for u_func, c, substituted in substitutions:
            subrule = integral_steps(substituted, u_var)
            count = count + 1
            debug("Rule {}: {}".format(count, subrule))
            if contains_dont_know(subrule):
                continue
            if sympy.simplify(c - 1) != 0:
                _, denom = c.as_numer_denom()
                if subrule:
                    subrule = ConstantTimesRule(c, substituted, subrule, substituted, u_var)
                if denom.free_symbols:
                    # The constant's denominator contains free symbols and
                    # could vanish; integrate each zero case separately and
                    # combine everything into a Piecewise.
                    piecewise = []
                    could_be_zero = []
                    if isinstance(denom, sympy.Mul):
                        could_be_zero = denom.args
                    else:
                        could_be_zero.append(denom)
                    for expr in could_be_zero:
                        if not fuzzy_not(expr.is_zero):
                            substep = integral_steps(manual_subs(integrand, expr, 0), symbol)
                            if substep:
                                piecewise.append((
                                    substep,
                                    sympy.Eq(expr, 0)
                                ))
                    piecewise.append((subrule, True))
                    subrule = PiecewiseRule(piecewise, substituted, symbol)
            ways.append(URule(u_var, u_func, c,
                              subrule,
                              integrand, symbol))
        if len(ways) > 1:
            return AlternativeRule(ways, integrand, symbol)
        elif ways:
            return ways[0]
    elif integrand.has(sympy.exp):
        # No substitution was found, but exp appears: try u = exp(x).
        u_func = sympy.exp(symbol)
        c = 1
        substituted = integrand / u_func.diff(symbol)
        substituted = substituted.subs(u_func, u_var)
        if symbol not in substituted.free_symbols:
            return URule(u_var, u_func, c,
                         integral_steps(substituted, u_var),
                         integrand, symbol)
# Rewriting strategies built from `rewriter(condition, rewrite)`: each fires
# when its condition holds and integrates the rewritten expression.
partial_fractions_rule = rewriter(
    lambda integrand, symbol: integrand.is_rational_function(),
    lambda integrand, symbol: integrand.apart(symbol))
cancel_rule = rewriter(
    # lambda integrand, symbol: integrand.is_algebraic_expr(),
    # lambda integrand, symbol: isinstance(integrand, sympy.Mul),
    lambda integrand, symbol: True,
    lambda integrand, symbol: integrand.cancel())
distribute_expand_rule = rewriter(
    lambda integrand, symbol: (
        all(arg.is_Pow or arg.is_polynomial(symbol) for arg in integrand.args)
        or isinstance(integrand, sympy.Pow)
        or isinstance(integrand, sympy.Mul)),
    lambda integrand, symbol: integrand.expand())
trig_expand_rule = rewriter(
    # If there are trig functions with different arguments, expand them
    lambda integrand, symbol: (
        len(set(a.args[0] for a in integrand.atoms(TrigonometricFunction))) > 1),
    lambda integrand, symbol: integrand.expand(trig=True))
def derivative_rule(integral):
    """Handle integrands that are unevaluated Derivative objects."""
    deriv = integral[0]
    sym = integral.symbol
    inner = deriv.expr
    if sym not in inner.free_symbols:
        # The differentiated expression does not involve the integration
        # variable, so the whole derivative is a constant w.r.t. it.
        return ConstantRule(integral.integrand, *integral)
    if sym in deriv.variables:
        # Integration directly cancels one of the differentiations.
        return DerivativeRule(*integral)
    return DontKnowRule(deriv, sym)
def rewrites_rule(integral):
    """Rewrite 1/cos(x) as sec(x) before integrating."""
    integrand, symbol = integral
    secant_form = 1/sympy.cos(symbol)
    if not integrand.match(secant_form):
        return
    replaced = integrand.subs(secant_form, sympy.sec(symbol))
    return RewriteRule(replaced, integral_steps(replaced, symbol), integrand, symbol)
def fallback_rule(integral):
    """Last resort: record that the integral could not be done."""
    integrand, symbol = integral
    return DontKnowRule(integrand, symbol)
# Cache is used to break cyclic integrals.
# Need to use the same dummy variable in cached expressions for them to match.
# Also record "u" of integration by parts, to avoid infinite repetition.
_integral_cache = {}  # integrand (symbol replaced by _cache_dummy) -> rule or None
_parts_u_cache = defaultdict(int)  # how often each "u" has been tried by parts
_cache_dummy = sympy.Dummy("z")  # shared dummy so cache keys are comparable
def integral_steps(integrand, symbol, **options):
    """Returns the steps needed to compute an integral.
    This function attempts to mirror what a student would do by hand as
    closely as possible.
    SymPy Gamma uses this to provide a step-by-step explanation of an
    integral. The code it uses to format the results of this function can be
    found at
    https://github.com/sympy/sympy_gamma/blob/master/app/logic/intsteps.py.
    Examples
    ========
    >>> from sympy import exp, sin, cos
    >>> from sympy.integrals.manualintegrate import integral_steps
    >>> from sympy.abc import x
    >>> print(repr(integral_steps(exp(x) / (1 + exp(2 * x)), x))) \
    # doctest: +NORMALIZE_WHITESPACE
    URule(u_var=_u, u_func=exp(x), constant=1,
    substep=PiecewiseRule(subfunctions=[(ArctanRule(a=1, b=1, c=1, context=1/(_u**2 + 1), symbol=_u), True),
        (ArccothRule(a=1, b=1, c=1, context=1/(_u**2 + 1), symbol=_u), False),
        (ArctanhRule(a=1, b=1, c=1, context=1/(_u**2 + 1), symbol=_u), False)],
    context=1/(_u**2 + 1), symbol=_u), context=exp(x)/(exp(2*x) + 1), symbol=x)
    >>> print(repr(integral_steps(sin(x), x))) \
    # doctest: +NORMALIZE_WHITESPACE
    TrigRule(func='sin', arg=x, context=sin(x), symbol=x)
    >>> print(repr(integral_steps((x**2 + 3)**2 , x))) \
    # doctest: +NORMALIZE_WHITESPACE
    RewriteRule(rewritten=x**4 + 6*x**2 + 9,
    substep=AddRule(substeps=[PowerRule(base=x, exp=4, context=x**4, symbol=x),
        ConstantTimesRule(constant=6, other=x**2,
            substep=PowerRule(base=x, exp=2, context=x**2, symbol=x),
            context=6*x**2, symbol=x),
        ConstantRule(constant=9, context=9, symbol=x)],
    context=x**4 + 6*x**2 + 9, symbol=x), context=(x**2 + 3)**2, symbol=x)
    Returns
    =======
    rule : namedtuple
        The first step; most rules have substeps that must also be
        considered. These substeps can be evaluated using ``manualintegrate``
        to obtain a result.
    """
    cachekey = integrand.xreplace({symbol: _cache_dummy})
    if cachekey in _integral_cache:
        if _integral_cache[cachekey] is None:
            # Stop this attempt, because it leads around in a loop
            return DontKnowRule(integrand, symbol)
        else:
            # TODO: This is for future development, as currently
            # _integral_cache gets no values other than None
            # (xreplace takes a single mapping argument, so the previous
            # two-argument call would have raised TypeError).
            return (_integral_cache[cachekey].xreplace({_cache_dummy: symbol}),
                    symbol)
    else:
        # Mark this integral as in-progress to detect cyclic recursion.
        _integral_cache[cachekey] = None
    integral = IntegralInfo(integrand, symbol)
    def key(integral):
        # Classify the integrand so the rule table below can dispatch on it.
        integrand = integral.integrand
        if isinstance(integrand, TrigonometricFunction):
            return TrigonometricFunction
        elif isinstance(integrand, sympy.Derivative):
            return sympy.Derivative
        elif symbol not in integrand.free_symbols:
            return sympy.Number
        else:
            for cls in (sympy.Pow, sympy.Symbol, sympy.exp, sympy.log,
                        sympy.Add, sympy.Mul, sympy.atan, sympy.asin,
                        sympy.acos, sympy.Heaviside, OrthogonalPolynomial):
                if isinstance(integrand, cls):
                    return cls
    def integral_is_subclass(*klasses):
        def _integral_is_subclass(integral):
            k = key(integral)
            return k and issubclass(k, klasses)
        return _integral_is_subclass
    try:
        result = do_one(
            null_safe(special_function_rule),
            null_safe(switch(key, {
                sympy.Pow: do_one(null_safe(power_rule), null_safe(inverse_trig_rule), \
                                  null_safe(quadratic_denom_rule)),
                sympy.Symbol: power_rule,
                sympy.exp: exp_rule,
                sympy.Add: add_rule,
                sympy.Mul: do_one(null_safe(mul_rule), null_safe(trig_product_rule), \
                                  null_safe(heaviside_rule), null_safe(quadratic_denom_rule), \
                                  null_safe(root_mul_rule)),
                sympy.Derivative: derivative_rule,
                TrigonometricFunction: trig_rule,
                sympy.Heaviside: heaviside_rule,
                OrthogonalPolynomial: orthogonal_poly_rule,
                sympy.Number: constant_rule
            })),
            do_one(
                null_safe(trig_rule),
                null_safe(alternatives(
                    rewrites_rule,
                    substitution_rule,
                    condition(
                        integral_is_subclass(sympy.Mul, sympy.Pow),
                        partial_fractions_rule),
                    condition(
                        integral_is_subclass(sympy.Mul, sympy.Pow),
                        cancel_rule),
                    condition(
                        integral_is_subclass(sympy.Mul, sympy.log, sympy.atan, sympy.asin, sympy.acos),
                        parts_rule),
                    condition(
                        integral_is_subclass(sympy.Mul, sympy.Pow),
                        distribute_expand_rule),
                    trig_powers_products_rule,
                    trig_expand_rule
                )),
                null_safe(trig_substitution_rule)
            ),
            fallback_rule)(integral)
    finally:
        # Always remove the in-progress marker, even if a rule raised;
        # otherwise a single failure would leave a permanent None entry and
        # all future attempts at this integral would return DontKnowRule.
        del _integral_cache[cachekey]
    return result
# --- Rule evaluators -------------------------------------------------------
# Each @evaluates(SomeRule) function turns a rule tuple back into an actual
# antiderivative; _manualintegrate dispatches on the rule class.
@evaluates(ConstantRule)
def eval_constant(constant, integrand, symbol):
    return constant * symbol
@evaluates(ConstantTimesRule)
def eval_constanttimes(constant, other, substep, integrand, symbol):
    return constant * _manualintegrate(substep)
@evaluates(PowerRule)
def eval_power(base, exp, integrand, symbol):
    # exp == -1 integrates to a logarithm instead of the power formula.
    return sympy.Piecewise(
        ((base**(exp + 1))/(exp + 1), sympy.Ne(exp, -1)),
        (sympy.log(base), True),
        )
@evaluates(ExpRule)
def eval_exp(base, exp, integrand, symbol):
    return integrand / sympy.ln(base)
@evaluates(AddRule)
def eval_add(substeps, integrand, symbol):
    return sum(map(_manualintegrate, substeps))
@evaluates(URule)
def eval_u(u_var, u_func, constant, substep, integrand, symbol):
    result = _manualintegrate(substep)
    if u_func.is_Pow and u_func.exp == -1:
        # avoid needless -log(1/x) from substitution
        result = result.subs(sympy.log(u_var), -sympy.log(u_func.base))
    return result.subs(u_var, u_func)
@evaluates(PartsRule)
def eval_parts(u, dv, v_step, second_step, integrand, symbol):
    # Integration by parts: integral(u dv) = u*v - integral(v du).
    v = _manualintegrate(v_step)
    return u * v - _manualintegrate(second_step)
@evaluates(CyclicPartsRule)
def eval_cyclicparts(parts_rules, coefficient, integrand, symbol):
    # Repeated parts eventually reproduces the original integral with a
    # coefficient; solve for the integral algebraically.
    coefficient = 1 - coefficient
    result = []
    sign = 1
    for rule in parts_rules:
        result.append(sign * rule.u * _manualintegrate(rule.v_step))
        sign *= -1
    return sympy.Add(*result) / coefficient
@evaluates(TrigRule)
def eval_trig(func, arg, integrand, symbol):
    # Table of basic trig antiderivatives, keyed by the name stored in the
    # rule (matches the products built in trig_rule/trig_product_rule).
    if func == 'sin':
        return -sympy.cos(arg)
    elif func == 'cos':
        return sympy.sin(arg)
    elif func == 'sec*tan':
        return sympy.sec(arg)
    elif func == 'csc*cot':
        return sympy.csc(arg)
    elif func == 'sec**2':
        return sympy.tan(arg)
    elif func == 'csc**2':
        return -sympy.cot(arg)
@evaluates(ArctanRule)
def eval_arctan(a, b, c, integrand, symbol):
    # Antiderivative of a/(b*x**2 + c) in the atan branch.
    return a / b * 1 / sympy.sqrt(c / b) * sympy.atan(symbol / sympy.sqrt(c / b))
@evaluates(ArccothRule)
def eval_arccoth(a, b, c, integrand, symbol):
    # Branch of a/(b*x**2 + c) selected when c/b < 0 and x**2 > -c/b.
    return - a / b * 1 / sympy.sqrt(-c / b) * sympy.acoth(symbol / sympy.sqrt(-c / b))
@evaluates(ArctanhRule)
def eval_arctanh(a, b, c, integrand, symbol):
    # Branch of a/(b*x**2 + c) selected when c/b < 0 and x**2 < -c/b.
    return - a / b * 1 / sympy.sqrt(-c / b) * sympy.atanh(symbol / sympy.sqrt(-c / b))
@evaluates(ReciprocalRule)
def eval_reciprocal(func, integrand, symbol):
    return sympy.ln(func)
@evaluates(ArcsinRule)
def eval_arcsin(integrand, symbol):
    return sympy.asin(symbol)
@evaluates(InverseHyperbolicRule)
def eval_inversehyperbolic(func, integrand, symbol):
    return func(symbol)
@evaluates(AlternativeRule)
def eval_alternative(alternatives, integrand, symbol):
    # All stored alternatives are equivalent; evaluate the first one.
    return _manualintegrate(alternatives[0])
@evaluates(RewriteRule)
def eval_rewrite(rewritten, substep, integrand, symbol):
    return _manualintegrate(substep)
@evaluates(PiecewiseRule)
def eval_piecewise(substeps, integrand, symbol):
    return sympy.Piecewise(*[(_manualintegrate(substep), cond)
                             for substep, cond in substeps])
@evaluates(TrigSubstitutionRule)
def eval_trigsubstitution(theta, func, rewritten, substep, restriction, integrand, symbol):
    # Evaluate the theta-integral, then undo the substitution by solving
    # x = func(theta) for the single trig function involved and reading
    # opposite/adjacent/hypotenuse off the implied right triangle.
    func = func.subs(sympy.sec(theta), 1/sympy.cos(theta))
    trig_function = list(func.find(TrigonometricFunction))
    assert len(trig_function) == 1
    trig_function = trig_function[0]
    relation = sympy.solve(symbol - func, trig_function)
    assert len(relation) == 1
    numer, denom = sympy.fraction(relation[0])
    if isinstance(trig_function, sympy.sin):
        opposite = numer
        hypotenuse = denom
        adjacent = sympy.sqrt(denom**2 - numer**2)
        inverse = sympy.asin(relation[0])
    elif isinstance(trig_function, sympy.cos):
        adjacent = numer
        hypotenuse = denom
        opposite = sympy.sqrt(denom**2 - numer**2)
        inverse = sympy.acos(relation[0])
    elif isinstance(trig_function, sympy.tan):
        opposite = numer
        adjacent = denom
        hypotenuse = sympy.sqrt(denom**2 + numer**2)
        inverse = sympy.atan(relation[0])
    # Express every trig function of theta in terms of x via the triangle,
    # and theta itself via the inverse function.
    substitution = [
        (sympy.sin(theta), opposite/hypotenuse),
        (sympy.cos(theta), adjacent/hypotenuse),
        (sympy.tan(theta), opposite/adjacent),
        (theta, inverse)
    ]
    return sympy.Piecewise(
        (_manualintegrate(substep).subs(substitution).trigsimp(), restriction)
    )
@evaluates(DerivativeRule)
def eval_derivativerule(integrand, symbol):
    # isinstance(integrand, Derivative) should be True
    # Integrating w.r.t. `symbol` cancels one differentiation in it.
    variable_count = list(integrand.variable_count)
    for i, (var, count) in enumerate(variable_count):
        if var == symbol:
            variable_count[i] = (var, count-1)
            break
    return sympy.Derivative(integrand.expr, *variable_count)
@evaluates(HeavisideRule)
def eval_heaviside(harg, ibnd, substep, integrand, symbol):
    # If we are integrating over x and the integrand has the form
    # Heaviside(m*x+b)*g(x) == Heaviside(harg)*g(symbol)
    # then there needs to be continuity at -b/m == ibnd,
    # so we subtract the appropriate term.
    return sympy.Heaviside(harg)*(substep - substep.subs(symbol, ibnd))
# Closed-form antiderivatives for orthogonal polynomials and special
# functions (matched earlier by orthogonal_poly_rule / special_function_rule).
@evaluates(JacobiRule)
def eval_jacobi(n, a, b, integrand, symbol):
    return Piecewise(
        (2*sympy.jacobi(n + 1, a - 1, b - 1, symbol)/(n + a + b), Ne(n + a + b, 0)),
        (symbol, Eq(n, 0)),
        ((a + b + 2)*symbol**2/4 + (a - b)*symbol/2, Eq(n, 1)))
@evaluates(GegenbauerRule)
def eval_gegenbauer(n, a, integrand, symbol):
    return Piecewise(
        (sympy.gegenbauer(n + 1, a - 1, symbol)/(2*(a - 1)), Ne(a, 1)),
        (sympy.chebyshevt(n + 1, symbol)/(n + 1), Ne(n, -1)),
        (sympy.S.Zero, True))
@evaluates(ChebyshevTRule)
def eval_chebyshevt(n, integrand, symbol):
    # |n| == 1 needs a special case to avoid division by zero.
    return Piecewise(((sympy.chebyshevt(n + 1, symbol)/(n + 1) -
                       sympy.chebyshevt(n - 1, symbol)/(n - 1))/2, Ne(sympy.Abs(n), 1)),
                     (symbol**2/2, True))
@evaluates(ChebyshevURule)
def eval_chebyshevu(n, integrand, symbol):
    return Piecewise(
        (sympy.chebyshevt(n + 1, symbol)/(n + 1), Ne(n, -1)),
        (sympy.S.Zero, True))
@evaluates(LegendreRule)
def eval_legendre(n, integrand, symbol):
    return (sympy.legendre(n + 1, symbol) - sympy.legendre(n - 1, symbol))/(2*n + 1)
@evaluates(HermiteRule)
def eval_hermite(n, integrand, symbol):
    return sympy.hermite(n + 1, symbol)/(2*(n + 1))
@evaluates(LaguerreRule)
def eval_laguerre(n, integrand, symbol):
    return sympy.laguerre(n, symbol) - sympy.laguerre(n + 1, symbol)
@evaluates(AssocLaguerreRule)
def eval_assoclaguerre(n, a, integrand, symbol):
    return -sympy.assoc_laguerre(n + 1, a - 1, symbol)
@evaluates(CiRule)
def eval_ci(a, b, integrand, symbol):
    return sympy.cos(b)*sympy.Ci(a*symbol) - sympy.sin(b)*sympy.Si(a*symbol)
@evaluates(ChiRule)
def eval_chi(a, b, integrand, symbol):
    return sympy.cosh(b)*sympy.Chi(a*symbol) + sympy.sinh(b)*sympy.Shi(a*symbol)
@evaluates(EiRule)
def eval_ei(a, b, integrand, symbol):
    return sympy.exp(b)*sympy.Ei(a*symbol)
@evaluates(SiRule)
def eval_si(a, b, integrand, symbol):
    return sympy.sin(b)*sympy.Ci(a*symbol) + sympy.cos(b)*sympy.Si(a*symbol)
@evaluates(ShiRule)
def eval_shi(a, b, integrand, symbol):
    return sympy.sinh(b)*sympy.Chi(a*symbol) + sympy.cosh(b)*sympy.Shi(a*symbol)
@evaluates(ErfRule)
def eval_erf(a, b, c, integrand, symbol):
    if a.is_extended_real:
        # The sign of `a` decides between erf (decaying) and erfi (growing).
        return Piecewise(
            (sympy.sqrt(sympy.pi/(-a))/2 * sympy.exp(c - b**2/(4*a)) *
                sympy.erf((-2*a*symbol - b)/(2*sympy.sqrt(-a))), a < 0),
            (sympy.sqrt(sympy.pi/a)/2 * sympy.exp(c - b**2/(4*a)) *
                sympy.erfi((2*a*symbol + b)/(2*sympy.sqrt(a))), True))
    else:
        return sympy.sqrt(sympy.pi/a)/2 * sympy.exp(c - b**2/(4*a)) * \
                sympy.erfi((2*a*symbol + b)/(2*sympy.sqrt(a)))
@evaluates(FresnelCRule)
def eval_fresnelc(a, b, c, integrand, symbol):
    return sympy.sqrt(sympy.pi/(2*a)) * (
        sympy.cos(b**2/(4*a) - c)*sympy.fresnelc((2*a*symbol + b)/sympy.sqrt(2*a*sympy.pi)) +
        sympy.sin(b**2/(4*a) - c)*sympy.fresnels((2*a*symbol + b)/sympy.sqrt(2*a*sympy.pi)))
@evaluates(FresnelSRule)
def eval_fresnels(a, b, c, integrand, symbol):
    return sympy.sqrt(sympy.pi/(2*a)) * (
        sympy.cos(b**2/(4*a) - c)*sympy.fresnels((2*a*symbol + b)/sympy.sqrt(2*a*sympy.pi)) -
        sympy.sin(b**2/(4*a) - c)*sympy.fresnelc((2*a*symbol + b)/sympy.sqrt(2*a*sympy.pi)))
@evaluates(LiRule)
def eval_li(a, b, integrand, symbol):
    return sympy.li(a*symbol + b)/a
@evaluates(PolylogRule)
def eval_polylog(a, b, integrand, symbol):
    return sympy.polylog(b + 1, a*symbol)
@evaluates(UpperGammaRule)
def eval_uppergamma(a, e, integrand, symbol):
    return symbol**e * (-a*symbol)**(-e) * sympy.uppergamma(e + 1, -a*symbol)/a
@evaluates(EllipticFRule)
def eval_elliptic_f(a, d, integrand, symbol):
    return sympy.elliptic_f(symbol, d/a)/sympy.sqrt(a)
@evaluates(EllipticERule)
def eval_elliptic_e(a, d, integrand, symbol):
    return sympy.elliptic_e(symbol, d/a)*sympy.sqrt(a)
@evaluates(DontKnowRule)
def eval_dontknowrule(integrand, symbol):
    # Give back an unevaluated Integral when no rule applied.
    return sympy.Integral(integrand, symbol)
def _manualintegrate(rule):
    """Dispatch *rule* to its registered evaluator and return the result."""
    evaluator = evaluators.get(rule.__class__)
    if evaluator is None:
        raise ValueError("Cannot evaluate rule %s" % repr(rule))
    # Rules are namedtuples, so splatting passes each field positionally.
    return evaluator(*rule)
def manualintegrate(f, var):
    """manualintegrate(f, var)
    Compute indefinite integral of a single variable using an algorithm that
    resembles what a student would do by hand.
    Unlike ``integrate``, var can only be a single symbol.
    Examples
    ========
    >>> from sympy import sin, cos, tan, exp, log, integrate
    >>> from sympy.integrals.manualintegrate import manualintegrate
    >>> from sympy.abc import x
    >>> manualintegrate(1 / x, x)
    log(x)
    >>> integrate(1/x)
    log(x)
    >>> manualintegrate(log(x), x)
    x*log(x) - x
    >>> integrate(log(x))
    x*log(x) - x
    >>> manualintegrate(exp(x) / (1 + exp(2 * x)), x)
    atan(exp(x))
    >>> integrate(exp(x) / (1 + exp(2 * x)))
    RootSum(4*_z**2 + 1, Lambda(_i, _i*log(2*_i + exp(x))))
    >>> manualintegrate(cos(x)**4 * sin(x), x)
    -cos(x)**5/5
    >>> integrate(cos(x)**4 * sin(x), x)
    -cos(x)**5/5
    >>> manualintegrate(cos(x)**4 * sin(x)**3, x)
    cos(x)**7/7 - cos(x)**5/5
    >>> integrate(cos(x)**4 * sin(x)**3, x)
    cos(x)**7/7 - cos(x)**5/5
    >>> manualintegrate(tan(x), x)
    -log(cos(x))
    >>> integrate(tan(x), x)
    -log(cos(x))
    See Also
    ========
    sympy.integrals.integrals.integrate
    sympy.integrals.integrals.Integral.doit
    sympy.integrals.integrals.Integral
    """
    result = _manualintegrate(integral_steps(f, var))
    # Clear the cache of u-parts
    _parts_u_cache.clear()
    # If we got Piecewise with two parts, put generic first
    if isinstance(result, Piecewise) and len(result.args) == 2:
        cond = result.args[0][1]
        if isinstance(cond, Eq) and result.args[1][1] == True:
            # Swap (v1, Eq(...)), (v2, True) into (v2, Ne(...)), (v1, True)
            # so the generic branch is listed first.
            result = result.func(
                (result.args[1][0], sympy.Ne(*cond.args)),
                (result.args[0][0], True))
    return result
from __future__ import print_function, division
from collections import namedtuple, defaultdict
import sympy
from sympy.core.compatibility import reduce, Mapping, iterable
from sympy.core.containers import Dict
from sympy.core.logic import fuzzy_not
from sympy.functions.elementary.trigonometric import TrigonometricFunction
from sympy.functions.special.polynomials import OrthogonalPolynomial
from sympy.functions.elementary.piecewise import Piecewise
from sympy.strategies.core import switch, do_one, null_safe, condition
from sympy.core.relational import Eq, Ne
from sympy.polys.polytools import degree
from sympy.ntheory.factor_ import divisors
from sympy.utilities.misc import debug
ZERO = sympy.S.Zero
def Rule(name, props=""):
    """Create a namedtuple-based rule class.

    Every rule class gets trailing ``context`` and ``symbol`` fields, and
    equality additionally requires the classes to match, so rules of
    different kinds with the same data never compare equal.
    """
    rule_cls = namedtuple(name, props + " context symbol")
    def _eq(self, other):
        return self.__class__ == other.__class__ and tuple.__eq__(self, other)
    rule_cls.__eq__ = _eq
    rule_cls.__ne__ = lambda self, other: not _eq(self, other)
    return rule_cls
# Rule classes: each records the data needed both to display an integration
# step and to evaluate it (every class ends with `context symbol`).
ConstantRule = Rule("ConstantRule", "constant")
ConstantTimesRule = Rule("ConstantTimesRule", "constant other substep")
PowerRule = Rule("PowerRule", "base exp")
AddRule = Rule("AddRule", "substeps")
URule = Rule("URule", "u_var u_func constant substep")
PartsRule = Rule("PartsRule", "u dv v_step second_step")
CyclicPartsRule = Rule("CyclicPartsRule", "parts_rules coefficient")
TrigRule = Rule("TrigRule", "func arg")
ExpRule = Rule("ExpRule", "base exp")
ReciprocalRule = Rule("ReciprocalRule", "func")
ArcsinRule = Rule("ArcsinRule")
InverseHyperbolicRule = Rule("InverseHyperbolicRule", "func")
AlternativeRule = Rule("AlternativeRule", "alternatives")
DontKnowRule = Rule("DontKnowRule")
DerivativeRule = Rule("DerivativeRule")
RewriteRule = Rule("RewriteRule", "rewritten substep")
PiecewiseRule = Rule("PiecewiseRule", "subfunctions")
HeavisideRule = Rule("HeavisideRule", "harg ibnd substep")
TrigSubstitutionRule = Rule("TrigSubstitutionRule",
                            "theta func rewritten substep restriction")
ArctanRule = Rule("ArctanRule", "a b c")
ArccothRule = Rule("ArccothRule", "a b c")
ArctanhRule = Rule("ArctanhRule", "a b c")
# Orthogonal-polynomial rules.
JacobiRule = Rule("JacobiRule", "n a b")
GegenbauerRule = Rule("GegenbauerRule", "n a")
ChebyshevTRule = Rule("ChebyshevTRule", "n")
ChebyshevURule = Rule("ChebyshevURule", "n")
LegendreRule = Rule("LegendreRule", "n")
HermiteRule = Rule("HermiteRule", "n")
LaguerreRule = Rule("LaguerreRule", "n")
AssocLaguerreRule = Rule("AssocLaguerreRule", "n a")
# Special-function rules.
CiRule = Rule("CiRule", "a b")
ChiRule = Rule("ChiRule", "a b")
EiRule = Rule("EiRule", "a b")
SiRule = Rule("SiRule", "a b")
ShiRule = Rule("ShiRule", "a b")
ErfRule = Rule("ErfRule", "a b c")
FresnelCRule = Rule("FresnelCRule", "a b c")
FresnelSRule = Rule("FresnelSRule", "a b c")
LiRule = Rule("LiRule", "a b")
PolylogRule = Rule("PolylogRule", "a b")
UpperGammaRule = Rule("UpperGammaRule", "a e")
EllipticFRule = Rule("EllipticFRule", "a d")
EllipticERule = Rule("EllipticERule", "a d")
# Lightweight (integrand, symbol) pair passed to the rule functions.
IntegralInfo = namedtuple('IntegralInfo', 'integrand symbol')
# Registry mapping rule classes to their evaluation functions.
evaluators = {}
def evaluates(rule):
    """Decorator registering the wrapped function as evaluator for *rule*."""
    def register(func):
        func.rule = rule
        evaluators[rule] = func
        return func
    return register
def contains_dont_know(rule):
    """Recursively check whether *rule* or any sub-rule is a DontKnowRule."""
    if isinstance(rule, DontKnowRule):
        return True
    # Rules are namedtuples: scan each field; nested rules are tuples and
    # some fields hold lists of rules.
    for item in rule:
        if isinstance(item, tuple):
            if contains_dont_know(item):
                return True
        elif isinstance(item, list) and any(contains_dont_know(sub) for sub in item):
            return True
    return False
def manual_diff(f, symbol):
    """Differentiate ``f`` with respect to ``symbol``.

    Unlike ``f.diff``, keeps derivatives of tan/cot/sec/csc expressed in
    terms of sec and csc, which is the form the trig-substitution code
    expects.  Falls back to ``f.diff(symbol)`` for anything unrecognized.
    """
    if f.args:
        inner = f.args[0]
        if isinstance(f, sympy.tan):
            return inner.diff(symbol) * sympy.sec(inner)**2
        if isinstance(f, sympy.cot):
            return -inner.diff(symbol) * sympy.csc(inner)**2
        if isinstance(f, sympy.sec):
            return inner.diff(symbol) * sympy.sec(inner) * sympy.tan(inner)
        if isinstance(f, sympy.csc):
            return -inner.diff(symbol) * sympy.csc(inner) * sympy.cot(inner)
        if isinstance(f, sympy.Add):
            return sum(manual_diff(term, symbol) for term in f.args)
        if isinstance(f, sympy.Mul):
            # Only handle the simple (number * expr) case manually.
            if len(f.args) == 2 and isinstance(f.args[0], sympy.Number):
                return f.args[0] * manual_diff(f.args[1], symbol)
    return f.diff(symbol)
def manual_subs(expr, *args):
    """Substitute like ``expr.subs`` but also rewrite powers hidden in logs.

    Accepts either a single iterable/mapping of ``(old, new)`` pairs or a
    single pair passed as two arguments.  When an ``old`` expression is
    ``log(x0)``, powers ``x0**a`` in ``expr`` are rewritten as
    ``exp(a*new)`` so the substitution also takes effect inside exponents,
    and ``x0 -> exp(new)`` is added to the substitution list.

    Raises ValueError for a non-iterable single argument or for more than
    two arguments.
    """
    if len(args) == 1:
        sequence = args[0]
        if isinstance(sequence, (Dict, Mapping)):
            sequence = sequence.items()
        elif not iterable(sequence):
            raise ValueError("Expected an iterable of (old, new) pairs")
    elif len(args) == 2:
        sequence = [args]
    else:
        raise ValueError("subs accepts either 1 or 2 arguments")
    # Materialize once: ``sequence`` may be a one-shot iterator, and it is
    # consumed both by the loop below and by the final ``subs`` call.
    sequence = list(sequence)
    new_subs = []
    for old, new in sequence:
        if isinstance(old, sympy.log):
            # If log(x0) -> new, then x0**a -> exp(a*new) and x0 -> exp(new).
            x0 = old.args[0]
            expr = expr.replace(lambda x: x.is_Pow and x.base == x0,
                lambda x: sympy.exp(x.exp*new))
            new_subs.append((x0, sympy.exp(new)))
    return expr.subs(sequence + new_subs)
# u-substitution machinery
def find_substitutions(integrand, symbol, u_var):
    """Find candidate u-substitutions for ``integrand``.

    Returns a list of tuples ``(u, constant, substituted)``: ``u`` is the
    subexpression to substitute, ``constant`` the factor pulled outside the
    integral, and ``substituted`` the integrand rewritten in ``u_var``.
    """
    results = []
    def test_subterm(u, u_diff):
        # Divide by du/dx and substitute u -> u_var; on success return the
        # result split as (constant, u_var-dependent part), else False.
        if u_diff == 0:
            return False
        substituted = integrand / u_diff
        if symbol not in substituted.free_symbols:
            return False
        debug("substituted: {}, u: {}, u_var: {}".format(substituted, u, u_var))
        substituted = manual_subs(substituted, u, u_var).cancel()
        if symbol not in substituted.free_symbols:
            # Reject substitutions that raise the degree of a rational function.
            if integrand.is_rational_function(symbol) and substituted.is_rational_function(u_var):
                deg_before = max([degree(t, symbol) for t in integrand.as_numer_denom()])
                deg_after = max([degree(t, u_var) for t in substituted.as_numer_denom()])
                if deg_after > deg_before:
                    return False
            return substituted.as_independent(u_var, as_Add=False)
        # Special handling for u = (a*x + b)**(1/n): invert the root to
        # eliminate the remaining occurrences of ``symbol``.
        if (isinstance(u, sympy.Pow) and (1/u.exp).is_Integer and
            sympy.Abs(u.exp) < 1):
            a = sympy.Wild('a', exclude=[symbol])
            b = sympy.Wild('b', exclude=[symbol])
            match = u.base.match(a*symbol + b)
            if match:
                a, b = [match.get(i, ZERO) for i in (a, b)]
                if a != 0 and b != 0:
                    substituted = substituted.subs(symbol,
                        (u_var**(1/u.exp) - b)/a)
                    return substituted.as_independent(u_var, as_Add=False)
        return False
    def possible_subterms(term):
        # Enumerate subexpressions worth trying as ``u``: arguments of
        # elementary/orthogonal functions plus structural pieces of
        # products, powers and sums.
        if isinstance(term, (TrigonometricFunction,
                             sympy.asin, sympy.acos, sympy.atan,
                             sympy.exp, sympy.log, sympy.Heaviside)):
            return [term.args[0]]
        elif isinstance(term, (sympy.chebyshevt, sympy.chebyshevu,
                        sympy.legendre, sympy.hermite, sympy.laguerre)):
            # The variable is the second argument for these polynomials.
            return [term.args[1]]
        elif isinstance(term, (sympy.gegenbauer, sympy.assoc_laguerre)):
            return [term.args[2]]
        elif isinstance(term, sympy.jacobi):
            return [term.args[3]]
        elif isinstance(term, sympy.Mul):
            r = []
            for u in term.args:
                r.append(u)
                r.extend(possible_subterms(u))
            return r
        elif isinstance(term, sympy.Pow):
            r = []
            if term.args[1].is_constant(symbol):
                r.append(term.args[0])
            elif term.args[0].is_constant(symbol):
                r.append(term.args[1])
            if term.args[1].is_Integer:
                # Also try proper-divisor powers of the base, e.g. x**2 for x**6.
                r.extend([term.args[0]**d for d in divisors(term.args[1])
                          if 1 < d < abs(term.args[1])])
            if term.args[0].is_Add:
                r.extend([t for t in possible_subterms(term.args[0])
                          if t.is_Pow])
            return r
        elif isinstance(term, sympy.Add):
            r = []
            for arg in term.args:
                r.append(arg)
                r.extend(possible_subterms(arg))
            return r
        return []
    for u in possible_subterms(integrand):
        if u == symbol:
            continue
        u_diff = manual_diff(u, symbol)
        new_integrand = test_subterm(u, u_diff)
        if new_integrand is not False:
            constant, new_integrand = new_integrand
            # Skip trivial substitutions that just rename the variable.
            if new_integrand == integrand.subs(symbol, u_var):
                continue
            substitution = (u, constant, new_integrand)
            if substitution not in results:
                results.append(substitution)
    return results
def rewriter(condition, rewrite):
    """Strategy factory: rewrite the integrand when ``condition`` holds,
    then integrate the rewritten form recursively."""
    def _rewriter(integral):
        integrand, symbol = integral
        debug("Integral: {} is rewritten with {} on symbol: {}".format(integrand, rewrite, symbol))
        if not condition(*integral):
            return
        rewritten = rewrite(*integral)
        if rewritten == integrand:
            return
        substep = integral_steps(rewritten, symbol)
        if substep and not isinstance(substep, DontKnowRule):
            return RewriteRule(
                rewritten,
                substep,
                integrand, symbol)
    return _rewriter
def proxy_rewriter(condition, rewrite):
    """Like ``rewriter`` but condition/rewrite also receive extra criteria,
    passed in as ``(criteria, integral)``."""
    def _proxy_rewriter(criteria):
        criteria, integral = criteria
        integrand, symbol = integral
        debug("Integral: {} is rewritten with {} on symbol: {} and criteria: {}".format(integrand, rewrite, symbol, criteria))
        args = criteria + list(integral)
        if not condition(*args):
            return
        rewritten = rewrite(*args)
        if rewritten == integrand:
            return
        return RewriteRule(
            rewritten,
            integral_steps(rewritten, symbol),
            integrand, symbol)
    return _proxy_rewriter
def multiplexer(conditions):
    """Return a rule dispatching on the first matching predicate.

    ``conditions`` maps predicate -> rule; the returned callable applies
    the rule whose predicate first accepts the expression (dict insertion
    order), and returns None when no predicate matches.
    """
    def multiplexer_rl(expr):
        for predicate, rule in conditions.items():
            if predicate(expr):
                return rule(expr)
    return multiplexer_rl
def alternatives(*rules):
    """Strategy: try every rule and keep all distinct viable results.

    A single viable result is returned directly; several are wrapped in an
    AlternativeRule, preferring alternatives free of DontKnowRule.
    """
    def _alternatives(integral):
        alts = []
        debug("List of Alternative Rules")
        for count, rule in enumerate(rules, start=1):
            debug("Rule {}: {}".format(count, rule))
            result = rule(integral)
            if (result and not isinstance(result, DontKnowRule) and
                    result != integral and result not in alts):
                alts.append(result)
        if len(alts) == 1:
            return alts[0]
        if alts:
            doable = [alt for alt in alts if not contains_dont_know(alt)]
            return AlternativeRule(doable or alts, *integral)
    return _alternatives
def constant_rule(integral):
    """Rule for integrands free of the integration variable: ∫c dx = c*x.

    (The previous unpacking of ``integral`` into locals was dead code.)
    """
    return ConstantRule(integral.integrand, *integral)
def power_rule(integral):
    """Handle x**n (PowerRule / ReciprocalRule) and a**x (ExpRule).

    For a**x with symbolic base, branches on whether log(base) vanishes.
    """
    integrand, symbol = integral
    base, expt = integrand.as_base_exp()
    if symbol not in expt.free_symbols and isinstance(base, sympy.Symbol):
        # x**(-1) integrates to a logarithm; everything else by power rule.
        if sympy.simplify(expt + 1) == 0:
            return ReciprocalRule(base, integrand, symbol)
        return PowerRule(base, expt, integrand, symbol)
    elif symbol not in base.free_symbols and isinstance(expt, sympy.Symbol):
        rule = ExpRule(base, expt, integrand, symbol)
        log_base = sympy.log(base)
        if fuzzy_not(log_base.is_zero):
            return rule
        elif log_base.is_zero:
            # base == 1, so the integrand is the constant 1.
            return ConstantRule(1, 1, symbol)
        # Cannot decide whether log(base) vanishes: branch on it.
        return PiecewiseRule([
            (rule, sympy.Ne(sympy.log(base), 0)),
            (ConstantRule(1, 1, symbol), True)
        ], integrand, symbol)
def exp_rule(integral):
    """Integrate exp(x) directly when its argument is a bare symbol."""
    integrand, symbol = integral
    arg = integrand.args[0]
    if isinstance(arg, sympy.Symbol):
        return ExpRule(sympy.E, arg, integrand, symbol)
def orthogonal_poly_rule(integral):
    """Match an orthogonal polynomial whose variable is the bare symbol.

    Applies only when the polynomial's variable argument is exactly
    ``symbol`` and the preceding (order/parameter) arguments are free of it.
    """
    orthogonal_poly_classes = {
        sympy.jacobi: JacobiRule,
        sympy.gegenbauer: GegenbauerRule,
        sympy.chebyshevt: ChebyshevTRule,
        sympy.chebyshevu: ChebyshevURule,
        sympy.legendre: LegendreRule,
        sympy.hermite: HermiteRule,
        sympy.laguerre: LaguerreRule,
        sympy.assoc_laguerre: AssocLaguerreRule
        }
    # Index of the variable argument; defaults to 1 (e.g. legendre(n, x)).
    orthogonal_poly_var_index = {
        sympy.jacobi: 3,
        sympy.gegenbauer: 2,
        sympy.assoc_laguerre: 2
        }
    integrand, symbol = integral
    for klass in orthogonal_poly_classes:
        if isinstance(integrand, klass):
            var_index = orthogonal_poly_var_index.get(klass, 1)
            if (integrand.args[var_index] is symbol and not
                any(v.has(symbol) for v in integrand.args[:var_index])):
                    # Rule fields are the leading (parameter) arguments.
                    args = integrand.args[:var_index] + (integrand, symbol)
                    return orthogonal_poly_classes[klass](*args)
def special_function_rule(integral):
    """Match integrands whose antiderivative is a known special function
    (Ei, Ci, Si, li, erf, Fresnel, incomplete gamma, elliptic integrals...).

    Each pattern row is (head class, pattern, extra predicate or None,
    rule class); wild coefficients are constrained via ``properties``.
    """
    integrand, symbol = integral
    a = sympy.Wild('a', exclude=[symbol], properties=[lambda x: not x.is_zero])
    b = sympy.Wild('b', exclude=[symbol])
    c = sympy.Wild('c', exclude=[symbol])
    d = sympy.Wild('d', exclude=[symbol], properties=[lambda x: not x.is_zero])
    # e must NOT be a nonnegative integer (those powers integrate elementarily).
    e = sympy.Wild('e', exclude=[symbol], properties=[
        lambda x: not (x.is_nonnegative and x.is_integer)])
    wilds = (a, b, c, d, e)
    patterns = (
        (sympy.Mul, sympy.exp(a*symbol + b)/symbol, None, EiRule),
        (sympy.Mul, sympy.cos(a*symbol + b)/symbol, None, CiRule),
        (sympy.Mul, sympy.cosh(a*symbol + b)/symbol, None, ChiRule),
        (sympy.Mul, sympy.sin(a*symbol + b)/symbol, None, SiRule),
        (sympy.Mul, sympy.sinh(a*symbol + b)/symbol, None, ShiRule),
        (sympy.Pow, 1/sympy.log(a*symbol + b), None, LiRule),
        (sympy.exp, sympy.exp(a*symbol**2 + b*symbol + c), None, ErfRule),
        (sympy.sin, sympy.sin(a*symbol**2 + b*symbol + c), None, FresnelSRule),
        (sympy.cos, sympy.cos(a*symbol**2 + b*symbol + c), None, FresnelCRule),
        (sympy.Mul, symbol**e*sympy.exp(a*symbol), None, UpperGammaRule),
        (sympy.Mul, sympy.polylog(b, a*symbol)/symbol, None, PolylogRule),
        # Elliptic integrals require the two coefficients to differ.
        (sympy.Pow, 1/sympy.sqrt(a - d*sympy.sin(symbol)**2),
            lambda a, d: a != d, EllipticFRule),
        (sympy.Pow, sympy.sqrt(a - d*sympy.sin(symbol)**2),
            lambda a, d: a != d, EllipticERule),
    )
    for p in patterns:
        # Cheap head-class filter before the (expensive) pattern match.
        if isinstance(integrand, p[0]):
            match = integrand.match(p[1])
            if match:
                wild_vals = tuple(match.get(w) for w in wilds
                                  if match.get(w) is not None)
                if p[2] is None or p[2](*wild_vals):
                    args = wild_vals + (integrand, symbol)
                    return p[3](*args)
def inverse_trig_rule(integral):
    """Match (a + b*x**2)**exp forms integrating to inverse (hyperbolic)
    trig functions: arcsin, arcsinh, arccosh for exp == -1/2.

    When the signs of a and b cannot be decided numerically, returns a
    PiecewiseRule over the sign conditions.
    """
    integrand, symbol = integral
    base, exp = integrand.as_base_exp()
    a = sympy.Wild('a', exclude=[symbol])
    b = sympy.Wild('b', exclude=[symbol])
    match = base.match(a + b*symbol**2)
    if not match:
        return
    def negative(x):
        # Treat explicit minus signs as negative even without assumptions.
        return x.is_negative or x.could_extract_minus_sign()
    def ArcsinhRule(integrand, symbol):
        return InverseHyperbolicRule(sympy.asinh, integrand, symbol)
    def ArccoshRule(integrand, symbol):
        return InverseHyperbolicRule(sympy.acosh, integrand, symbol)
    def make_inverse_trig(RuleClass, base_exp, a, sign_a, b, sign_b):
        # Normalize to (±1 ± u**2)**base_exp via constant factoring and a
        # u = sqrt(b/a)*x substitution, then apply RuleClass.
        u_var = sympy.Dummy("u")
        current_base = base
        current_symbol = symbol
        constant = u_func = u_constant = substep = None
        factored = integrand
        if a != 1:
            constant = a**base_exp
            current_base = sign_a + sign_b * (b/a) * current_symbol**2
            factored = current_base ** base_exp
        if (b/a) != 1:
            u_func = sympy.sqrt(b/a) * symbol
            u_constant = sympy.sqrt(a/b)
            current_symbol = u_var
            current_base = sign_a + sign_b * current_symbol**2
        substep = RuleClass(current_base ** base_exp, current_symbol)
        if u_func is not None:
            if u_constant != 1 and substep is not None:
                substep = ConstantTimesRule(
                    u_constant, current_base ** base_exp, substep,
                    u_constant * current_base ** base_exp, symbol)
            substep = URule(u_var, u_func, u_constant, substep, factored, symbol)
        if constant is not None and substep is not None:
            substep = ConstantTimesRule(constant, factored, substep, integrand, symbol)
        return substep
    a, b = [match.get(i, ZERO) for i in (a, b)]
    # Each possibility: (rule, exp, |a|, sign_a, |b|, sign_b, validity condition).
    possibilities = []
    if sympy.simplify(2*exp + 1) == 0:
        possibilities.append((ArcsinRule, exp, a, 1, -b, -1, sympy.And(a > 0, b < 0)))
        possibilities.append((ArcsinhRule, exp, a, 1, b, 1, sympy.And(a > 0, b > 0)))
        possibilities.append((ArccoshRule, exp, -a, -1, b, 1, sympy.And(a < 0, b > 0)))
    possibilities = [p for p in possibilities if p[-1] is not sympy.false]
    if a.is_number and b.is_number:
        possibility = [p for p in possibilities if p[-1] is sympy.true]
        if len(possibility) == 1:
            return make_inverse_trig(*possibility[0][:-1])
    elif possibilities:
        return PiecewiseRule(
            [(make_inverse_trig(*p[:-1]), p[-1]) for p in possibilities],
            integrand, symbol)
def add_rule(integral):
    """Integrate a sum term by term; fail if any term fails."""
    integrand, symbol = integral
    substeps = [integral_steps(term, symbol)
                for term in integrand.as_ordered_terms()]
    if any(step is None for step in substeps):
        return None
    return AddRule(substeps, integrand, symbol)
def mul_rule(integral):
    """Pull a constant coefficient out of the integrand."""
    integrand, symbol = integral
    coeff, rest = integrand.as_independent(symbol)
    substep = integral_steps(rest, symbol)
    if coeff != 1 and substep is not None:
        return ConstantTimesRule(
            coeff, rest,
            substep,
            integrand, symbol)
def _parts_rule(integrand, symbol):
    """Pick u and dv for integration by parts with a LIATE-style priority
    (log, inverse trig, algebraic, trig, exponential).

    Returns ``(u, dv, v, du, v_step)`` on success, else None; several
    guards below reject choices that would loop or make things worse.
    """
    def pull_out_algebraic(integrand):
        # Choose the algebraic factors as u; everything else becomes dv.
        integrand = integrand.cancel().together()
        algebraic = ([] if isinstance(integrand, sympy.Piecewise)
                     else [arg for arg in integrand.args if arg.is_algebraic_expr(symbol)])
        if algebraic:
            u = sympy.Mul(*algebraic)
            dv = (integrand / u).cancel()
            return u, dv
    def pull_out_u(*functions):
        # Choose the factors that are instances of ``functions`` as u.
        def pull_out_u_rl(integrand):
            if any([integrand.has(f) for f in functions]):
                args = [arg for arg in integrand.args
                        if any(isinstance(arg, cls) for cls in functions)]
                if args:
                    u = reduce(lambda a,b: a*b, args)
                    dv = integrand / u
                    return u, dv
        return pull_out_u_rl
    # LIATE priority order.
    liate_rules = [pull_out_u(sympy.log), pull_out_u(sympy.atan, sympy.asin, sympy.acos),
                   pull_out_algebraic, pull_out_u(sympy.sin, sympy.cos),
                   pull_out_u(sympy.exp)]
    dummy = sympy.Dummy("temporary")
    # If the integrand is a bare log/inverse-trig function, multiply by a
    # dummy so the function itself can be selected as u (with dv = 1).
    if isinstance(integrand, (sympy.log, sympy.atan, sympy.asin, sympy.acos)):
        integrand = dummy * integrand
    for index, rule in enumerate(liate_rules):
        result = rule(integrand)
        if result:
            u, dv = result
            # Don't pick a constant u (unless it is the dummy placeholder).
            if symbol not in u.free_symbols and not u.has(dummy):
                return
            u = u.subs(dummy, 1)
            dv = dv.subs(dummy, 1)
            # Don't pick a non-polynomial algebraic to be differentiated
            if rule == pull_out_algebraic and not u.is_polynomial(symbol):
                return
            if isinstance(u, sympy.log):
                rec_dv = 1/dv
                if (rec_dv.is_polynomial(symbol) and
                    degree(rec_dv, symbol) == 1):
                    return
            # Can integrate a polynomial times OrthogonalPolynomial
            if rule == pull_out_algebraic and isinstance(dv, OrthogonalPolynomial):
                 v_step = integral_steps(dv, symbol)
                 if contains_dont_know(v_step):
                     return
                 else:
                     du = u.diff(symbol)
                     v = _manualintegrate(v_step)
                     return u, dv, v, du, v_step
            # make sure dv is amenable to integration
            accept = False
            if index < 2: # log and inverse trig are usually worth trying
                accept = True
            elif (rule == pull_out_algebraic and dv.args and
                  all(isinstance(a, (sympy.sin, sympy.cos, sympy.exp))
                      for a in dv.args)):
                accept = True
            else:
                # Accept only if a lower-priority rule would have chosen dv as u.
                for rule in liate_rules[index + 1:]:
                    r = rule(integrand)
                    if r and r[0].subs(dummy, 1).equals(dv):
                        accept = True
                        break
            if accept:
                du = u.diff(symbol)
                v_step = integral_steps(sympy.simplify(dv), symbol)
                if not contains_dont_know(v_step):
                    v = _manualintegrate(v_step)
                    return u, dv, v, du, v_step
def parts_rule(integral):
    """Integration by parts, including cyclic cases like exp(x)*sin(x).

    Repeatedly applies ``_parts_rule``; if the remaining integral becomes a
    constant multiple of the original, emits a CyclicPartsRule, otherwise
    chains PartsRule steps.
    """
    integrand, symbol = integral
    constant, integrand = integrand.as_coeff_Mul()
    result = _parts_rule(integrand, symbol)
    steps = []
    if result:
        u, dv, v, du, v_step = result
        debug("u : {}, dv : {}, v : {}, du : {}, v_step: {}".format(u, dv, v, du, v_step))
        steps.append(result)
        if isinstance(v, sympy.Integral):
            return
        # Set a limit on the number of times u can be used
        if isinstance(u, (sympy.sin, sympy.cos, sympy.exp, sympy.sinh, sympy.cosh)):
            cachekey = u.xreplace({symbol: _cache_dummy})
            if _parts_u_cache[cachekey] > 2:
                return
            _parts_u_cache[cachekey] += 1
        # Try cyclic integration by parts a few times
        for _ in range(4):
            debug("Cyclic integration {} with v: {}, du: {}, integrand: {}".format(_, v, du, integrand))
            coefficient = ((v * du) / integrand).cancel()
            if coefficient == 1:
                break
            if symbol not in coefficient.free_symbols:
                # Remaining integral is a constant multiple of the original:
                # solve the resulting linear equation via CyclicPartsRule.
                rule = CyclicPartsRule(
                    [PartsRule(u, dv, v_step, None, None, None)
                     for (u, dv, v, du, v_step) in steps],
                    (-1) ** len(steps) * coefficient,
                    integrand, symbol
                )
                if (constant != 1) and rule:
                    rule = ConstantTimesRule(constant, integrand, rule,
                                             constant * integrand, symbol)
                return rule
            # _parts_rule is sensitive to constants, factor it out
            next_constant, next_integrand = (v * du).as_coeff_Mul()
            result = _parts_rule(next_integrand, symbol)
            if result:
                u, dv, v, du, v_step = result
                u *= next_constant
                du *= next_constant
                steps.append((u, dv, v, du, v_step))
            else:
                break
    def make_second_step(steps, integrand):
        # Chain the collected parts steps; the innermost remaining integral
        # is handed back to integral_steps.
        if steps:
            u, dv, v, du, v_step = steps[0]
            return PartsRule(u, dv, v_step,
                             make_second_step(steps[1:], v * du),
                             integrand, symbol)
        else:
            steps = integral_steps(integrand, symbol)
            if steps:
                return steps
            else:
                return DontKnowRule(integrand, symbol)
    if steps:
        u, dv, v, du, v_step = steps[0]
        rule = PartsRule(u, dv, v_step,
                         make_second_step(steps[1:], v * du),
                         integrand, symbol)
        if (constant != 1) and rule:
            rule = ConstantTimesRule(constant, integrand, rule,
                                     constant * integrand, symbol)
        return rule
def trig_rule(integral):
    """Integrate elementary trig functions.

    sin/cos of a bare symbol and sec**2/csc**2 are handled directly;
    tan, cot, sec and csc are rewritten into integrable quotients.
    """
    integrand, symbol = integral
    if isinstance(integrand, (sympy.sin, sympy.cos)):
        arg = integrand.args[0]
        if not isinstance(arg, sympy.Symbol):
            return  # perhaps a substitution can deal with it
        func = 'sin' if isinstance(integrand, sympy.sin) else 'cos'
        return TrigRule(func, arg, integrand, symbol)
    if integrand == sympy.sec(symbol)**2:
        return TrigRule('sec**2', symbol, integrand, symbol)
    if integrand == sympy.csc(symbol)**2:
        return TrigRule('csc**2', symbol, integrand, symbol)
    if isinstance(integrand, sympy.tan):
        rewritten = sympy.sin(*integrand.args) / sympy.cos(*integrand.args)
    elif isinstance(integrand, sympy.cot):
        rewritten = sympy.cos(*integrand.args) / sympy.sin(*integrand.args)
    elif isinstance(integrand, sympy.sec):
        # Multiply by (sec + tan)/(sec + tan): numerator is the derivative
        # of the denominator, giving a log antiderivative.
        arg = integrand.args[0]
        rewritten = ((sympy.sec(arg)**2 + sympy.tan(arg) * sympy.sec(arg)) /
                     (sympy.sec(arg) + sympy.tan(arg)))
    elif isinstance(integrand, sympy.csc):
        arg = integrand.args[0]
        rewritten = ((sympy.csc(arg)**2 + sympy.cot(arg) * sympy.csc(arg)) /
                     (sympy.csc(arg) + sympy.cot(arg)))
    else:
        return
    return RewriteRule(
        rewritten,
        integral_steps(rewritten, symbol),
        integrand, symbol
    )
def trig_product_rule(integral):
    """Recognize sec*tan and csc*cot products up to a constant factor."""
    integrand, symbol = integral
    candidates = (
        ('sec*tan', sympy.sec(symbol) * sympy.tan(symbol)),
        ('csc*cot', -sympy.csc(symbol) * sympy.cot(symbol)),
    )
    for name, product in candidates:
        ratio = integrand / product
        if symbol not in ratio.free_symbols:
            rule = TrigRule(name, symbol, product, symbol)
            if ratio != 1 and rule:
                rule = ConstantTimesRule(ratio, product, rule, integrand, symbol)
            return rule
def quadratic_denom_rule(integral):
    """Integrate rationals with a quadratic denominator.

    Handles a/(b*x**2 + c) via arctan/arctanh/arccoth, completes the square
    for a/(b*x**2 + c*x + d), and splits (a*x + b)/(c*x**2 + d*x + e) into
    a logarithmic part plus a remaining quadratic-denominator part.
    """
    integrand, symbol = integral
    a = sympy.Wild('a', exclude=[symbol])
    b = sympy.Wild('b', exclude=[symbol])
    c = sympy.Wild('c', exclude=[symbol])
    match = integrand.match(a / (b * symbol ** 2 + c))
    if match:
        a, b, c = match[a], match[b], match[c]
        if b.is_extended_real and c.is_extended_real:
            # Choice of arctan vs arctanh/arccoth depends on sign of c/b
            # and on which side of the singularity x lies.
            return PiecewiseRule([(ArctanRule(a, b, c, integrand, symbol), sympy.Gt(c / b, 0)),
                                  (ArccothRule(a, b, c, integrand, symbol), sympy.And(sympy.Gt(symbol ** 2, -c / b), sympy.Lt(c / b, 0))),
                                  (ArctanhRule(a, b, c, integrand, symbol), sympy.And(sympy.Lt(symbol ** 2, -c / b), sympy.Lt(c / b, 0))),
            ], integrand, symbol)
        else:
            return ArctanRule(a, b, c, integrand, symbol)
    d = sympy.Wild('d', exclude=[symbol])
    match2 = integrand.match(a / (b * symbol ** 2 + c * symbol + d))
    if match2:
        b, c = match2[b], match2[c]
        if b.is_zero:
            return
        # Complete the square: substitute u = x + c/(2b).
        u = sympy.Dummy('u')
        u_func = symbol + c/(2*b)
        integrand2 = integrand.subs(symbol, u - c / (2*b))
        next_step = integral_steps(integrand2, u)
        if next_step:
            return URule(u, u_func, None, next_step, integrand2, symbol)
        else:
            return
    e = sympy.Wild('e', exclude=[symbol])
    match3 = integrand.match((a* symbol + b) / (c * symbol ** 2 + d * symbol + e))
    if match3:
        a, b, c, d, e = match3[a], match3[b], match3[c], match3[d], match3[e]
        if c.is_zero:
            return
        # Split the numerator as const*(denominator)' + remainder.
        denominator = c * symbol**2 + d * symbol + e
        const =  a/(2*c)
        numer1 =  (2*c*symbol+d)
        numer2 = - const*d + b
        u = sympy.Dummy('u')
        step1 = URule(u,
                      denominator,
                      const,
                      integral_steps(u**(-1), u),
                      integrand,
                      symbol)
        if const != 1:
            step1 = ConstantTimesRule(const,
                                      numer1/denominator,
                                      step1,
                                      const*numer1/denominator,
                                      symbol)
        if numer2.is_zero:
            return step1
        step2 = integral_steps(numer2/denominator, symbol)
        substeps = AddRule([step1, step2], integrand, symbol)
        rewriten = const*numer1/denominator+numer2/denominator
        return RewriteRule(rewriten, substeps, integrand, symbol)
    return
def root_mul_rule(integral):
    """Substitute u = sqrt(a*x + b) in integrands sqrt(a*x + b)*c.

    Declines when a second square root of a linear expression is present
    (a single substitution would not eliminate both).
    """
    integrand, symbol = integral
    a = sympy.Wild('a', exclude=[symbol])
    b = sympy.Wild('b', exclude=[symbol])
    c = sympy.Wild('c')
    match = integrand.match(sympy.sqrt(a * symbol + b) * c)
    if not match:
        return
    a, b, c = match[a], match[b], match[c]
    d = sympy.Wild('d', exclude=[symbol])
    e = sympy.Wild('e', exclude=[symbol])
    f = sympy.Wild('f')
    recursion_test = c.match(sympy.sqrt(d * symbol + e) * f)
    if recursion_test:
        return
    # With u = sqrt(a*x + b): x = (u**2 - b)/a and dx = 2*u/a du.
    u = sympy.Dummy('u')
    u_func = sympy.sqrt(a * symbol + b)
    integrand = integrand.subs(u_func, u)
    integrand = integrand.subs(symbol, (u**2 - b) / a)
    integrand = integrand * 2 * u / a
    next_step = integral_steps(integrand, u)
    if next_step:
        # NOTE(review): ``integrand`` was rebound above, so the URule's
        # integrand field holds the transformed expression, not the
        # original — looks intentional (cf. quadratic_denom_rule), confirm.
        return URule(u, u_func, None, next_step, integrand, symbol)
@sympy.cacheit
def make_wilds(symbol):
    """Cached wilds: coefficients a, b and integer exponents m, n."""
    is_integer = lambda k: isinstance(k, sympy.Integer)
    a = sympy.Wild('a', exclude=[symbol])
    b = sympy.Wild('b', exclude=[symbol])
    m = sympy.Wild('m', exclude=[symbol], properties=[is_integer])
    n = sympy.Wild('n', exclude=[symbol], properties=[is_integer])
    return a, b, m, n
@sympy.cacheit
def sincos_pattern(symbol):
    """Cached pattern sin(a*x)**m * cos(b*x)**n plus its wilds."""
    a, b, m, n = make_wilds(symbol)
    return sympy.sin(a*symbol)**m * sympy.cos(b*symbol)**n, a, b, m, n
@sympy.cacheit
def tansec_pattern(symbol):
    """Cached pattern tan(a*x)**m * sec(b*x)**n plus its wilds."""
    a, b, m, n = make_wilds(symbol)
    return sympy.tan(a*symbol)**m * sympy.sec(b*symbol)**n, a, b, m, n
@sympy.cacheit
def cotcsc_pattern(symbol):
    """Cached pattern cot(a*x)**m * csc(b*x)**n plus its wilds."""
    a, b, m, n = make_wilds(symbol)
    return sympy.cot(a*symbol)**m * sympy.csc(b*symbol)**n, a, b, m, n
@sympy.cacheit
def heaviside_pattern(symbol):
    """Cached pattern Heaviside(m*x + b) * g plus its wilds."""
    m = sympy.Wild('m', exclude=[symbol])
    b = sympy.Wild('b', exclude=[symbol])
    g = sympy.Wild('g')
    return sympy.Heaviside(m*symbol + b) * g, m, b, g
def uncurry(func):
    """Adapt ``func(a, b, ...)`` into a callable taking one argument tuple."""
    return lambda args: func(*args)
def trig_rewriter(rewrite):
    """Wrap a trig rewrite as a rule over (a, b, m, n, integrand, symbol)."""
    def trig_rewriter_rl(args):
        a, b, m, n, integrand, symbol = args
        rewritten = rewrite(a, b, m, n, integrand, symbol)
        if rewritten == integrand:
            return
        return RewriteRule(
            rewritten,
            integral_steps(rewritten, symbol),
            integrand, symbol)
    return trig_rewriter_rl
# Parity-based rewrites for powers/products of trig functions.  Each
# ``*_condition`` inspects the matched exponents (m, n); the paired
# rewriter applies the corresponding half-angle or Pythagorean identity.
# Both even: half-angle formulas.
sincos_botheven_condition = uncurry(
    lambda a, b, m, n, i, s: m.is_even and n.is_even and
    m.is_nonnegative and n.is_nonnegative)
sincos_botheven = trig_rewriter(
    lambda a, b, m, n, i, symbol: ( (((1 - sympy.cos(2*a*symbol)) / 2) ** (m / 2)) *
                                    (((1 + sympy.cos(2*b*symbol)) / 2) ** (n / 2)) ))
# Odd sin power: peel one sin factor, convert the rest via sin**2 = 1 - cos**2.
sincos_sinodd_condition = uncurry(lambda a, b, m, n, i, s: m.is_odd and m >= 3)
sincos_sinodd = trig_rewriter(
    lambda a, b, m, n, i, symbol: ( (1 - sympy.cos(a*symbol)**2)**((m - 1) / 2) *
                                    sympy.sin(a*symbol) *
                                    sympy.cos(b*symbol) ** n))
# Odd cos power: symmetric to the above.
sincos_cosodd_condition = uncurry(lambda a, b, m, n, i, s: n.is_odd and n >= 3)
sincos_cosodd = trig_rewriter(
    lambda a, b, m, n, i, symbol: ( (1 - sympy.sin(b*symbol)**2)**((n - 1) / 2) *
                                    sympy.cos(b*symbol) *
                                    sympy.sin(a*symbol) ** m))
# Even sec power: peel sec**2, convert the rest via sec**2 = 1 + tan**2.
tansec_seceven_condition = uncurry(lambda a, b, m, n, i, s: n.is_even and n >= 4)
tansec_seceven = trig_rewriter(
    lambda a, b, m, n, i, symbol: ( (1 + sympy.tan(b*symbol)**2) ** (n/2 - 1) *
                                    sympy.sec(b*symbol)**2 *
                                    sympy.tan(a*symbol) ** m ))
# Odd tan power: peel one tan, convert the rest via tan**2 = sec**2 - 1.
tansec_tanodd_condition = uncurry(lambda a, b, m, n, i, s: m.is_odd)
tansec_tanodd = trig_rewriter(
    lambda a, b, m, n, i, symbol: ( (sympy.sec(a*symbol)**2 - 1) ** ((m - 1) / 2) *
                                    sympy.tan(a*symbol) *
                                    sympy.sec(b*symbol) ** n ))
# Bare tan**2: rewrite as sec**2 - 1.
tan_tansquared_condition = uncurry(lambda a, b, m, n, i, s: m == 2 and n == 0)
tan_tansquared = trig_rewriter(
    lambda a, b, m, n, i, symbol: ( sympy.sec(a*symbol)**2 - 1))
# cot/csc analogues of the tan/sec rewrites.
cotcsc_csceven_condition = uncurry(lambda a, b, m, n, i, s: n.is_even and n >= 4)
cotcsc_csceven = trig_rewriter(
    lambda a, b, m, n, i, symbol: ( (1 + sympy.cot(b*symbol)**2) ** (n/2 - 1) *
                                    sympy.csc(b*symbol)**2 *
                                    sympy.cot(a*symbol) ** m ))
cotcsc_cotodd_condition = uncurry(lambda a, b, m, n, i, s: m.is_odd)
cotcsc_cotodd = trig_rewriter(
    lambda a, b, m, n, i, symbol: ( (sympy.csc(a*symbol)**2 - 1) ** ((m - 1) / 2) *
                                    sympy.cot(a*symbol) *
                                    sympy.csc(b*symbol) ** n ))
def trig_sincos_rule(integral):
    """Dispatch sin**m * cos**n integrands to the parity rewriters."""
    integrand, symbol = integral
    if not any(integrand.has(f) for f in (sympy.sin, sympy.cos)):
        return
    pattern, a, b, m, n = sincos_pattern(symbol)
    match = integrand.match(pattern)
    if not match:
        return
    matched = [match.get(i, ZERO) for i in (a, b, m, n)] + [integrand, symbol]
    return multiplexer({
        sincos_botheven_condition: sincos_botheven,
        sincos_sinodd_condition: sincos_sinodd,
        sincos_cosodd_condition: sincos_cosodd
    })(tuple(matched))
def trig_tansec_rule(integral):
    """Dispatch tan**m * sec**n integrands to the parity rewriters."""
    integrand, symbol = integral
    # Normalize 1/cos into sec so the pattern can match it.
    integrand = integrand.subs({
        1 / sympy.cos(symbol): sympy.sec(symbol)
    })
    if not any(integrand.has(f) for f in (sympy.tan, sympy.sec)):
        return
    pattern, a, b, m, n = tansec_pattern(symbol)
    match = integrand.match(pattern)
    if not match:
        return
    matched = [match.get(i, ZERO) for i in (a, b, m, n)] + [integrand, symbol]
    return multiplexer({
        tansec_tanodd_condition: tansec_tanodd,
        tansec_seceven_condition: tansec_seceven,
        tan_tansquared_condition: tan_tansquared
    })(tuple(matched))
def trig_cotcsc_rule(integral):
    """Dispatch cot**m * csc**n integrands to the parity rewriters."""
    integrand, symbol = integral
    # Normalize reciprocal/quotient forms into cot and csc first.
    integrand = integrand.subs({
        1 / sympy.sin(symbol): sympy.csc(symbol),
        1 / sympy.tan(symbol): sympy.cot(symbol),
        sympy.cos(symbol) / sympy.tan(symbol): sympy.cot(symbol)
    })
    if not any(integrand.has(f) for f in (sympy.cot, sympy.csc)):
        return
    pattern, a, b, m, n = cotcsc_pattern(symbol)
    match = integrand.match(pattern)
    if not match:
        return
    matched = [match.get(i, ZERO) for i in (a, b, m, n)] + [integrand, symbol]
    return multiplexer({
        cotcsc_cotodd_condition: cotcsc_cotodd,
        cotcsc_csceven_condition: cotcsc_csceven
    })(tuple(matched))
def trig_sindouble_rule(integral):
    """Expand a sin(2x) factor as 2*sin(x)*cos(x) and retry."""
    integrand, symbol = integral
    a = sympy.Wild('a', exclude=[sympy.sin(2*symbol)])
    match = integrand.match(sympy.sin(2*symbol)*a)
    if not match:
        return
    # Multiply by the double-angle expansion over sin(2x) itself (== 1).
    sin_double = 2*sympy.sin(symbol)*sympy.cos(symbol)/sympy.sin(2*symbol)
    return integral_steps(integrand * sin_double, symbol)
def trig_powers_products_rule(integral):
    """Try each trig power/product strategy in priority order."""
    strategy = do_one(null_safe(trig_sincos_rule),
                      null_safe(trig_tansec_rule),
                      null_safe(trig_cotcsc_rule),
                      null_safe(trig_sindouble_rule))
    return strategy(integral)
def trig_substitution_rule(integral):
    """Try the classic trig substitutions on a + b*x**2 subexpressions:
    tan for a>0,b>0; sin for a>0,b<0; sec for a<0,b>0.
    """
    integrand, symbol = integral
    A = sympy.Wild('a', exclude=[0, symbol])
    B = sympy.Wild('b', exclude=[0, symbol])
    theta = sympy.Dummy("theta")
    target_pattern = A + B*symbol**2
    matches = integrand.find(target_pattern)
    for expr in matches:
        match = expr.match(target_pattern)
        a = match.get(A, ZERO)
        b = match.get(B, ZERO)
        a_positive = ((a.is_number and a > 0) or a.is_positive)
        b_positive = ((b.is_number and b > 0) or b.is_positive)
        a_negative = ((a.is_number and a < 0) or a.is_negative)
        b_negative = ((b.is_number and b < 0) or b.is_negative)
        x_func = None
        if a_positive and b_positive:
            # a**2 + b*x**2. Assume sec(theta) > 0, -pi/2 < theta < pi/2
            x_func = (sympy.sqrt(a)/sympy.sqrt(b)) * sympy.tan(theta)
            # Do not restrict the domain: tan(theta) takes on any real
            # value on the interval -pi/2 < theta < pi/2 so x takes on
            # any value
            restriction = True
        elif a_positive and b_negative:
            # a**2 - b*x**2. Assume cos(theta) > 0, -pi/2 < theta < pi/2
            constant = sympy.sqrt(a)/sympy.sqrt(-b)
            x_func = constant * sympy.sin(theta)
            restriction = sympy.And(symbol > -constant, symbol < constant)
        elif a_negative and b_positive:
            # b*x**2 - a**2. Assume sin(theta) > 0, 0 < theta < pi
            constant = sympy.sqrt(-a)/sympy.sqrt(b)
            x_func = constant * sympy.sec(theta)
            # NOTE(review): for a secant substitution the valid domain is
            # usually |x| > constant, not |x| < constant — confirm.
            restriction = sympy.And(symbol > -constant, symbol < constant)
        if x_func:
            # Manually simplify sqrt(trig(theta)**2) to trig(theta)
            # Valid due to assumed domain restriction
            substitutions = {}
            for f in [sympy.sin, sympy.cos, sympy.tan,
                      sympy.sec, sympy.csc, sympy.cot]:
                substitutions[sympy.sqrt(f(theta)**2)] = f(theta)
                substitutions[sympy.sqrt(f(theta)**(-2))] = 1/f(theta)
            replaced = integrand.subs(symbol, x_func).trigsimp()
            replaced = manual_subs(replaced, substitutions)
            if not replaced.has(symbol):
                # Include dx = x'(theta) d(theta).
                replaced *= manual_diff(x_func, theta)
                replaced = replaced.trigsimp()
                secants = replaced.find(1/sympy.cos(theta))
                if secants:
                    replaced = replaced.xreplace({
                        1/sympy.cos(theta): sympy.sec(theta)
                    })
                substep = integral_steps(replaced, theta)
                if not contains_dont_know(substep):
                    return TrigSubstitutionRule(
                        theta, x_func, replaced, substep, restriction,
                        integrand, symbol)
def heaviside_rule(integral):
    """Integrate Heaviside(m*x + b) * g using the antiderivative of g."""
    integrand, symbol = integral
    pattern, m, b, g = heaviside_pattern(symbol)
    match = integrand.match(pattern)
    if not match or match[g] == 0:
        return
    v_step = integral_steps(match[g], symbol)
    result = _manualintegrate(v_step)
    m, b = match[m], match[b]
    # -b/m is the point where the Heaviside argument changes sign.
    return HeavisideRule(m*symbol + b, -b/m, result, integrand, symbol)
def substitution_rule(integral):
    """Apply u-substitution, returning alternatives when several work.

    When the pulled-out constant has symbols in its denominator, wraps the
    result in a PiecewiseRule guarding against those symbols being zero.
    Falls back to trying u = exp(x) when no substitution is found.
    """
    integrand, symbol = integral
    u_var = sympy.Dummy("u")
    substitutions = find_substitutions(integrand, symbol, u_var)
    count = 0
    if substitutions:
        debug("List of Substitution Rules")
        ways = []
        for u_func, c, substituted in substitutions:
            subrule = integral_steps(substituted, u_var)
            count = count + 1
            debug("Rule {}: {}".format(count, subrule))
            if contains_dont_know(subrule):
                continue
            if sympy.simplify(c - 1) != 0:
                _, denom = c.as_numer_denom()
                if subrule:
                    subrule = ConstantTimesRule(c, substituted, subrule, substituted, u_var)
                if denom.free_symbols:
                    # The constant may divide by zero for some parameter
                    # values: branch on each potentially-zero factor.
                    piecewise = []
                    could_be_zero = []
                    if isinstance(denom, sympy.Mul):
                        could_be_zero = denom.args
                    else:
                        could_be_zero.append(denom)
                    for expr in could_be_zero:
                        if not fuzzy_not(expr.is_zero):
                            substep = integral_steps(manual_subs(integrand, expr, 0), symbol)
                            if substep:
                                piecewise.append((
                                    substep,
                                    sympy.Eq(expr, 0)
                                ))
                    piecewise.append((subrule, True))
                    subrule = PiecewiseRule(piecewise, substituted, symbol)
            ways.append(URule(u_var, u_func, c,
                              subrule,
                              integrand, symbol))
        if len(ways) > 1:
            return AlternativeRule(ways, integrand, symbol)
        elif ways:
            return ways[0]
    elif integrand.has(sympy.exp):
        # Last resort: try u = exp(x) directly.
        u_func = sympy.exp(symbol)
        c = 1
        substituted = integrand / u_func.diff(symbol)
        substituted = substituted.subs(u_func, u_var)
        if symbol not in substituted.free_symbols:
            return URule(u_var, u_func, c,
                         integral_steps(substituted, u_var),
                         integrand, symbol)
# Rewriting strategies built from ``rewriter``: each pairs an applicability
# test with a transformation of the integrand.
partial_fractions_rule = rewriter(
    lambda integrand, symbol: integrand.is_rational_function(),
    lambda integrand, symbol: integrand.apart(symbol))
# cancel() is tried unconditionally.
cancel_rule = rewriter(
    lambda integrand, symbol: True,
    lambda integrand, symbol: integrand.cancel())
distribute_expand_rule = rewriter(
    lambda integrand, symbol: (
        all(arg.is_Pow or arg.is_polynomial(symbol) for arg in integrand.args)
        or isinstance(integrand, sympy.Pow)
        or isinstance(integrand, sympy.Mul)),
    lambda integrand, symbol: integrand.expand())
trig_expand_rule = rewriter(
    # If there are trig functions with different arguments, expand them
    lambda integrand, symbol: (
        len(set(a.args[0] for a in integrand.atoms(TrigonometricFunction))) > 1),
    lambda integrand, symbol: integrand.expand(trig=True))
def derivative_rule(integral):
    """Integrate a Derivative object.

    Stripping one differentiation level works when the integration variable
    is among the differentiation variables; a derivative not involving the
    integration variable at all is a constant.
    """
    integrand = integral[0]
    diff_variables = integrand.variables
    undifferentiated_function = integrand.expr
    if integral.symbol not in undifferentiated_function.free_symbols:
        # Constant with respect to the integration variable.
        return ConstantRule(integral.integrand, *integral)
    if integral.symbol in diff_variables:
        return DerivativeRule(*integral)
    return DontKnowRule(integrand, integral.symbol)
def rewrites_rule(integral):
    """Rewrite 1/cos(x) as sec(x) before integrating."""
    integrand, symbol = integral
    if not integrand.match(1/sympy.cos(symbol)):
        return
    rewritten = integrand.subs(1/sympy.cos(symbol), sympy.sec(symbol))
    return RewriteRule(rewritten, integral_steps(rewritten, symbol),
                       integrand, symbol)
def fallback_rule(integral):
    """Last resort: record that we cannot integrate this."""
    integrand, symbol = integral
    return DontKnowRule(integrand, symbol)
# Cache is used to break cyclic integrals.
# Need to use the same dummy variable in cached expressions for them to match.
# Also record "u" of integration by parts, to avoid infinite repetition.
_integral_cache = {}  # integrand-key -> None while its computation is in flight
_parts_u_cache = defaultdict(int)  # u-key -> times chosen as "u" in parts_rule
_cache_dummy = sympy.Dummy("z")  # canonical stand-in for the integration variable
def integral_steps(integrand, symbol, **options):
    """Compute the (nested) rule tree needed to integrate ``integrand``.

    Dispatches on the syntactic head of the integrand, with rewriting,
    substitution and integration-by-parts fallbacks; ``DontKnowRule`` marks
    subproblems that cannot be handled.  A module-level cache breaks cycles
    by returning DontKnowRule for an integral already being computed.
    """
    cachekey = integrand.xreplace({symbol: _cache_dummy})
    if cachekey in _integral_cache:
        if _integral_cache[cachekey] is None:
            # Stop this attempt, because it leads around in a loop
            return DontKnowRule(integrand, symbol)
        else:
            # TODO: This is for future development, as currently
            # _integral_cache gets no values other than None
            # NOTE(review): xreplace expects a mapping
            # ({_cache_dummy: symbol}); this branch appears unreachable
            # today — confirm before enabling cached results.
            return (_integral_cache[cachekey].xreplace(_cache_dummy, symbol),
                symbol)
    else:
        _integral_cache[cachekey] = None
    integral = IntegralInfo(integrand, symbol)
    def key(integral):
        # Classify the integrand by its head for the switch() below.
        integrand = integral.integrand
        if isinstance(integrand, TrigonometricFunction):
            return TrigonometricFunction
        elif isinstance(integrand, sympy.Derivative):
            return sympy.Derivative
        elif symbol not in integrand.free_symbols:
            return sympy.Number
        else:
            for cls in (sympy.Pow, sympy.Symbol, sympy.exp, sympy.log,
                        sympy.Add, sympy.Mul, sympy.atan, sympy.asin,
                        sympy.acos, sympy.Heaviside, OrthogonalPolynomial):
                if isinstance(integrand, cls):
                    return cls
    def integral_is_subclass(*klasses):
        def _integral_is_subclass(integral):
            k = key(integral)
            return k and issubclass(k, klasses)
        return _integral_is_subclass
    # Strategy pipeline: special functions first, then head-based dispatch,
    # then the rewriting/substitution/parts alternatives, then give up.
    result = do_one(
        null_safe(special_function_rule),
        null_safe(switch(key, {
            sympy.Pow: do_one(null_safe(power_rule), null_safe(inverse_trig_rule), \
                              null_safe(quadratic_denom_rule)),
            sympy.Symbol: power_rule,
            sympy.exp: exp_rule,
            sympy.Add: add_rule,
            sympy.Mul: do_one(null_safe(mul_rule), null_safe(trig_product_rule), \
                              null_safe(heaviside_rule), null_safe(quadratic_denom_rule), \
                              null_safe(root_mul_rule)),
            sympy.Derivative: derivative_rule,
            TrigonometricFunction: trig_rule,
            sympy.Heaviside: heaviside_rule,
            OrthogonalPolynomial: orthogonal_poly_rule,
            sympy.Number: constant_rule
        })),
        do_one(
            null_safe(trig_rule),
            null_safe(alternatives(
                rewrites_rule,
                substitution_rule,
                condition(
                    integral_is_subclass(sympy.Mul, sympy.Pow),
                    partial_fractions_rule),
                condition(
                    integral_is_subclass(sympy.Mul, sympy.Pow),
                    cancel_rule),
                condition(
                    integral_is_subclass(sympy.Mul, sympy.log, sympy.atan, sympy.asin, sympy.acos),
                    parts_rule),
                condition(
                    integral_is_subclass(sympy.Mul, sympy.Pow),
                    distribute_expand_rule),
                trig_powers_products_rule,
                trig_expand_rule
            )),
            null_safe(trig_substitution_rule)
        ),
        fallback_rule)(integral)
    # Only the in-flight marker is kept; drop it now that we are done.
    del _integral_cache[cachekey]
    return result
@evaluates(ConstantRule)
def eval_constant(constant, integrand, symbol):
    """integral of c dx == c*x."""
    return constant * symbol
@evaluates(ConstantTimesRule)
def eval_constanttimes(constant, other, substep, integrand, symbol):
    """Pull the constant factor out and integrate the remaining part."""
    return constant * _manualintegrate(substep)
@evaluates(PowerRule)
def eval_power(base, exp, integrand, symbol):
    """x**n -> x**(n+1)/(n+1), with the n == -1 case giving log(x)."""
    return sympy.Piecewise(
        ((base**(exp + 1))/(exp + 1), sympy.Ne(exp, -1)),
        (sympy.log(base), True),
    )
@evaluates(ExpRule)
def eval_exp(base, exp, integrand, symbol):
    """a**x -> a**x / ln(a)."""
    return integrand / sympy.ln(base)
@evaluates(AddRule)
def eval_add(substeps, integrand, symbol):
    """Integrate a sum term by term."""
    return sum(map(_manualintegrate, substeps))
@evaluates(URule)
def eval_u(u_var, u_func, constant, substep, integrand, symbol):
    """Evaluate the substituted integral, then back-substitute u -> u(x)."""
    result = _manualintegrate(substep)
    if u_func.is_Pow and u_func.exp == -1:
        # avoid needless -log(1/x) from substitution
        result = result.subs(sympy.log(u_var), -sympy.log(u_func.base))
    return result.subs(u_var, u_func)
@evaluates(PartsRule)
def eval_parts(u, dv, v_step, second_step, integrand, symbol):
    """Integration by parts: integral(u dv) == u*v - integral(v du)."""
    v = _manualintegrate(v_step)
    return u * v - _manualintegrate(second_step)
@evaluates(CyclicPartsRule)
def eval_cyclicparts(parts_rules, coefficient, integrand, symbol):
    """Repeated integration by parts that reproduces the original integral
    with some coefficient; solve the resulting linear equation for it."""
    coefficient = 1 - coefficient
    result = []
    sign = 1
    for rule in parts_rules:
        # Signs alternate with each application of parts.
        result.append(sign * rule.u * _manualintegrate(rule.v_step))
        sign *= -1
    return sympy.Add(*result) / coefficient
@evaluates(TrigRule)
def eval_trig(func, arg, integrand, symbol):
    """Antiderivatives of the basic trig forms, keyed by the rule's func tag.

    Returns None (like the original chain of elifs) for an unknown tag.
    """
    antiderivatives = {
        'sin': lambda a: -sympy.cos(a),
        'cos': sympy.sin,
        'sec*tan': sympy.sec,
        'csc*cot': sympy.csc,
        'sec**2': sympy.tan,
        'csc**2': lambda a: -sympy.cot(a),
    }
    if func in antiderivatives:
        return antiderivatives[func](arg)
@evaluates(ArctanRule)
def eval_arctan(a, b, c, integrand, symbol):
    """integral of a/(b*x**2 + c) in the arctan case."""
    return a / b * 1 / sympy.sqrt(c / b) * sympy.atan(symbol / sympy.sqrt(c / b))
@evaluates(ArccothRule)
def eval_arccoth(a, b, c, integrand, symbol):
    """integral of a/(b*x**2 + c) in the acoth case (c/b negative)."""
    return - a / b * 1 / sympy.sqrt(-c / b) * sympy.acoth(symbol / sympy.sqrt(-c / b))
@evaluates(ArctanhRule)
def eval_arctanh(a, b, c, integrand, symbol):
    """integral of a/(b*x**2 + c) in the atanh case (c/b negative)."""
    return - a / b * 1 / sympy.sqrt(-c / b) * sympy.atanh(symbol / sympy.sqrt(-c / b))
@evaluates(ReciprocalRule)
def eval_reciprocal(func, integrand, symbol):
    """integral of 1/f in the logarithmic case."""
    return sympy.ln(func)
@evaluates(ArcsinRule)
def eval_arcsin(integrand, symbol):
    """integral of 1/sqrt(1 - x**2) == asin(x)."""
    return sympy.asin(symbol)
@evaluates(InverseHyperbolicRule)
def eval_inversehyperbolic(func, integrand, symbol):
    """Antiderivative is the inverse-hyperbolic function carried by the rule."""
    return func(symbol)
@evaluates(AlternativeRule)
def eval_alternative(alternatives, integrand, symbol):
    """Several rules applied; evaluate the first alternative."""
    return _manualintegrate(alternatives[0])
@evaluates(RewriteRule)
def eval_rewrite(rewritten, substep, integrand, symbol):
    """The integrand was rewritten; evaluate the rewritten form's substep."""
    return _manualintegrate(substep)
@evaluates(PiecewiseRule)
def eval_piecewise(substeps, integrand, symbol):
    """Evaluate each branch's substep, keeping the branch conditions."""
    return sympy.Piecewise(*[(_manualintegrate(substep), cond)
                             for substep, cond in substeps])
@evaluates(TrigSubstitutionRule)
def eval_trigsubstitution(theta, func, rewritten, substep, restriction, integrand, symbol):
    """Evaluate a trig substitution x = func(theta) and convert the result
    back to ``symbol`` via a right-triangle construction."""
    # Normalize sec(theta) so only sin/cos/tan need handling below.
    func = func.subs(sympy.sec(theta), 1/sympy.cos(theta))
    trig_function = list(func.find(TrigonometricFunction))
    assert len(trig_function) == 1
    trig_function = trig_function[0]
    # Solve x = func(theta) for the trig function to get its value as a
    # ratio of triangle sides.
    relation = sympy.solve(symbol - func, trig_function)
    assert len(relation) == 1
    numer, denom = sympy.fraction(relation[0])
    # Reconstruct the triangle sides from the one known ratio.
    if isinstance(trig_function, sympy.sin):
        opposite = numer
        hypotenuse = denom
        adjacent = sympy.sqrt(denom**2 - numer**2)
        inverse = sympy.asin(relation[0])
    elif isinstance(trig_function, sympy.cos):
        adjacent = numer
        hypotenuse = denom
        opposite = sympy.sqrt(denom**2 - numer**2)
        inverse = sympy.acos(relation[0])
    elif isinstance(trig_function, sympy.tan):
        opposite = numer
        adjacent = denom
        hypotenuse = sympy.sqrt(denom**2 + numer**2)
        inverse = sympy.atan(relation[0])
    # Express every trig function of theta (and theta itself) in terms of x.
    substitution = [
        (sympy.sin(theta), opposite/hypotenuse),
        (sympy.cos(theta), adjacent/hypotenuse),
        (sympy.tan(theta), opposite/adjacent),
        (theta, inverse)
    ]
    # The result is only valid under the substitution's restriction.
    return sympy.Piecewise(
        (_manualintegrate(substep).subs(substitution).trigsimp(), restriction)
    )
@evaluates(DerivativeRule)
def eval_derivativerule(integrand, symbol):
    """Integrating a Derivative wrt one of its variables removes one order
    of differentiation in that variable."""
    # isinstance(integrand, Derivative) should be True
    variable_count = list(integrand.variable_count)
    for i, (var, count) in enumerate(variable_count):
        if var == symbol:
            # Cancel exactly one differentiation wrt the integration symbol.
            variable_count[i] = (var, count-1)
            break
    return sympy.Derivative(integrand.expr, *variable_count)
@evaluates(HeavisideRule)
def eval_heaviside(harg, ibnd, substep, integrand, symbol):
    """Antiderivative of Heaviside(harg)*g(symbol), continuous at ibnd."""
    # If we are integrating over x and the integrand has the form
    # Heaviside(m*x+b)*g(x) == Heaviside(harg)*g(symbol)
    # then there needs to be continuity at -b/m == ibnd,
    # so we subtract the appropriate term.
    return sympy.Heaviside(harg)*(substep - substep.subs(symbol, ibnd))
@evaluates(JacobiRule)
def eval_jacobi(n, a, b, integrand, symbol):
    """Antiderivative of the Jacobi polynomial P_n^(a,b), with special
    cases for n + a + b == 0 (n == 0 and n == 1)."""
    return Piecewise(
        (2*sympy.jacobi(n + 1, a - 1, b - 1, symbol)/(n + a + b), Ne(n + a + b, 0)),
        (symbol, Eq(n, 0)),
        ((a + b + 2)*symbol**2/4 + (a - b)*symbol/2, Eq(n, 1)))
@evaluates(GegenbauerRule)
def eval_gegenbauer(n, a, integrand, symbol):
    """Antiderivative of the Gegenbauer polynomial C_n^(a); the a == 1 case
    reduces to Chebyshev polynomials of the second kind."""
    return Piecewise(
        (sympy.gegenbauer(n + 1, a - 1, symbol)/(2*(a - 1)), Ne(a, 1)),
        (sympy.chebyshevt(n + 1, symbol)/(n + 1), Ne(n, -1)),
        (sympy.S.Zero, True))
@evaluates(ChebyshevTRule)
def eval_chebyshevt(n, integrand, symbol):
    """Antiderivative of T_n, with the |n| == 1 case handled separately."""
    return Piecewise(((sympy.chebyshevt(n + 1, symbol)/(n + 1) -
                       sympy.chebyshevt(n - 1, symbol)/(n - 1))/2, Ne(sympy.Abs(n), 1)),
                     (symbol**2/2, True))
@evaluates(ChebyshevURule)
def eval_chebyshevu(n, integrand, symbol):
    """Antiderivative of U_n: T_{n+1}/(n+1) (first kind appears on purpose)."""
    return Piecewise(
        (sympy.chebyshevt(n + 1, symbol)/(n + 1), Ne(n, -1)),
        (sympy.S.Zero, True))
@evaluates(LegendreRule)
def eval_legendre(n, integrand, symbol):
    """Antiderivative of the Legendre polynomial P_n."""
    return (sympy.legendre(n + 1, symbol) - sympy.legendre(n - 1, symbol))/(2*n + 1)
@evaluates(HermiteRule)
def eval_hermite(n, integrand, symbol):
    """Antiderivative of the (physicists') Hermite polynomial H_n."""
    return sympy.hermite(n + 1, symbol)/(2*(n + 1))
@evaluates(LaguerreRule)
def eval_laguerre(n, integrand, symbol):
    """Antiderivative of the Laguerre polynomial L_n."""
    return sympy.laguerre(n, symbol) - sympy.laguerre(n + 1, symbol)
@evaluates(AssocLaguerreRule)
def eval_assoclaguerre(n, a, integrand, symbol):
    """Antiderivative of the associated Laguerre polynomial L_n^(a)."""
    return -sympy.assoc_laguerre(n + 1, a - 1, symbol)
@evaluates(CiRule)
def eval_ci(a, b, integrand, symbol):
    """integral of cos(a*x + b)/x in terms of Ci and Si."""
    return sympy.cos(b)*sympy.Ci(a*symbol) - sympy.sin(b)*sympy.Si(a*symbol)
@evaluates(ChiRule)
def eval_chi(a, b, integrand, symbol):
    """integral of cosh(a*x + b)/x in terms of Chi and Shi."""
    return sympy.cosh(b)*sympy.Chi(a*symbol) + sympy.sinh(b)*sympy.Shi(a*symbol)
@evaluates(EiRule)
def eval_ei(a, b, integrand, symbol):
    """integral of exp(a*x + b)/x in terms of Ei."""
    return sympy.exp(b)*sympy.Ei(a*symbol)
@evaluates(SiRule)
def eval_si(a, b, integrand, symbol):
    """integral of sin(a*x + b)/x in terms of Ci and Si."""
    return sympy.sin(b)*sympy.Ci(a*symbol) + sympy.cos(b)*sympy.Si(a*symbol)
@evaluates(ShiRule)
def eval_shi(a, b, integrand, symbol):
    """integral of sinh(a*x + b)/x in terms of Chi and Shi."""
    return sympy.sinh(b)*sympy.Chi(a*symbol) + sympy.cosh(b)*sympy.Shi(a*symbol)
@evaluates(ErfRule)
def eval_erf(a, b, c, integrand, symbol):
    """integral of exp(a*x**2 + b*x + c); erf for real negative a, erfi
    otherwise."""
    if a.is_extended_real:
        return Piecewise(
            (sympy.sqrt(sympy.pi/(-a))/2 * sympy.exp(c - b**2/(4*a)) *
                sympy.erf((-2*a*symbol - b)/(2*sympy.sqrt(-a))), a < 0),
            (sympy.sqrt(sympy.pi/a)/2 * sympy.exp(c - b**2/(4*a)) *
                sympy.erfi((2*a*symbol + b)/(2*sympy.sqrt(a))), True))
    else:
        return sympy.sqrt(sympy.pi/a)/2 * sympy.exp(c - b**2/(4*a)) * \
            sympy.erfi((2*a*symbol + b)/(2*sympy.sqrt(a)))
@evaluates(FresnelCRule)
def eval_fresnelc(a, b, c, integrand, symbol):
    """integral of cos(a*x**2 + b*x + c) via the Fresnel integrals."""
    return sympy.sqrt(sympy.pi/(2*a)) * (
        sympy.cos(b**2/(4*a) - c)*sympy.fresnelc((2*a*symbol + b)/sympy.sqrt(2*a*sympy.pi)) +
        sympy.sin(b**2/(4*a) - c)*sympy.fresnels((2*a*symbol + b)/sympy.sqrt(2*a*sympy.pi)))
@evaluates(FresnelSRule)
def eval_fresnels(a, b, c, integrand, symbol):
    """integral of sin(a*x**2 + b*x + c) via the Fresnel integrals."""
    return sympy.sqrt(sympy.pi/(2*a)) * (
        sympy.cos(b**2/(4*a) - c)*sympy.fresnels((2*a*symbol + b)/sympy.sqrt(2*a*sympy.pi)) -
        sympy.sin(b**2/(4*a) - c)*sympy.fresnelc((2*a*symbol + b)/sympy.sqrt(2*a*sympy.pi)))
@evaluates(LiRule)
def eval_li(a, b, integrand, symbol):
    """integral of 1/log(a*x + b) in terms of the logarithmic integral li."""
    return sympy.li(a*symbol + b)/a
@evaluates(PolylogRule)
def eval_polylog(a, b, integrand, symbol):
    """integral of polylog(b, a*x)/x raises the polylog order by one."""
    return sympy.polylog(b + 1, a*symbol)
@evaluates(UpperGammaRule)
def eval_uppergamma(a, e, integrand, symbol):
    """integral of x**e * exp(a*x) in terms of the upper incomplete gamma."""
    return symbol**e * (-a*symbol)**(-e) * sympy.uppergamma(e + 1, -a*symbol)/a
@evaluates(EllipticFRule)
def eval_elliptic_f(a, d, integrand, symbol):
    """integral of 1/sqrt(a + d*sin(x)**2) via elliptic_f."""
    return sympy.elliptic_f(symbol, d/a)/sympy.sqrt(a)
@evaluates(EllipticERule)
def eval_elliptic_e(a, d, integrand, symbol):
    """integral of sqrt(a + d*sin(x)**2) via elliptic_e."""
    return sympy.elliptic_e(symbol, d/a)*sympy.sqrt(a)
@evaluates(DontKnowRule)
def eval_dontknowrule(integrand, symbol):
    """No rule applied; return the unevaluated Integral."""
    return sympy.Integral(integrand, symbol)
def _manualintegrate(rule):
    """Evaluate a rule produced by integral_steps.

    Dispatch is by the rule's concrete class through the module-level
    ``evaluators`` registry populated by the ``@evaluates`` decorator.
    """
    handler = evaluators.get(type(rule))
    if handler is None:
        raise ValueError("Cannot evaluate rule %s" % repr(rule))
    return handler(*rule)
def manualintegrate(f, var):
    """Compute an antiderivative of ``f`` with respect to ``var`` using the
    step-by-step rule machinery (integral_steps + _manualintegrate)."""
    result = _manualintegrate(integral_steps(f, var))
    # Clear the cache of u-parts
    _parts_u_cache.clear()
    # If we got Piecewise with two parts, put generic first
    if isinstance(result, Piecewise) and len(result.args) == 2:
        cond = result.args[0][1]
        # '== True' (not 'is True') on purpose: the condition is a sympy
        # boolean (S.true), which compares equal to Python True.
        if isinstance(cond, Eq) and result.args[1][1] == True:
            # Swap the branches so the generic one comes first, guarded by
            # the negation of the equality condition.
            result = result.func(
                (result.args[1][0], sympy.Ne(*cond.args)),
                (result.args[0][0], True))
    return result
| true | true |
1c3b16400ef2d32f5767944d9dafca6e9323cbf2 | 1,528 | py | Python | vendor/github.com/elastic/beats/metricbeat/scripts/fields_collector.py | PPACI/krakenbeat | e75ec8f006164acb8a57d0c9609bebe534955813 | [
"Apache-2.0"
] | 3 | 2018-01-04T19:15:26.000Z | 2020-02-20T03:35:27.000Z | vendor/github.com/elastic/beats/metricbeat/scripts/fields_collector.py | PPACI/krakenbeat | e75ec8f006164acb8a57d0c9609bebe534955813 | [
"Apache-2.0"
] | null | null | null | vendor/github.com/elastic/beats/metricbeat/scripts/fields_collector.py | PPACI/krakenbeat | e75ec8f006164acb8a57d0c9609bebe534955813 | [
"Apache-2.0"
] | 1 | 2020-10-11T14:57:48.000Z | 2020-10-11T14:57:48.000Z | import os
# Collects fields for all modules and metricset
def collect():
    """Concatenate the _meta/fields.yml of every module and metricset.

    Module-level fields are emitted verbatim; metricset-level fields are
    indented so they nest under their module in the combined YAML document,
    which is printed to stdout so callers can redirect or concatenate it.
    """
    base_dir = "module"
    path = os.path.abspath("module")
    # yml file
    fields_yml = ""
    # Iterate over all modules
    for module in sorted(os.listdir(base_dir)):
        module_fields = path + "/" + module + "/_meta/fields.yml"
        # Only check folders where fields.yml exists
        if not os.path.isfile(module_fields):
            continue
        # Load module yaml. NOTE: the original used the Python 2 builtin
        # file(), which no longer exists in Python 3; open() is equivalent.
        with open(module_fields) as f:
            tmp = f.read()
            fields_yml += tmp
        # Iterate over all metricsets
        for metricset in sorted(os.listdir(base_dir + "/" + module)):
            metricset_fields = path + "/" + module + "/" + metricset + "/_meta/fields.yml"
            # Only check folders where fields.yml exists
            if not os.path.isfile(metricset_fields):
                continue
            # Load metricset yaml
            with open(metricset_fields) as f:
                # Add indentation in front of each non-empty line
                for line in f:
                    if line.strip():
                        fields_yml += " " + " " + line
                    else:
                        fields_yml += line
        # Add newline to make sure indentation is correct
        fields_yml += "\n"
    # output string so it can be concatenated
    print(fields_yml)
if __name__ == "__main__":
    # Allow running the script directly: print the combined fields YAML.
    collect()
| 28.296296 | 91 | 0.522906 | import os
def collect():
base_dir = "module"
path = os.path.abspath("module")
fields_yml = ""
for module in sorted(os.listdir(base_dir)):
module_fields = path + "/" + module + "/_meta/fields.yml"
if os.path.isfile(module_fields) == False:
continue
with file(module_fields) as f:
tmp = f.read()
fields_yml += tmp
for metricset in sorted(os.listdir(base_dir + "/" + module)):
metricset_fields = path + "/" + module + "/" + metricset + "/_meta/fields.yml"
if os.path.isfile(metricset_fields) == False:
continue
with file(metricset_fields) as f:
for line in f:
if len(line.strip()) > 0:
fields_yml += " " + " " + line
else:
fields_yml += line
fields_yml += "\n"
print(fields_yml)
if __name__ == "__main__":
    # Script entry point: print the combined fields YAML.
    collect()
| true | true |
1c3b1647485da720eaeab997c19ab32646c50bab | 1,501 | py | Python | main.py | ajp-A/altsniffer | 6a45a4a8b63aaa05610f3414d4fdd5f0bea90f86 | [
"MIT"
] | null | null | null | main.py | ajp-A/altsniffer | 6a45a4a8b63aaa05610f3414d4fdd5f0bea90f86 | [
"MIT"
] | null | null | null | main.py | ajp-A/altsniffer | 6a45a4a8b63aaa05610f3414d4fdd5f0bea90f86 | [
"MIT"
] | null | null | null | import beautifulsoup4 as bs4
import requests as rq
# Fetch the PoxNora trade-listing page.
html_site = rq.get("https://www.poxnora.com/trader/listtrades.do?service=trader/listtrades.do")
# NOTE(review): BeautifulSoup is not defined in this module — the import at
# the top uses the distribution name "beautifulsoup4", but the importable
# package is bs4 (from bs4 import BeautifulSoup). TODO confirm and fix.
# NOTE(review): BeautifulSoup expects markup (html_site.text / .content),
# not the Response object itself — presumably this should pass
# html_site.text; verify.
soup = BeautifulSoup(html_site, 'html.parser')
if __name__ == '__main__':
    # NOTE(review): main() is not defined in this module (a draft lives only
    # inside the string literal below), so this call raises NameError.
    main()
"""from bs4 import BeautifulSoup
from os import path
from sys import exit
import pandas as pd
def main():
    html_text = ""
    file_name = "runepage.html"
    if not path.exists(file_name):
        print(f"Path does not exist for filename :{file_name}")
        exit(1)
    with open(file_name, "r") as f:
        html_text = f.read()
    soup = BeautifulSoup(html_text, 'html.parser')
    runes_area_div = soup.find_all(id="runesArea")[0] #TODO: check that len(arr) is 1
    rune_slot_divs = runes_area_div.find_all("div", class_="rune_slot")
    if len(rune_slot_divs) != 30:
        print(f"Expected 30 rune slots... got: {len(rune_slot_divs)}")
    else:
        print("30 children found as expected.")
    runes = []
    for rune_slot_div in rune_slot_divs:
        tooltip_element = rune_slot_div.find(class_="tooltiptext")
        if tooltip_element is None:
            print(f"Unable to read span for div: {rune_slot_div}")
        tooltip_text: str = tooltip_element.text
        name, nora_cost = tooltip_text.split("Nora Cost: ")
        row = {"Rune_Name": name, "Nora_Cost": nora_cost}
        runes.append(row)
    df = pd.DataFrame(runes, columns=["Rune_Name", "Nora_Cost"])
    print(df)
    return
if __name__ == '__main__':
    main()"""
import requests as rq
html_site = rq.get("https://www.poxnora.com/trader/listtrades.do?service=trader/listtrades.do")
soup = BeautifulSoup(html_site, 'html.parser')
if __name__ == '__main__':
main()
| true | true |
1c3b16c69b0c5704668f2afab4edc623fff685bf | 5,324 | py | Python | tests/index_test.py | DubeySandeep/pending-review-notification | 353fa74d98eeb6c8386818273a2fe02af39d6b9d | [
"Apache-2.0"
] | null | null | null | tests/index_test.py | DubeySandeep/pending-review-notification | 353fa74d98eeb6c8386818273a2fe02af39d6b9d | [
"Apache-2.0"
] | null | null | null | tests/index_test.py | DubeySandeep/pending-review-notification | 353fa74d98eeb6c8386818273a2fe02af39d6b9d | [
"Apache-2.0"
] | 1 | 2021-10-20T16:24:04.000Z | 2021-10-20T16:24:04.000Z | """Unit test for the index.py file."""
import unittest
from datetime import datetime, timedelta, timezone
import json
from unittest.mock import patch, mock_open
import requests_mock
from src import index
from src import github_services
class ModuleIntegerationTest(unittest.TestCase):
    """Integration test for the send notification feature."""
    def setUp(self):
        """Build the canned GitHub API payloads shared by the tests."""
        self.orgName = 'orgName'
        self.repoName = 'repo'
        # Two open PRs, both assigned to the same two reviewers.
        self.pull_response = [{
            'html_url': 'https://githuburl.pull/123',
            'number': 123,
            'title': 'PR title 1',
            'user': {
                'login': 'authorName',
            },
            'assignees': [{
                'login': 'reviewerName1',
            }, {
                'login': 'reviewerName2',
            }]
        }, {
            'html_url': 'https://githuburl.pull/234',
            'number': 234,
            'title': 'PR title 2',
            'user': {
                'login': 'authorName',
            },
            'assignees': [{
                'login': 'reviewerName1',
            }, {
                'login': 'reviewerName2',
            }]
        }]
        def get_past_time(hours=0):
            # ISO-8601 UTC timestamp the given number of hours in the past.
            return (
                datetime.now(timezone.utc) - timedelta(hours=hours)).strftime(
                "%Y-%m-%dT%H:%M:%SZ")
        # Issue timelines: assignment times determine the reported wait.
        self.timeline1 = [{
            'event': 'created'
        }, {
            'event': 'assigned',
            'assignee': {
                'login': 'reviewerName1'
            },
            'created_at': get_past_time(hours=22)
        },{
            'event': 'assigned',
            'assignee': {
                'login': 'reviewerName2'
            },
            'created_at': get_past_time(hours=56)
        }]
        self.timeline2 = [{
            'event': 'created'
        }, {
            'event': 'assigned',
            'assignee': {
                'login': 'reviewerName1'
            },
            'created_at': get_past_time(hours=23)
        }, {
            'event': 'assigned',
            'assignee': {
                'login': 'reviewerName2'
            },
            # 19h < the 20h max-wait used below, so PR 234 is excluded for
            # reviewerName2.
            'created_at': get_past_time(hours=19)
        }]
        self.test_template = "{{ username }}\n{{ pr_list }}"
    def mock_all_get_requests(self, mock_request):
        """Register paginated GET mocks for PR listings and issue timelines
        (page 2 is always empty to terminate pagination)."""
        param_page_1='?page=1&per_page=100'
        param_page_2='?page=2&per_page=100'
        mock_request.get(
            github_services.PULL_REQUESTS_URL_TEMPLATE.format(
                self.orgName, self.repoName) + param_page_1,
            text=json.dumps(self.pull_response))
        mock_request.get(
            github_services.PULL_REQUESTS_URL_TEMPLATE.format(
                self.orgName, self.repoName) + param_page_2,
            text=json.dumps([]))
        mock_request.get(
            github_services.ISSUE_TIMELINE_URL_TEMPLATE.format(
                self.orgName, self.repoName, 123) + param_page_1,
            text=json.dumps(self.timeline1))
        mock_request.get(
            github_services.ISSUE_TIMELINE_URL_TEMPLATE.format(
                self.orgName, self.repoName, 123) + param_page_2,
            text=json.dumps([]))
        mock_request.get(
            github_services.ISSUE_TIMELINE_URL_TEMPLATE.format(
                self.orgName, self.repoName, 234) + param_page_1,
            text=json.dumps(self.timeline2))
        mock_request.get(
            github_services.ISSUE_TIMELINE_URL_TEMPLATE.format(
                self.orgName, self.repoName, 234) + param_page_2,
            text=json.dumps([]))
    def mock_post_discussion_request(self, mock_request):
        """Register the POST mock for discussion creation and return its
        matcher so the test can inspect recorded calls."""
        request = mock_request.post(
            github_services.CREATE_DISCUSSION_URL_TEMPLATE.format(
                self.orgName, 'teamName'),
            text=json.dumps({}))
        return request
    def test_executing_main_function_sends_notification(self):
        """index.main() should post one discussion per reviewer with the
        expected title and PR wait-time body."""
        with requests_mock.Mocker() as mock_request:
            self.mock_all_get_requests(mock_request)
            request = self.mock_post_discussion_request(mock_request)
            # Patch open() so the message template is read from memory.
            file_data = mock_open(read_data=self.test_template)
            with patch("builtins.open", file_data):
                index.main([
                    '--team', 'teamName',
                    '--repo', 'orgName/repo',
                    '--max-wait-hours', '20',
                    '--token', 'githubTokenForApiRequest'
                ])
        self.assertTrue(request.called)
        self.assertEqual(request.call_count, 2)
        expected_messages = [
            {
                'title': '[@reviewerName1] Pending review on PRs',
                'body': '@reviewerName1\n- [#123](https://githuburl.pull/123) '
                        '[Waiting from the last 22 hours]\n'
                        '- [#234](https://githuburl.pull/234) '
                        '[Waiting from the last 23 hours]'
            },
            {
                'title': '[@reviewerName2] Pending review on PRs',
                'body': '@reviewerName2\n- [#123](https://githuburl.pull/123) '
                        '[Waiting from the last 2 days, 8 hours]'
            },
        ]
        self.assertEqual(
            request.request_history[0].json(), expected_messages[0])
        self.assertEqual(
            request.request_history[1].json(), expected_messages[1])
| 35.493333 | 79 | 0.523666 |
import unittest
from datetime import datetime, timedelta, timezone
import json
from unittest.mock import patch, mock_open
import requests_mock
from src import index
from src import github_services
class ModuleIntegerationTest(unittest.TestCase):
    """Integration test for the pending-review notification feature."""
    def setUp(self):
        """Build the canned GitHub API payloads shared by the tests."""
        self.orgName = 'orgName'
        self.repoName = 'repo'
        # Two open PRs, both assigned to the same two reviewers.
        self.pull_response = [{
            'html_url': 'https://githuburl.pull/123',
            'number': 123,
            'title': 'PR title 1',
            'user': {
                'login': 'authorName',
            },
            'assignees': [{
                'login': 'reviewerName1',
            }, {
                'login': 'reviewerName2',
            }]
        }, {
            'html_url': 'https://githuburl.pull/234',
            'number': 234,
            'title': 'PR title 2',
            'user': {
                'login': 'authorName',
            },
            'assignees': [{
                'login': 'reviewerName1',
            }, {
                'login': 'reviewerName2',
            }]
        }]
        def get_past_time(hours=0):
            # ISO-8601 UTC timestamp the given number of hours in the past.
            return (
                datetime.now(timezone.utc) - timedelta(hours=hours)).strftime(
                "%Y-%m-%dT%H:%M:%SZ")
        # Issue timelines: assignment times determine the reported wait.
        self.timeline1 = [{
            'event': 'created'
        }, {
            'event': 'assigned',
            'assignee': {
                'login': 'reviewerName1'
            },
            'created_at': get_past_time(hours=22)
        },{
            'event': 'assigned',
            'assignee': {
                'login': 'reviewerName2'
            },
            'created_at': get_past_time(hours=56)
        }]
        self.timeline2 = [{
            'event': 'created'
        }, {
            'event': 'assigned',
            'assignee': {
                'login': 'reviewerName1'
            },
            'created_at': get_past_time(hours=23)
        }, {
            'event': 'assigned',
            'assignee': {
                'login': 'reviewerName2'
            },
            # 19h < the 20h max-wait used below, so PR 234 is excluded for
            # reviewerName2.
            'created_at': get_past_time(hours=19)
        }]
        self.test_template = "{{ username }}\n{{ pr_list }}"
    def mock_all_get_requests(self, mock_request):
        """Register paginated GET mocks for PR listings and issue timelines
        (page 2 is always empty to terminate pagination)."""
        param_page_1='?page=1&per_page=100'
        param_page_2='?page=2&per_page=100'
        mock_request.get(
            github_services.PULL_REQUESTS_URL_TEMPLATE.format(
                self.orgName, self.repoName) + param_page_1,
            text=json.dumps(self.pull_response))
        mock_request.get(
            github_services.PULL_REQUESTS_URL_TEMPLATE.format(
                self.orgName, self.repoName) + param_page_2,
            text=json.dumps([]))
        mock_request.get(
            github_services.ISSUE_TIMELINE_URL_TEMPLATE.format(
                self.orgName, self.repoName, 123) + param_page_1,
            text=json.dumps(self.timeline1))
        mock_request.get(
            github_services.ISSUE_TIMELINE_URL_TEMPLATE.format(
                self.orgName, self.repoName, 123) + param_page_2,
            text=json.dumps([]))
        mock_request.get(
            github_services.ISSUE_TIMELINE_URL_TEMPLATE.format(
                self.orgName, self.repoName, 234) + param_page_1,
            text=json.dumps(self.timeline2))
        mock_request.get(
            github_services.ISSUE_TIMELINE_URL_TEMPLATE.format(
                self.orgName, self.repoName, 234) + param_page_2,
            text=json.dumps([]))
    def mock_post_discussion_request(self, mock_request):
        """Register the POST mock for discussion creation and return its
        matcher so the test can inspect recorded calls."""
        request = mock_request.post(
            github_services.CREATE_DISCUSSION_URL_TEMPLATE.format(
                self.orgName, 'teamName'),
            text=json.dumps({}))
        return request
    def test_executing_main_function_sends_notification(self):
        """index.main() should post one discussion per reviewer with the
        expected title and PR wait-time body."""
        with requests_mock.Mocker() as mock_request:
            self.mock_all_get_requests(mock_request)
            request = self.mock_post_discussion_request(mock_request)
            # Patch open() so the message template is read from memory.
            file_data = mock_open(read_data=self.test_template)
            with patch("builtins.open", file_data):
                index.main([
                    '--team', 'teamName',
                    '--repo', 'orgName/repo',
                    '--max-wait-hours', '20',
                    '--token', 'githubTokenForApiRequest'
                ])
        self.assertTrue(request.called)
        self.assertEqual(request.call_count, 2)
        expected_messages = [
            {
                'title': '[@reviewerName1] Pending review on PRs',
                'body': '@reviewerName1\n- [#123](https://githuburl.pull/123) '
                        '[Waiting from the last 22 hours]\n'
                        '- [#234](https://githuburl.pull/234) '
                        '[Waiting from the last 23 hours]'
            },
            {
                'title': '[@reviewerName2] Pending review on PRs',
                'body': '@reviewerName2\n- [#123](https://githuburl.pull/123) '
                        '[Waiting from the last 2 days, 8 hours]'
            },
        ]
        self.assertEqual(
            request.request_history[0].json(), expected_messages[0])
        self.assertEqual(
            request.request_history[1].json(), expected_messages[1])
| true | true |
1c3b16d7582cd20af1931a5780603ab312df1df6 | 5,335 | py | Python | sinergym/utils/evaluation.py | jajimer/sinergym | 685bcb3cda8095eef1add2b5d12e0ce102efefe9 | [
"MIT"
] | 23 | 2021-10-30T15:42:24.000Z | 2022-03-29T13:27:39.000Z | sinergym/utils/evaluation.py | jajimer/sinergym | 685bcb3cda8095eef1add2b5d12e0ce102efefe9 | [
"MIT"
] | 93 | 2021-09-30T09:05:31.000Z | 2022-03-31T18:11:57.000Z | sinergym/utils/evaluation.py | jajimer/sinergym | 685bcb3cda8095eef1add2b5d12e0ce102efefe9 | [
"MIT"
] | 7 | 2021-11-24T10:28:42.000Z | 2022-03-04T14:11:29.000Z | """Custom policy evaluations for Evaluation Callbacks."""
from typing import Any, Callable, Dict, Optional, Union
import gym
import numpy as np
from stable_baselines3.common import base_class
from stable_baselines3.common.vec_env import VecEnv
def evaluate_policy(model: "base_class.BaseAlgorithm",
                    env: Union[gym.Env, VecEnv],
                    n_eval_episodes: int = 5,
                    deterministic: bool = True,
                    render: bool = False,
                    callback: Optional[Callable[[Dict[str, Any],
                                                 Dict[str, Any]],
                                                None]] = None,
                    ) -> Dict[str, Any]:
    """Run the policy for n_eval_episodes episodes and collect per-episode
    building-control metrics. This is made to work only with one env.

    .. note:: If environment has not been wrapped with Monitor wrapper, reward and
        episode lengths are counted as it appears with env.step calls. If
        the environment contains wrappers that modify rewards or episode lengths
        (e.g. reward scaling, early episode reset), these will affect the evaluation
        results as well. You can avoid this by wrapping environment with Monitor
        wrapper before anything else.

    :param model: The RL agent you want to evaluate.
    :param env: The gym environment. In the case of a VecEnv this must contain only one environment.
    :param n_eval_episodes: Number of episodes to evaluate the agent.
    :param deterministic: Whether to use deterministic or stochastic actions.
    :param render: Whether to render the environment or not.
    :param callback: Callback function to do additional checks, called after each step.
        Gets locals() and globals() passed as parameters.
    :return: Dict of per-episode metric lists with keys ``episodes_rewards``,
        ``episodes_lengths``, ``episodes_powers``, ``episodes_comfort_violations``
        (percentage of steps violating comfort; NaN for zero-length episodes),
        ``episodes_comfort_penalties`` and ``episodes_power_penalties``; each
        list holds one entry per evaluated episode.
    """
    result = {
        'episodes_rewards': [],
        'episodes_lengths': [],
        'episodes_powers': [],
        'episodes_comfort_violations': [],
        'episodes_comfort_penalties': [],
        'episodes_power_penalties': []
    }
    episodes_executed = 0
    needs_reset = True
    while episodes_executed < n_eval_episodes:
        # Number of loops here might differ from true episodes
        # played, if underlying wrappers modify episode lengths.
        # Avoid double reset, as VecEnv are reset automatically.
        if not isinstance(env, VecEnv) or needs_reset:
            obs = env.reset()
            needs_reset = False
        done, state = False, None
        episode_reward = 0.0
        episode_length = 0
        episode_steps_comfort_violation = 0
        episode_power = 0.0
        episode_comfort_penalty = 0.0
        episode_power_penalty = 0.0
        # ---------------------------------------------------------------- #
        # Running episode and accumulating values                          #
        # ---------------------------------------------------------------- #
        while not done:
            action, state = model.predict(
                obs, state=state, deterministic=deterministic)
            obs, reward, done, info = env.step(action)
            episode_reward += reward
            episode_power += info[0]['total_power']
            episode_power_penalty += info[0]['total_power_no_units']
            episode_comfort_penalty += info[0]['comfort_penalty']
            # A non-zero comfort penalty marks a comfort-violation step.
            if info[0]['comfort_penalty'] != 0:
                episode_steps_comfort_violation += 1
            if callback is not None:
                callback(locals(), globals())
            episode_length += 1
            if render:
                env.render()
        episodes_executed += 1
        # ---------------------------------------------------------------- #
        # Storing accumulated values in result                             #
        # ---------------------------------------------------------------- #
        result['episodes_rewards'].append(episode_reward)
        result['episodes_lengths'].append(episode_length)
        result['episodes_powers'].append(episode_power)
        try:
            result['episodes_comfort_violations'].append(
                episode_steps_comfort_violation / episode_length * 100)
        except ZeroDivisionError:
            # Zero-length episode: violation percentage is undefined.
            result['episodes_comfort_violations'].append(np.nan)
        result['episodes_comfort_penalties'].append(episode_comfort_penalty)
        result['episodes_power_penalties'].append(episode_power_penalty)
    return result
| 50.809524 | 141 | 0.56926 |
from typing import Any, Callable, Dict, Optional, Union
import gym
import numpy as np
from stable_baselines3.common import base_class
from stable_baselines3.common.vec_env import VecEnv
def evaluate_policy(model: "base_class.BaseAlgorithm",
                    env: Union[gym.Env,
                               VecEnv],
                    n_eval_episodes: int = 5,
                    deterministic: bool = True,
                    render: bool = False,
                    callback: Optional[Callable[[Dict[str,
                                                      Any],
                                                 Dict[str,
                                                      Any]],
                                                None]] = None,
                    ) -> Any:
    """Run the policy for ``n_eval_episodes`` episodes and return a dict of
    per-episode metric lists: rewards, lengths, powers, comfort-violation
    percentages (NaN for zero-length episodes), comfort penalties and power
    penalties. Made to work with a single environment only.
    """
    result = {
        'episodes_rewards': [],
        'episodes_lengths': [],
        'episodes_powers': [],
        'episodes_comfort_violations': [],
        'episodes_comfort_penalties': [],
        'episodes_power_penalties': []
    }
    episodes_executed = 0
    not_reseted = True
    while episodes_executed < n_eval_episodes:
        # Avoid double reset: VecEnv resets itself automatically.
        if not isinstance(env, VecEnv) or not_reseted:
            obs = env.reset()
            not_reseted = False
        done, state = False, None
        episode_reward = 0.0
        episode_length = 0
        episode_steps_comfort_violation = 0
        episode_power = 0.0
        episode_comfort_penalty = 0.0
        episode_power_penalty = 0.0
        # Run one episode, accumulating reward/energy/comfort terms.
        while not done:
            action, state = model.predict(
                obs, state=state, deterministic=deterministic)
            obs, reward, done, info = env.step(action)
            episode_reward += reward
            episode_power += info[0]['total_power']
            episode_power_penalty += info[0]['total_power_no_units']
            episode_comfort_penalty += info[0]['comfort_penalty']
            # A non-zero comfort penalty marks a comfort-violation step.
            if info[0]['comfort_penalty'] != 0:
                episode_steps_comfort_violation += 1
            if callback is not None:
                callback(locals(), globals())
            episode_length += 1
            if render:
                env.render()
        episodes_executed += 1
        # Store this episode's accumulated values.
        result['episodes_rewards'].append(episode_reward)
        result['episodes_lengths'].append(episode_length)
        result['episodes_powers'].append(episode_power)
        try:
            result['episodes_comfort_violations'].append(
                episode_steps_comfort_violation / episode_length * 100)
        except ZeroDivisionError:
            # Zero-length episode: violation percentage is undefined.
            result['episodes_comfort_violations'].append(np.nan)
        result['episodes_comfort_penalties'].append(episode_comfort_penalty)
        result['episodes_power_penalties'].append(episode_power_penalty)
    return result
| true | true |
1c3b17069c9737cf2b01ab0b39710f8ac86971f0 | 1,091 | py | Python | src/agents/util/agent_credentials.py | MelleStarke/MAS_B06 | fbedc7459006c0915428f0122d923dd41b57b51e | [
"MIT"
] | null | null | null | src/agents/util/agent_credentials.py | MelleStarke/MAS_B06 | fbedc7459006c0915428f0122d923dd41b57b51e | [
"MIT"
] | null | null | null | src/agents/util/agent_credentials.py | MelleStarke/MAS_B06 | fbedc7459006c0915428f0122d923dd41b57b51e | [
"MIT"
] | null | null | null | sender = ("masb6-sender@01337.io", "sender-pass")
receiver = ("masb6-receiver@01337.io", "receiver-pass")
pra = ("masb6-pra@01337.io", "pra-pass")
kma = ("masb6-kma@01337.io", "kma-pass")
ca = ("masb6-ca@01337.io", "ca-pass")
oaa = ("masb6-oaa@01337.io", "oaa-pass")
ssa = ("masb6-ssa@01337.io", "ssa-pass")
oa = ("masb6-oa@01337.io", "oa-pass")
vra = ("masb6-vra@01337.io", "vra-pass")
sa1 = ("masb6-sa1@01337.io", "sa1-pass")
sa2 = ("masb6-sa2@01337.io", "sa2-pass")
sa3 = ("masb6-sa3@01337.io", "sa3-pass")
sa4 = ("masb6-sa4@01337.io", "sa4-pass")
sa5 = ("masb6-sa5@01337.io", "sa5-pass")
sa6 = ("masb6-sa6@01337.io", "sa6-pass")
sa7 = ("masb6-sa7@01337.io", "sa7-pass")
sa8 = ("masb6-sa8@01337.io", "sa8-pass")
sa9 = ("masb6-sa9@01337.io", "sa9-pass")
sa10 = ("masb6-sa10@01337.io", "sa10-pass")
va1 = ("masb6-va1@01337.io", "va1-pass")
va2 = ("masb6-va1@01337.io", "va2-pass")
"""
{"masb6-sa3@01337.io": 3, "masb6-sa1@01337.io": 1, "masb6-sa4@01337.io": 4, "masb6-sa2@01337.io": 2}
""" | 40.407407 | 100 | 0.55912 | sender = ("masb6-sender@01337.io", "sender-pass")
# Agent account credentials as (JID, password) pairs on the 01337.io server.
# NOTE(review): hard-coded secrets checked into source control — consider
# loading these from the environment or a config file instead.
receiver = ("masb6-receiver@01337.io", "receiver-pass")
pra = ("masb6-pra@01337.io", "pra-pass")
kma = ("masb6-kma@01337.io", "kma-pass")
ca = ("masb6-ca@01337.io", "ca-pass")
oaa = ("masb6-oaa@01337.io", "oaa-pass")
ssa = ("masb6-ssa@01337.io", "ssa-pass")
oa = ("masb6-oa@01337.io", "oa-pass")
vra = ("masb6-vra@01337.io", "vra-pass")
sa1 = ("masb6-sa1@01337.io", "sa1-pass")
sa2 = ("masb6-sa2@01337.io", "sa2-pass")
sa3 = ("masb6-sa3@01337.io", "sa3-pass")
sa4 = ("masb6-sa4@01337.io", "sa4-pass")
sa5 = ("masb6-sa5@01337.io", "sa5-pass")
sa6 = ("masb6-sa6@01337.io", "sa6-pass")
sa7 = ("masb6-sa7@01337.io", "sa7-pass")
sa8 = ("masb6-sa8@01337.io", "sa8-pass")
sa9 = ("masb6-sa9@01337.io", "sa9-pass")
sa10 = ("masb6-sa10@01337.io", "sa10-pass")
va1 = ("masb6-va1@01337.io", "va1-pass")
va2 = ("masb6-va1@01337.io", "va2-pass") | true | true |
1c3b170fd3f62bce27b92d5cb1ef3d235cf983ae | 454 | py | Python | manage.py | sladebot/traffiko-data | 459d13185057666a2dbee22031e3ecc549ff5e37 | [
"MIT"
] | null | null | null | manage.py | sladebot/traffiko-data | 459d13185057666a2dbee22031e3ecc549ff5e37 | [
"MIT"
] | null | null | null | manage.py | sladebot/traffiko-data | 459d13185057666a2dbee22031e3ecc549ff5e37 | [
"MIT"
] | null | null | null | # This file starts the WSGI web application.
# - Heroku starts gunicorn, which loads Procfile, which starts manage.py
# - Developers can run it from the command line: python runserver.py
#   NOTE(review): this file is named manage.py; "python runserver.py" in the
#   line above looks stale — presumably "python manage.py runserver".
from main import manager, server
# Start a development web server, processing extra command line parameters. E.g.:
# - python manage.py init_db
# - python manage.py runserver
if __name__ == "__main__":
    # Register the development-server command, then dispatch on argv.
    manager.add_command("runserver", server)
    manager.run()
| 34.923077 | 81 | 0.751101 |
from main import manager, server
if __name__ == "__main__":
manager.add_command("runserver", server)
manager.run()
| true | true |
1c3b1735c55d3fa7f817015dbdaa357c535f0a45 | 5,087 | py | Python | 25_clock_signal.py | KanegaeGabriel/advent-of-code-2016 | 68a46604ebae7d96a196c1fcf2666f1e74ee2999 | [
"MIT"
] | null | null | null | 25_clock_signal.py | KanegaeGabriel/advent-of-code-2016 | 68a46604ebae7d96a196c1fcf2666f1e74ee2999 | [
"MIT"
] | null | null | null | 25_clock_signal.py | KanegaeGabriel/advent-of-code-2016 | 68a46604ebae7d96a196c1fcf2666f1e74ee2999 | [
"MIT"
] | null | null | null | ################################
# --- Day 25: Clock Signal --- #
################################
import AOCUtils
class VM:
def __init__(self, program):
self.program = program[:]
self.pc = 0
self.registers = {"a": 0, "b": 0, "c": 0, "d": 0}
self.lastOutput = 1
self.outputLength = 0
self.loops = False
def run(self):
while self.pc < len(self.program):
cmd = self.program[self.pc].split()
inst = cmd[0]
x = cmd[1]
xVal = int(x) if not x.isalpha() else self.registers[x]
if len(cmd) > 2:
y = cmd[2]
yVal = int(y) if not y.isalpha() else self.registers[y]
if inst == "cpy":
self.registers[y] = xVal
elif inst == "inc":
self.registers[x] += 1
elif inst == "dec":
self.registers[x] -= 1
elif inst == "jnz":
if xVal != 0:
self.pc += yVal - 1
elif inst == "out":
if xVal == self.lastOutput:
break
self.lastOutput = xVal
self.outputLength += 1
# Assume that the clock loops forever if it keeps oscillating after 100 cycles
if self.outputLength > 100:
self.loops = True
break
self.pc += 1
################################
program = AOCUtils.loadInput(25)
# i = 0
# while True:
# vm = VM(program)
# vm.registers["a"] = i
# vm.run()
# if vm.loops:
# print("Part 1: {}".format(i))
# break
# i += 1
X = int(program[1].split()[1])
Y = int(program[2].split()[1])
n = 1
while True:
repeat = int("10"*n, 2)
if repeat > X * Y: break
n += 1
a = repeat - X * Y
print("Part 1: {}".format(a))
AOCUtils.printTimeTaken()
# Part 1: Smallest a such that a + x*y = 0b10...10
"""
| | <<<<<<<<<<<<<<<<<<<<<<<<<<<<<< | while True: | loop forever:
0 | cpy a d | d = a ^ | d = a |
1 | cpy 7 c | c = 7 ^ | |
| | ^ | |
2 | cpy 362 b | b = 362 <<<<<<<<<<<<< ^ | d += 362 * 7 |
3 | inc d | d += 1 <<<<<<<<<<< ^ ^ | |
4 | dec b | b -= 1 ^ ^ ^ | |
5 | jnz b -2 | while b != 0: >>>> ^ ^ | |
6 | dec c | c -= 1 ^ ^ | |
7 | jnz c -5 | while c != 0: >>>>>>> ^ | |
| | ^ | |
8 | cpy d a | a = d ^ | a = d | a = a + 362 * 7
| | ^ | |
9 | jnz 0 0 | <<<<<<<<<<<<<<<<<<<<<<<<<<< ^ | while a != 0: | while a != 0:
10 | cpy a b | b = a ^ ^ | b = a |
11 | cpy 0 a | a = 0 ^ ^ | a = 0 |
12 | cpy 2 c | c = 2 <<<<<<<<<<<<<<< ^ ^ | c = 2 |
13 | jnz b 2 | if b != 0: >>>> << ^ ^ ^ | while b != 0 and c != 0: |
14 | jnz 1 6 | >>>>>>>>>>>>. v ^ ^ .v ^ ^ | |
15 | dec b | b -= 1 <<<<<<<< ^ ^ v ^ ^ | b -= 1 |
16 | dec c | c -= 1 ^ ^ v ^ ^ | c -= 1 |
17 | jnz c -4 | while c != 0: >>>> ^ v ^ ^ | |
18 | inc a | a += 1 ^ v ^ ^ | a += 1 | a = b // 2
19 | jnz 1 -7 | >>>>>>>>>>>>>>>>>>>>> v ^ ^ | | c = 2 - (a % 2)
20 | cpy 2 b | <<<<<<<<<<<<<<<<<<<<<<<< ^ ^ | |
| | ^ ^ | |
21 | jnz c 2 | if c != 0: >>>>>>>v << ^ ^ | while c != 0: | b = 2 - c = a % 2
22 | jnz 1 4 | >>>>>>>>>>>>>>>> v ^ ^ ^ | |
23 | dec b | b -= 1 <<<<<<. v .v ^ ^ ^ | b -= 1 |
24 | dec c | c -= 1 v ^ ^ ^ | c -= 1 |
25 | jnz 1 -4 | >>>>>>>>>>>>>. v .>>>> ^ ^ | |
26 | jnz 0 0 | <<<<<<<<<<<<<<<< ^ ^ | |
| | ^ ^ | |
27 | out b | out b ^ ^ | out b | print(b)
28 | jnz a -19 | if a != 0: >>>>>>>>>>>>>>>> ^ | |
29 | jnz 1 -21 | >>>>>>>>>>>>>>>>>>>>>>>>>>>>>> | |
""" | 42.041322 | 103 | 0.244348 | = self.lastOutput:
break
self.lastOutput = xVal
self.outputLength += 1
if self.outputLength > 100:
self.loops = True
break
self.pc += 1
| true | true |
1c3b196bf441c75de9b48e2bfc9dfd8364686b6a | 2,579 | py | Python | src/sdk/pynni/nni/platform/local.py | logenBupt/nni | 699ec812e749ae8d060c3862d5775864ab043cd2 | [
"MIT"
] | null | null | null | src/sdk/pynni/nni/platform/local.py | logenBupt/nni | 699ec812e749ae8d060c3862d5775864ab043cd2 | [
"MIT"
] | null | null | null | src/sdk/pynni/nni/platform/local.py | logenBupt/nni | 699ec812e749ae8d060c3862d5775864ab043cd2 | [
"MIT"
] | null | null | null | # Copyright (c) Microsoft Corporation. All rights reserved.
#
# MIT License
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and
# associated documentation files (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge, publish, distribute,
# sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or
# substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT
# NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT
# OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# ==================================================================================================
import os
import json
import time
import json_tricks
from ..common import init_logger, env_args
_sysdir = os.environ['NNI_SYS_DIR']
if not os.path.exists(os.path.join(_sysdir, '.nni')):
os.makedirs(os.path.join(_sysdir, '.nni'))
_metric_file = open(os.path.join(_sysdir, '.nni', 'metrics'), 'wb')
_outputdir = os.environ['NNI_OUTPUT_DIR']
if not os.path.exists(_outputdir):
os.makedirs(_outputdir)
_log_file_path = os.path.join(_outputdir, 'trial.log')
init_logger(_log_file_path)
_param_index = 0
def request_next_parameter():
metric = json_tricks.dumps({
'trial_job_id': env_args.trial_job_id,
'type': 'REQUEST_PARAMETER',
'sequence': 0,
'parameter_index': _param_index
})
send_metric(metric)
def get_parameters():
global _param_index
params_filepath = os.path.join(_sysdir, 'parameter_{}.cfg'.format(_param_index))
if not os.path.isfile(params_filepath):
request_next_parameter()
while not os.path.isfile(params_filepath):
time.sleep(3)
params_file = open(params_filepath, 'r')
params = json.load(params_file)
_param_index += 1
return params
def send_metric(string):
data = (string + '\n').encode('utf8')
assert len(data) < 1000000, 'Metric too long'
_metric_file.write(b'ME%06d%b' % (len(data), data))
_metric_file.flush()
| 38.492537 | 100 | 0.705312 |
import os
import json
import time
import json_tricks
from ..common import init_logger, env_args
_sysdir = os.environ['NNI_SYS_DIR']
if not os.path.exists(os.path.join(_sysdir, '.nni')):
os.makedirs(os.path.join(_sysdir, '.nni'))
_metric_file = open(os.path.join(_sysdir, '.nni', 'metrics'), 'wb')
_outputdir = os.environ['NNI_OUTPUT_DIR']
if not os.path.exists(_outputdir):
os.makedirs(_outputdir)
_log_file_path = os.path.join(_outputdir, 'trial.log')
init_logger(_log_file_path)
_param_index = 0
def request_next_parameter():
metric = json_tricks.dumps({
'trial_job_id': env_args.trial_job_id,
'type': 'REQUEST_PARAMETER',
'sequence': 0,
'parameter_index': _param_index
})
send_metric(metric)
def get_parameters():
global _param_index
params_filepath = os.path.join(_sysdir, 'parameter_{}.cfg'.format(_param_index))
if not os.path.isfile(params_filepath):
request_next_parameter()
while not os.path.isfile(params_filepath):
time.sleep(3)
params_file = open(params_filepath, 'r')
params = json.load(params_file)
_param_index += 1
return params
def send_metric(string):
data = (string + '\n').encode('utf8')
assert len(data) < 1000000, 'Metric too long'
_metric_file.write(b'ME%06d%b' % (len(data), data))
_metric_file.flush()
| true | true |
1c3b197d31e7c6ec15cd503dbc7f1b1a4607c696 | 1,711 | py | Python | profiles_api/models.py | rs2pydev/profiles-rest-api | e2fc7f0cf664586bf688b7e1b921eb5ae8bcfaf2 | [
"MIT"
] | null | null | null | profiles_api/models.py | rs2pydev/profiles-rest-api | e2fc7f0cf664586bf688b7e1b921eb5ae8bcfaf2 | [
"MIT"
] | null | null | null | profiles_api/models.py | rs2pydev/profiles-rest-api | e2fc7f0cf664586bf688b7e1b921eb5ae8bcfaf2 | [
"MIT"
] | null | null | null | from django.db import models
from django.contrib.auth.models import AbstractBaseUser
from django.contrib.auth.models import PermissionsMixin
from django.contrib.auth.models import BaseUserManager
class UserProfileManager(BaseUserManager):
"""Manager for user profiles"""
def create_user(self, email, name, password=None):
"""Create a new user profile"""
if not email:
raise ValueError('User must have an email address')
email = self.normalize_email(email)
user = self.model(email=email, name=name)
user.set_password(password)
user.save(using=self._db)
return user
def create_superuser(self, email, name, password):
"""Create and save a new superuser with given details"""
user = self.create_user(email, name, password)
user.is_superuser = True
user.is_staff = True
user.save(using=self._db)
return user
class UserProfile(AbstractBaseUser, PermissionsMixin):
"""Database model for users in the system"""
email = models.EmailField(max_length=255, unique=True)
name = models.CharField(max_length=255)
is_active = models.BooleanField(default=True)
is_staff = models.BooleanField(default=False)
objects = UserProfileManager()
USERNAME_FIELD = "email"
REQUIRED_FIELDS = ['name']
def get_full_name(self):
"""Retrieve full name of user"""
return self.name
def get_short_name(self):
"""Retrieve short name of user"""
return self.name
def __str__(self):
"""Return string representation of our user"""
return self.email | 30.553571 | 67 | 0.649912 | from django.db import models
from django.contrib.auth.models import AbstractBaseUser
from django.contrib.auth.models import PermissionsMixin
from django.contrib.auth.models import BaseUserManager
class UserProfileManager(BaseUserManager):
def create_user(self, email, name, password=None):
if not email:
raise ValueError('User must have an email address')
email = self.normalize_email(email)
user = self.model(email=email, name=name)
user.set_password(password)
user.save(using=self._db)
return user
def create_superuser(self, email, name, password):
user = self.create_user(email, name, password)
user.is_superuser = True
user.is_staff = True
user.save(using=self._db)
return user
class UserProfile(AbstractBaseUser, PermissionsMixin):
email = models.EmailField(max_length=255, unique=True)
name = models.CharField(max_length=255)
is_active = models.BooleanField(default=True)
is_staff = models.BooleanField(default=False)
objects = UserProfileManager()
USERNAME_FIELD = "email"
REQUIRED_FIELDS = ['name']
def get_full_name(self):
return self.name
def get_short_name(self):
return self.name
def __str__(self):
return self.email | true | true |
1c3b19df73a5d7575da69233da553894bb996fb0 | 7,661 | py | Python | python_modules/dagster/dagster/core/launcher/default_run_launcher.py | coderanger/dagster | d3e323f8ed55cd906d6f44f19595348ea1580b2d | [
"Apache-2.0"
] | null | null | null | python_modules/dagster/dagster/core/launcher/default_run_launcher.py | coderanger/dagster | d3e323f8ed55cd906d6f44f19595348ea1580b2d | [
"Apache-2.0"
] | null | null | null | python_modules/dagster/dagster/core/launcher/default_run_launcher.py | coderanger/dagster | d3e323f8ed55cd906d6f44f19595348ea1580b2d | [
"Apache-2.0"
] | null | null | null | import time
import weakref
import grpc
from dagster import check, seven
from dagster.core.errors import DagsterLaunchFailedError
from dagster.core.host_representation import ExternalPipeline
from dagster.core.host_representation.handle import (
GrpcServerRepositoryLocationHandle,
ManagedGrpcPythonEnvRepositoryLocationHandle,
)
from dagster.core.instance import DagsterInstance
from dagster.core.storage.pipeline_run import PipelineRun
from dagster.core.storage.tags import GRPC_INFO_TAG
from dagster.grpc.client import DagsterGrpcClient
from dagster.grpc.types import (
CanCancelExecutionRequest,
CancelExecutionRequest,
ExecuteExternalPipelineArgs,
)
from dagster.serdes import ConfigurableClass
from dagster.utils import merge_dicts
from .base import RunLauncher
GRPC_REPOSITORY_LOCATION_HANDLE_TYPES = (
GrpcServerRepositoryLocationHandle,
ManagedGrpcPythonEnvRepositoryLocationHandle,
)
class DefaultRunLauncher(RunLauncher, ConfigurableClass):
"""Launches runs against running GRPC servers.
"""
def __init__(self, inst_data=None):
self._instance_weakref = None
self._inst_data = inst_data
# Used for test cleanup purposes only
self._run_id_to_repository_location_handle_cache = {}
@property
def inst_data(self):
return self._inst_data
@classmethod
def config_type(cls):
return {}
@staticmethod
def from_config_value(inst_data, config_value):
return DefaultRunLauncher(inst_data=inst_data)
@property
def _instance(self):
return self._instance_weakref() if self._instance_weakref else None
def initialize(self, instance):
check.inst_param(instance, "instance", DagsterInstance)
check.invariant(self._instance is None, "Must only call initialize once")
# Store a weakref to avoid a circular reference / enable GC
self._instance_weakref = weakref.ref(instance)
def launch_run(self, instance, run, external_pipeline):
check.inst_param(run, "run", PipelineRun)
check.inst_param(external_pipeline, "external_pipeline", ExternalPipeline)
repository_location_handle = external_pipeline.repository_handle.repository_location_handle
check.inst(
repository_location_handle,
GRPC_REPOSITORY_LOCATION_HANDLE_TYPES,
"DefaultRunLauncher: Can't launch runs for pipeline not loaded from a GRPC server",
)
self._instance.add_run_tags(
run.run_id,
{
GRPC_INFO_TAG: seven.json.dumps(
merge_dicts(
{"host": repository_location_handle.host},
{"port": repository_location_handle.port}
if repository_location_handle.port
else {"socket": repository_location_handle.socket},
)
)
},
)
res = repository_location_handle.client.start_run(
ExecuteExternalPipelineArgs(
pipeline_origin=external_pipeline.get_external_origin(),
pipeline_run_id=run.run_id,
instance_ref=self._instance.get_ref(),
)
)
if not res.success:
raise (
DagsterLaunchFailedError(
res.message, serializable_error_info=res.serializable_error_info
)
)
self._run_id_to_repository_location_handle_cache[run.run_id] = repository_location_handle
return run
def _get_grpc_client_for_termination(self, run_id):
if not self._instance:
return None
run = self._instance.get_run_by_id(run_id)
if not run or run.is_finished:
return None
tags = run.tags
if GRPC_INFO_TAG not in tags:
return None
grpc_info = seven.json.loads(tags.get(GRPC_INFO_TAG))
return DagsterGrpcClient(
port=grpc_info.get("port"), socket=grpc_info.get("socket"), host=grpc_info.get("host")
)
def can_terminate(self, run_id):
check.str_param(run_id, "run_id")
client = self._get_grpc_client_for_termination(run_id)
if not client:
return False
try:
res = client.can_cancel_execution(CanCancelExecutionRequest(run_id=run_id), timeout=5)
except grpc._channel._InactiveRpcError: # pylint: disable=protected-access
# Server that created the run may no longer exist
return False
return res.can_cancel
def terminate(self, run_id):
check.str_param(run_id, "run_id")
if not self._instance:
return False
run = self._instance.get_run_by_id(run_id)
if not run:
return False
self._instance.report_engine_event(
message="Received pipeline termination request.", pipeline_run=run, cls=self.__class__
)
client = self._get_grpc_client_for_termination(run_id)
if not client:
self._instance.report_engine_event(
message="Unable to get grpc client to send termination request to.",
pipeline_run=run,
cls=self.__class__,
)
return False
res = client.cancel_execution(CancelExecutionRequest(run_id=run_id))
return res.success
def join(self, timeout=30):
# If this hasn't been initialized at all, we can just do a noop
if not self._instance:
return
total_time = 0
interval = 0.01
while True:
active_run_ids = [
run_id
for run_id in self._run_id_to_repository_location_handle_cache.keys()
if (
self._instance.get_run_by_id(run_id)
and not self._instance.get_run_by_id(run_id).is_finished
)
]
if len(active_run_ids) == 0:
return
if total_time >= timeout:
raise Exception(
"Timed out waiting for these runs to finish: {active_run_ids}".format(
active_run_ids=repr(active_run_ids)
)
)
total_time += interval
time.sleep(interval)
interval = interval * 2
def cleanup_managed_grpc_servers(self):
"""Shut down any managed grpc servers that used this run launcher to start a run.
Should only be used for teardown purposes within tests (generally it's fine for a server
to out-live the host process, since it might be finishing an execution and will
automatically shut itself down once it no longer receives a heartbeat from the host
process). But in tests, gRPC servers access the DagsterInstance during execution, so we need
to shut them down before we can safely remove the temporary directory created for the
DagsterInstance.
"""
for repository_location_handle in self._run_id_to_repository_location_handle_cache.values():
if isinstance(repository_location_handle, ManagedGrpcPythonEnvRepositoryLocationHandle):
check.invariant(
repository_location_handle.is_cleaned_up,
"ManagedGrpcPythonRepositoryLocationHandle was not cleaned up "
"before test teardown. This may indicate that the handle is not "
"being used as a contextmanager.",
)
repository_location_handle.grpc_server_process.wait()
| 34.822727 | 100 | 0.645869 | import time
import weakref
import grpc
from dagster import check, seven
from dagster.core.errors import DagsterLaunchFailedError
from dagster.core.host_representation import ExternalPipeline
from dagster.core.host_representation.handle import (
GrpcServerRepositoryLocationHandle,
ManagedGrpcPythonEnvRepositoryLocationHandle,
)
from dagster.core.instance import DagsterInstance
from dagster.core.storage.pipeline_run import PipelineRun
from dagster.core.storage.tags import GRPC_INFO_TAG
from dagster.grpc.client import DagsterGrpcClient
from dagster.grpc.types import (
CanCancelExecutionRequest,
CancelExecutionRequest,
ExecuteExternalPipelineArgs,
)
from dagster.serdes import ConfigurableClass
from dagster.utils import merge_dicts
from .base import RunLauncher
GRPC_REPOSITORY_LOCATION_HANDLE_TYPES = (
GrpcServerRepositoryLocationHandle,
ManagedGrpcPythonEnvRepositoryLocationHandle,
)
class DefaultRunLauncher(RunLauncher, ConfigurableClass):
def __init__(self, inst_data=None):
self._instance_weakref = None
self._inst_data = inst_data
self._run_id_to_repository_location_handle_cache = {}
@property
def inst_data(self):
return self._inst_data
@classmethod
def config_type(cls):
return {}
@staticmethod
def from_config_value(inst_data, config_value):
return DefaultRunLauncher(inst_data=inst_data)
@property
def _instance(self):
return self._instance_weakref() if self._instance_weakref else None
def initialize(self, instance):
check.inst_param(instance, "instance", DagsterInstance)
check.invariant(self._instance is None, "Must only call initialize once")
self._instance_weakref = weakref.ref(instance)
def launch_run(self, instance, run, external_pipeline):
check.inst_param(run, "run", PipelineRun)
check.inst_param(external_pipeline, "external_pipeline", ExternalPipeline)
repository_location_handle = external_pipeline.repository_handle.repository_location_handle
check.inst(
repository_location_handle,
GRPC_REPOSITORY_LOCATION_HANDLE_TYPES,
"DefaultRunLauncher: Can't launch runs for pipeline not loaded from a GRPC server",
)
self._instance.add_run_tags(
run.run_id,
{
GRPC_INFO_TAG: seven.json.dumps(
merge_dicts(
{"host": repository_location_handle.host},
{"port": repository_location_handle.port}
if repository_location_handle.port
else {"socket": repository_location_handle.socket},
)
)
},
)
res = repository_location_handle.client.start_run(
ExecuteExternalPipelineArgs(
pipeline_origin=external_pipeline.get_external_origin(),
pipeline_run_id=run.run_id,
instance_ref=self._instance.get_ref(),
)
)
if not res.success:
raise (
DagsterLaunchFailedError(
res.message, serializable_error_info=res.serializable_error_info
)
)
self._run_id_to_repository_location_handle_cache[run.run_id] = repository_location_handle
return run
def _get_grpc_client_for_termination(self, run_id):
if not self._instance:
return None
run = self._instance.get_run_by_id(run_id)
if not run or run.is_finished:
return None
tags = run.tags
if GRPC_INFO_TAG not in tags:
return None
grpc_info = seven.json.loads(tags.get(GRPC_INFO_TAG))
return DagsterGrpcClient(
port=grpc_info.get("port"), socket=grpc_info.get("socket"), host=grpc_info.get("host")
)
def can_terminate(self, run_id):
check.str_param(run_id, "run_id")
client = self._get_grpc_client_for_termination(run_id)
if not client:
return False
try:
res = client.can_cancel_execution(CanCancelExecutionRequest(run_id=run_id), timeout=5)
except grpc._channel._InactiveRpcError: # pylint: disable=protected-access
# Server that created the run may no longer exist
return False
return res.can_cancel
def terminate(self, run_id):
check.str_param(run_id, "run_id")
if not self._instance:
return False
run = self._instance.get_run_by_id(run_id)
if not run:
return False
self._instance.report_engine_event(
message="Received pipeline termination request.", pipeline_run=run, cls=self.__class__
)
client = self._get_grpc_client_for_termination(run_id)
if not client:
self._instance.report_engine_event(
message="Unable to get grpc client to send termination request to.",
pipeline_run=run,
cls=self.__class__,
)
return False
res = client.cancel_execution(CancelExecutionRequest(run_id=run_id))
return res.success
def join(self, timeout=30):
# If this hasn't been initialized at all, we can just do a noop
if not self._instance:
return
total_time = 0
interval = 0.01
while True:
active_run_ids = [
run_id
for run_id in self._run_id_to_repository_location_handle_cache.keys()
if (
self._instance.get_run_by_id(run_id)
and not self._instance.get_run_by_id(run_id).is_finished
)
]
if len(active_run_ids) == 0:
return
if total_time >= timeout:
raise Exception(
"Timed out waiting for these runs to finish: {active_run_ids}".format(
active_run_ids=repr(active_run_ids)
)
)
total_time += interval
time.sleep(interval)
interval = interval * 2
def cleanup_managed_grpc_servers(self):
for repository_location_handle in self._run_id_to_repository_location_handle_cache.values():
if isinstance(repository_location_handle, ManagedGrpcPythonEnvRepositoryLocationHandle):
check.invariant(
repository_location_handle.is_cleaned_up,
"ManagedGrpcPythonRepositoryLocationHandle was not cleaned up "
"before test teardown. This may indicate that the handle is not "
"being used as a contextmanager.",
)
repository_location_handle.grpc_server_process.wait()
| true | true |
1c3b1a4a1cf17399ad55f386d2b7743cb44d6bef | 4,561 | py | Python | DQN.py | indigoLovee/DQN | 21a30484014331b21047ecddac4fa584828ee80a | [
"MIT"
] | 1 | 2022-01-17T11:42:20.000Z | 2022-01-17T11:42:20.000Z | DQN.py | indigoLovee/DQN | 21a30484014331b21047ecddac4fa584828ee80a | [
"MIT"
] | null | null | null | DQN.py | indigoLovee/DQN | 21a30484014331b21047ecddac4fa584828ee80a | [
"MIT"
] | null | null | null | import torch as T
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import numpy as np
from buffer import ReplayBuffer
device = T.device("cuda:0" if T.cuda.is_available() else "cpu")
class DeepQNetwork(nn.Module):
def __init__(self, alpha, state_dim, action_dim, fc1_dim, fc2_dim):
super(DeepQNetwork, self).__init__()
self.fc1 = nn.Linear(state_dim, fc1_dim)
self.fc2 = nn.Linear(fc1_dim, fc2_dim)
self.q = nn.Linear(fc2_dim, action_dim)
self.optimizer = optim.Adam(self.parameters(), lr=alpha)
self.to(device)
def forward(self, state):
x = T.relu(self.fc1(state))
x = T.relu(self.fc2(x))
q = self.q(x)
return q
def save_checkpoint(self, checkpoint_file):
T.save(self.state_dict(), checkpoint_file, _use_new_zipfile_serialization=False)
def load_checkpoint(self, checkpoint_file):
self.load_state_dict(T.load(checkpoint_file))
class DQN:
def __init__(self, alpha, state_dim, action_dim, fc1_dim, fc2_dim, ckpt_dir,
gamma=0.99, tau=0.005, epsilon=1.0, eps_end=0.01, eps_dec=5e-4,
max_size=1000000, batch_size=256):
self.tau = tau
self.gamma = gamma
self.epsilon = epsilon
self.eps_min = eps_end
self.eps_dec = eps_dec
self.batch_size = batch_size
self.action_space = [i for i in range(action_dim)]
self.checkpoint_dir = ckpt_dir
self.q_eval = DeepQNetwork(alpha=alpha, state_dim=state_dim, action_dim=action_dim,
fc1_dim=fc1_dim, fc2_dim=fc2_dim)
self.q_target = DeepQNetwork(alpha=alpha, state_dim=state_dim, action_dim=action_dim,
fc1_dim=fc1_dim, fc2_dim=fc2_dim)
self.memory = ReplayBuffer(state_dim=state_dim, action_dim=action_dim,
max_size=max_size, batch_size=batch_size)
self.update_network_parameters(tau=1.0)
def update_network_parameters(self, tau=None):
if tau is None:
tau = self.tau
for q_target_params, q_eval_params in zip(self.q_target.parameters(), self.q_eval.parameters()):
q_target_params.data.copy_(tau * q_eval_params + (1 - tau) * q_target_params)
def remember(self, state, action, reward, state_, done):
self.memory.store_transition(state, action, reward, state_, done)
def choose_action(self, observation, isTrain=True):
state = T.tensor([observation], dtype=T.float).to(device)
actions = self.q_eval.forward(state)
action = T.argmax(actions).item()
if (np.random.random() < self.epsilon) and isTrain:
action = np.random.choice(self.action_space)
return action
def learn(self):
if not self.memory.ready():
return
states, actions, rewards, next_states, terminals = self.memory.sample_buffer()
batch_idx = np.arange(self.batch_size)
states_tensor = T.tensor(states, dtype=T.float).to(device)
rewards_tensor = T.tensor(rewards, dtype=T.float).to(device)
next_states_tensor = T.tensor(next_states, dtype=T.float).to(device)
terminals_tensor = T.tensor(terminals).to(device)
with T.no_grad():
q_ = self.q_target.forward(next_states_tensor)
q_[terminals_tensor] = 0.0
target = rewards_tensor + self.gamma * T.max(q_, dim=-1)[0]
q = self.q_eval.forward(states_tensor)[batch_idx, actions]
loss = F.mse_loss(q, target.detach())
self.q_eval.optimizer.zero_grad()
loss.backward()
self.q_eval.optimizer.step()
self.update_network_parameters()
self.epsilon = self.epsilon - self.eps_dec if self.epsilon > self.eps_min else self.eps_min
def save_models(self, episode):
self.q_eval.save_checkpoint(self.checkpoint_dir + 'Q_eval/DQN_q_eval_{}.pth'.format(episode))
print('Saving Q_eval network successfully!')
self.q_target.save_checkpoint(self.checkpoint_dir + 'Q_target/DQN_Q_target_{}.pth'.format(episode))
print('Saving Q_target network successfully!')
def load_models(self, episode):
self.q_eval.load_checkpoint(self.checkpoint_dir + 'Q_eval/DQN_q_eval_{}.pth'.format(episode))
print('Loading Q_eval network successfully!')
self.q_target.load_checkpoint(self.checkpoint_dir + 'Q_target/DQN_Q_target_{}.pth'.format(episode))
print('Loading Q_target network successfully!')
| 38.008333 | 107 | 0.657312 | import torch as T
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import numpy as np
from buffer import ReplayBuffer
device = T.device("cuda:0" if T.cuda.is_available() else "cpu")
class DeepQNetwork(nn.Module):
def __init__(self, alpha, state_dim, action_dim, fc1_dim, fc2_dim):
super(DeepQNetwork, self).__init__()
self.fc1 = nn.Linear(state_dim, fc1_dim)
self.fc2 = nn.Linear(fc1_dim, fc2_dim)
self.q = nn.Linear(fc2_dim, action_dim)
self.optimizer = optim.Adam(self.parameters(), lr=alpha)
self.to(device)
def forward(self, state):
x = T.relu(self.fc1(state))
x = T.relu(self.fc2(x))
q = self.q(x)
return q
def save_checkpoint(self, checkpoint_file):
T.save(self.state_dict(), checkpoint_file, _use_new_zipfile_serialization=False)
def load_checkpoint(self, checkpoint_file):
self.load_state_dict(T.load(checkpoint_file))
class DQN:
def __init__(self, alpha, state_dim, action_dim, fc1_dim, fc2_dim, ckpt_dir,
gamma=0.99, tau=0.005, epsilon=1.0, eps_end=0.01, eps_dec=5e-4,
max_size=1000000, batch_size=256):
self.tau = tau
self.gamma = gamma
self.epsilon = epsilon
self.eps_min = eps_end
self.eps_dec = eps_dec
self.batch_size = batch_size
self.action_space = [i for i in range(action_dim)]
self.checkpoint_dir = ckpt_dir
self.q_eval = DeepQNetwork(alpha=alpha, state_dim=state_dim, action_dim=action_dim,
fc1_dim=fc1_dim, fc2_dim=fc2_dim)
self.q_target = DeepQNetwork(alpha=alpha, state_dim=state_dim, action_dim=action_dim,
fc1_dim=fc1_dim, fc2_dim=fc2_dim)
self.memory = ReplayBuffer(state_dim=state_dim, action_dim=action_dim,
max_size=max_size, batch_size=batch_size)
self.update_network_parameters(tau=1.0)
def update_network_parameters(self, tau=None):
if tau is None:
tau = self.tau
for q_target_params, q_eval_params in zip(self.q_target.parameters(), self.q_eval.parameters()):
q_target_params.data.copy_(tau * q_eval_params + (1 - tau) * q_target_params)
def remember(self, state, action, reward, state_, done):
self.memory.store_transition(state, action, reward, state_, done)
def choose_action(self, observation, isTrain=True):
state = T.tensor([observation], dtype=T.float).to(device)
actions = self.q_eval.forward(state)
action = T.argmax(actions).item()
if (np.random.random() < self.epsilon) and isTrain:
action = np.random.choice(self.action_space)
return action
def learn(self):
if not self.memory.ready():
return
states, actions, rewards, next_states, terminals = self.memory.sample_buffer()
batch_idx = np.arange(self.batch_size)
states_tensor = T.tensor(states, dtype=T.float).to(device)
rewards_tensor = T.tensor(rewards, dtype=T.float).to(device)
next_states_tensor = T.tensor(next_states, dtype=T.float).to(device)
terminals_tensor = T.tensor(terminals).to(device)
with T.no_grad():
q_ = self.q_target.forward(next_states_tensor)
q_[terminals_tensor] = 0.0
target = rewards_tensor + self.gamma * T.max(q_, dim=-1)[0]
q = self.q_eval.forward(states_tensor)[batch_idx, actions]
loss = F.mse_loss(q, target.detach())
self.q_eval.optimizer.zero_grad()
loss.backward()
self.q_eval.optimizer.step()
self.update_network_parameters()
self.epsilon = self.epsilon - self.eps_dec if self.epsilon > self.eps_min else self.eps_min
def save_models(self, episode):
self.q_eval.save_checkpoint(self.checkpoint_dir + 'Q_eval/DQN_q_eval_{}.pth'.format(episode))
print('Saving Q_eval network successfully!')
self.q_target.save_checkpoint(self.checkpoint_dir + 'Q_target/DQN_Q_target_{}.pth'.format(episode))
print('Saving Q_target network successfully!')
def load_models(self, episode):
self.q_eval.load_checkpoint(self.checkpoint_dir + 'Q_eval/DQN_q_eval_{}.pth'.format(episode))
print('Loading Q_eval network successfully!')
self.q_target.load_checkpoint(self.checkpoint_dir + 'Q_target/DQN_Q_target_{}.pth'.format(episode))
print('Loading Q_target network successfully!')
| true | true |
1c3b1a4b346c13ff2379002ebe655770265d47e4 | 581 | py | Python | galata/jupyter_server_test_config.py | xjc90s/jupyterlab | 82df0b635dae2c1a70a7c41fe7ee7af1c1caefb2 | [
"BSD-3-Clause"
] | null | null | null | galata/jupyter_server_test_config.py | xjc90s/jupyterlab | 82df0b635dae2c1a70a7c41fe7ee7af1c1caefb2 | [
"BSD-3-Clause"
] | 8 | 2022-01-04T19:19:07.000Z | 2022-03-03T22:11:12.000Z | galata/jupyter_server_test_config.py | xjc90s/jupyterlab | 82df0b635dae2c1a70a7c41fe7ee7af1c1caefb2 | [
"BSD-3-Clause"
] | null | null | null | import getpass
import os
from tempfile import mkdtemp
# Test if we are running in a docker
if getpass.getuser() == "jovyan":
c.ServerApp.ip = "0.0.0.0"
c.ServerApp.port = 8888
c.ServerApp.port_retries = 0
c.ServerApp.open_browser = False
c.LabApp.dev_mode = True
c.ServerApp.root_dir = os.environ.get("JUPYTERLAB_GALATA_ROOT_DIR", mkdtemp(prefix="galata-test-"))
c.ServerApp.token = ""
c.ServerApp.password = ""
c.ServerApp.disable_check_xsrf = True
c.LabApp.expose_app_in_browser = True
# Uncomment to set server log level to debug level
# c.ServerApp.log_level = "DEBUG"
| 26.409091 | 99 | 0.753873 | import getpass
import os
from tempfile import mkdtemp
if getpass.getuser() == "jovyan":
c.ServerApp.ip = "0.0.0.0"
c.ServerApp.port = 8888
c.ServerApp.port_retries = 0
c.ServerApp.open_browser = False
c.LabApp.dev_mode = True
c.ServerApp.root_dir = os.environ.get("JUPYTERLAB_GALATA_ROOT_DIR", mkdtemp(prefix="galata-test-"))
c.ServerApp.token = ""
c.ServerApp.password = ""
c.ServerApp.disable_check_xsrf = True
c.LabApp.expose_app_in_browser = True
| true | true |
1c3b1a94c848ca8a9589a7766f6c73f540f9bc4f | 20,270 | py | Python | examples/6a586378-063a-427c-92b2-87d6236615c6.py | lapaniku/GAS | e49ce302689af683da744cd172e0359c0ba0af86 | [
"MIT"
] | null | null | null | examples/6a586378-063a-427c-92b2-87d6236615c6.py | lapaniku/GAS | e49ce302689af683da744cd172e0359c0ba0af86 | [
"MIT"
] | null | null | null | examples/6a586378-063a-427c-92b2-87d6236615c6.py | lapaniku/GAS | e49ce302689af683da744cd172e0359c0ba0af86 | [
"MIT"
] | null | null | null | # This program was generated by "Generative Art Synthesizer"
# Generation date: 2021-11-28 02:06:28 UTC
# GAS change date: 2021-11-28 01:31:12 UTC
# GAS md5 hash: c291ffb9de6ad6dea37797c00163f591
# Python version: 3.7.9 (tags/v3.7.9:13c94747c7, Aug 17 2020, 18:58:18) [MSC v.1900 64 bit (AMD64)]
# For more information visit: https://github.com/volotat/GAS
#import python libraries
import os #OS version: default
import numpy as np #Numpy version: 1.19.5
from PIL import Image #PIL version: 8.1.2
#set initial params
SIZE = 768
GRID_CHANNELS = 30
def test_values(arr):
if np.isnan(arr).any():
raise Exception('Array has None elements!')
if np.amin(arr) < -1 or np.amax(arr) > 1:
raise Exception('Values went to far! [ %.2f : %.2f ]'%(np.amin(arr), np.amax(arr)) )
return arr
#define grid transformation methods
def transit(x, t_indx, s_indx, alphas):
res = x.copy()
res[:,:,t_indx] = np.sum(x[:,:,s_indx] * alphas, axis = -1)
return test_values(res.clip(-1,1))
def sin(x, t_indx, s_indx, scale = 1, shift = 0):
res = x.copy()
res[:,:,t_indx] = np.sin(x[:,:,s_indx] * 0.5 * np.pi * scale + shift)
return test_values(res)
def power(x, t_indx, s_indx, p = 1):
res = x.copy()
res[:,:,t_indx] = np.sign(x[:,:,s_indx]) * np.abs(x[:,:,s_indx]) ** p
return test_values(res)
def magnitude(x, t_indx, s_indx, ord = 2):
res = x.copy()
res[:,:,t_indx] = np.linalg.norm(x[:,:,s_indx], axis = -1, ord = ord) / np.sqrt(len(s_indx))
return test_values(res)
#set initial grid
grid = np.zeros((SIZE, SIZE, GRID_CHANNELS))
x = ((np.arange(SIZE)/(SIZE-1) - 0.5) * 2).reshape((1, SIZE)).repeat(SIZE, 0)
y = ((np.arange(SIZE)/(SIZE-1) - 0.5) * 2).reshape((SIZE, 1)).repeat(SIZE, 1)
phi = np.pi / 4
rx = (x * np.cos(phi) - y * np.sin(phi)) / 1.5
ry = (x * np.cos(phi) + y * np.sin(phi)) / 1.5
for i in range(15):
grid[:,:,i * 2 + 0] = x
grid[:,:,i * 2 + 1] = y
'''
grid[:,:,0 ] = x
grid[:,:,1 ] = y
grid[:,:,2 ] = rx
grid[:,:,3 ] = ry
grid[:,:, 4] = np.sin(x * 0.5 * np.pi)
grid[:,:, 5] = np.sin(y * 0.5 * np.pi)
grid[:,:, 6: 8] = magnitude(grid[:,:,4:6], [0, 1])
grid[:,:, 8] = np.sin(-x * 0.5 * np.pi)
grid[:,:, 9] = np.sin(-y * 0.5 * np.pi)
grid[:,:,10] = np.sin(rx * 0.5 * np.pi)
grid[:,:,11] = np.sin(ry * 0.5 * np.pi)
grid[:,:,12] = np.sin(-rx * 0.5 * np.pi)
grid[:,:,13] = np.sin(-ry * 0.5 * np.pi)
grid[:,:,14] = np.sin(x * 0.5 * np.pi* 2)
grid[:,:,15] = np.sin(y * 0.5 * np.pi* 2)
grid[:,:,16:18] = magnitude(grid[:,:,14:16], [0, 1])
grid[:,:,18] = np.cos(x * 0.5 * np.pi* 2)
grid[:,:,19] = np.cos(y * 0.5 * np.pi* 2)
grid[:,:,20:22] = magnitude(grid[:,:,18:20], [0, 1])
grid[:,:,22] = np.sin(rx * 0.5 * np.pi* 2)
grid[:,:,23] = np.sin(ry * 0.5 * np.pi* 2)
grid[:,:,24:26] = magnitude(grid[:,:,22:24], [0, 1])
grid[:,:,26] = np.cos(rx * 0.5 * np.pi* 2)
grid[:,:,27] = np.cos(ry * 0.5 * np.pi* 2)
grid[:,:,28:30] = magnitude(grid[:,:,26:28], [0, 1])
'''
'''
grid[:,:,30:32] = np.sin(x * 0.5 * np.pi * 3), np.sin(y * 0.5 * np.pi * 3)
grid[:,:,32:34] = magnitude(grid, [30, 31])
grid[:,:,34:36] = np.cos(x * 0.5 * np.pi * 3), np.cos(y * 0.5 * np.pi * 3)
grid[:,:,36:38] = magnitude(grid, [34, 35])
grid[:,:,38:40] = np.sin(rx * 0.5 * np.pi * 3), np.sin(ry * 0.5 * np.pi * 3)
grid[:,:,40:42] = magnitude(grid, [40, 41])
grid[:,:,42:44] = np.cos(rx * 0.5 * np.pi * 3), np.cos(ry * 0.5 * np.pi * 3)
grid[:,:,44:46] = magnitude(grid, [44, 45])
'''
#apply transformations to the grid
grid = transit(grid, 29, [19, 22], [0.162570065071097, 0.837429934928903])
grid = magnitude(grid, 12, [27, 0, 11, 8, 21, 25, 22], 2)
grid = power(grid, 16, 19, 0.6832593210243594)
grid = magnitude(grid, 5, [12, 29, 19, 16, 25, 5, 22, 20, 13, 18, 2, 17, 1, 7], 2)
grid = transit(grid, 19, [29, 17, 18, 24, 6, 12, 13, 11, 8, 20, 0], [0.11956496211881872, 0.05013356366149157, 0.13305054541369926, 0.15246500360275328, 0.05802002562963354, 0.04238582486905315, 0.19554926469073888, 0.08033490218765624, 0.04525393101315875, 0.03341790476201002, 0.08982407205098653])
grid = sin(grid, 29, 4, -2.803277175027569, 43.39456443062289)
grid = sin(grid, 23, 24, 2.0425954175290886, 11.358624030534827)
grid = magnitude(grid, 24, [0, 5, 18, 17, 29, 1, 10, 11, 14], 2)
grid = sin(grid, 3, 16, 0.08116001882496733, 29.607535899235273)
grid = transit(grid, 9, [24, 19], [0.14255129423463317, 0.8574487057653668])
grid = sin(grid, 3, 12, -2.4201168205485177, -42.76220889484386)
grid = transit(grid, 23, [0, 17, 16, 10, 1, 29, 12, 24, 11, 6, 23, 14, 2, 3], [0.04185852737464411, 0.0718958562209562, 0.04144628951866288, 0.06440992347220259, 0.05504490607061211, 0.06002707501087633, 0.14086245922498628, 0.05953422331122396, 0.13174771512588848, 0.09554676788043852, 0.054710845949554616, 0.07072254937205642, 0.046008773998673064, 0.06618408746922441])
grid = sin(grid, 28, 4, -1.1167240340353377, -33.51597872481756)
grid = sin(grid, 6, 16, -5.309685991010303, -73.84592367786468)
grid = sin(grid, 9, 23, -2.6796169840930895, -40.5412027819841)
grid = sin(grid, 15, 6, 1.6192227145410403, 18.29042695814111)
grid = sin(grid, 27, 3, 3.67033472109074, -92.21663905335483)
grid = transit(grid, 3, [4, 7, 29], [0.3386535705350276, 0.5538253330679384, 0.10752109639703403])
grid = sin(grid, 12, 28, 3.3754171464010443, 72.59735794386486)
grid = sin(grid, 28, 23, 3.9592736322225717, -28.872965332555125)
grid = magnitude(grid, 23, [16, 3, 27, 0, 21], 2)
grid = transit(grid, 14, [12, 6, 0, 10], [0.030935598884591263, 0.5285731990934327, 0.3402810463602381, 0.10021015566173799])
grid = transit(grid, 22, [19, 7, 5, 29, 6, 26], [0.14337063648845627, 0.24142817057917984, 0.11685279951452533, 0.023075679344775824, 0.2754286905791286, 0.19984402349393413])
grid = sin(grid, 26, 6, -2.4091211781495296, -82.64154311894532)
grid = magnitude(grid, 14, [3, 10, 14, 23, 26, 21, 20, 13, 17], 2)
grid = magnitude(grid, 7, [23, 6, 2], 2)
grid = sin(grid, 10, 22, 2.050315504251981, 89.5744631928493)
grid = transit(grid, 20, [25, 29, 1, 6, 12, 8, 14, 17, 23], [0.004899579789304808, 0.09314319527557183, 0.03998202780338693, 0.19272244068492897, 0.34501737224324885, 0.17740036381342622, 0.06353176938513716, 0.03203715570193849, 0.05126609530305695])
grid = sin(grid, 11, 20, 4.390960200726324, -71.44216611954899)
grid = magnitude(grid, 7, [20, 16, 1, 25, 13], 2)
grid = magnitude(grid, 22, [20, 27, 16, 11, 23, 15, 29, 25, 0, 17, 2, 5, 1, 28, 12], 2)
grid = transit(grid, 19, [20, 4, 3, 16], [0.1526830825248949, 0.7081439352898777, 0.06775906310079587, 0.07141391908443141])
grid = sin(grid, 26, 26, -3.0877394923548906, 44.03402898112404)
grid = transit(grid, 23, [15, 24, 12, 22, 19, 23], [0.2974634270268527, 0.18571837778194167, 0.22122718321511456, 0.05894778995186554, 0.19810172312674557, 0.03854149889747992])
grid = sin(grid, 11, 9, 1.0239782977935226, 65.26260230502231)
grid = sin(grid, 9, 25, -7.566790267054586, -75.63082272426975)
grid = transit(grid, 15, [9, 15, 26, 2, 24, 28, 19, 12, 5, 13, 1, 4, 16, 23, 20, 6, 10, 18], [0.06616516411967799, 0.09777590819145016, 0.0002712313941477737, 0.09746330541437898, 0.005397802149808619, 0.07718643014961299, 0.13148160411157372, 0.09102197762563803, 0.03185803109227711, 0.0241181740051075, 0.034195430141271195, 0.03951611394918786, 0.0787746792428292, 0.03692743163524459, 0.002994098366014297, 0.062803596094192, 0.08020173317182712, 0.041847289145760795])
grid = transit(grid, 4, [25, 29, 22, 1, 6, 9, 19, 21, 18, 16, 17, 5, 20, 0, 12], [0.08432611754036021, 0.009140664239356671, 0.08060088757913146, 0.06166245074953199, 0.1638729657005139, 0.034479801216239156, 0.014854982191717304, 0.08772065521432443, 0.043708056308515354, 0.11043876578842901, 0.12519722186516116, 0.023175558417975587, 0.01018347967163066, 0.1176477109575453, 0.032990682559567955])
grid = sin(grid, 12, 7, -1.7660206046047084, -85.28896741511835)
grid = transit(grid, 14, [18, 28, 1, 25, 26, 15, 16, 19, 12, 14, 5, 8], [0.061399029075051736, 0.16128653484720623, 0.04238018700257984, 0.07363074210463408, 0.09276563659827074, 0.044181324827153534, 0.1458446676143112, 0.002814862772849515, 0.12681141102429905, 0.09165683421119886, 0.05453631469851343, 0.10269245522393174])
grid = power(grid, 28, 29, 0.3779782940331584)
grid = sin(grid, 19, 27, -2.5705555933884487, 80.63298070706631)
grid = transit(grid, 25, [0, 5, 12, 29, 27, 7, 2, 25, 18, 8], [0.13649589534415188, 0.014678989258920187, 0.07847237192131681, 0.11124864055343385, 0.08219416634833716, 0.04507998114443801, 0.1618879569706191, 0.2700382467691338, 0.05609092564881936, 0.043812826040829804])
grid = sin(grid, 27, 5, 2.9386845297236146, -8.401158648822786)
grid = transit(grid, 9, [29, 10, 7, 18, 25, 11, 16, 21], [0.029167418654736797, 0.003595910474137072, 0.29287241571346795, 0.07022515471929672, 0.10158497813413986, 0.059246305191391915, 0.32077383459013076, 0.12253398252269886])
grid = power(grid, 4, 27, 2.403332922052619)
grid = sin(grid, 12, 27, -6.706458142837571, -67.86915645909208)
grid = sin(grid, 12, 16, -2.708811323493574, 61.963825635946876)
grid = sin(grid, 27, 21, 2.979990355413568, 55.608556726719144)
grid = transit(grid, 10, [25, 12], [0.917610969357756, 0.08238903064224394])
grid = sin(grid, 7, 7, 1.3211939890122422, 72.76020788877838)
grid = sin(grid, 14, 4, 0.5306507527772861, 80.17684312260022)
grid = sin(grid, 3, 10, -0.5503493938868814, -5.962185131409427)
grid = transit(grid, 15, [25, 28, 1, 5], [0.321600534827853, 0.10542445081098709, 0.3304333599084767, 0.2425416544526832])
grid = magnitude(grid, 20, [6, 9], 2)
grid = sin(grid, 18, 25, 1.3894004680099752, 89.25662087299591)
grid = transit(grid, 20, [4, 17, 22, 9, 2, 3], [0.28470977015474064, 0.11783602510587528, 0.14947881924125034, 0.07641898119264072, 0.299966212618196, 0.07159019168729713])
grid = sin(grid, 2, 12, -0.5867291269053801, 28.167611255741008)
grid = transit(grid, 16, [17, 27, 6, 23, 19, 25, 16, 9, 14, 4, 0], [0.10447719623269758, 0.048407918591629864, 0.007763166119990237, 0.0022140470040574907, 0.24849459533294363, 0.12023286519064905, 0.17871974770364935, 0.05735132208762337, 0.036326220968249515, 0.10867143758787537, 0.08734148318063453])
grid = transit(grid, 1, [7, 23, 26, 14, 20, 19, 28, 8, 9, 17, 22, 0, 5], [0.013121733291342785, 0.11301130736686454, 0.01352586294053668, 0.15913305810858402, 0.11915145281306491, 0.0006517312319511964, 0.008548387417477, 0.0904394241347426, 0.01789058436883307, 0.07037753745683703, 0.07076335166856433, 0.07800578985114522, 0.24537977935005661])
grid = sin(grid, 18, 28, -1.9841443767920823, -69.03014320058003)
grid = transit(grid, 5, [22, 19, 25, 27, 20, 21, 18, 3], [0.13269119455929568, 0.13927419514581135, 0.2353360212273103, 0.05358973946883631, 0.11709248299854554, 0.05695975943841826, 0.03345046365270227, 0.2316061435090803])
grid = sin(grid, 1, 9, -3.8143949239238193, -90.24439830058608)
grid = magnitude(grid, 22, [13, 26, 6, 9, 7, 15, 8, 21, 12, 25, 29, 5, 23, 19], 2)
grid = magnitude(grid, 7, [15, 10, 23], 2)
grid = transit(grid, 18, [29, 8, 1, 11, 13, 9, 27, 6, 21, 4, 10, 15, 19, 5, 0], [0.1772994331856414, 0.068125216107354, 0.18999349792890638, 0.019818681800181075, 0.04415831002215933, 0.03035314916143176, 0.019871427752056706, 0.17461556101263392, 0.043587710771764196, 0.037194038159689476, 0.05795222455290955, 0.022431635860234538, 0.07352074703380847, 0.020324565267279607, 0.020753801383949444])
grid = magnitude(grid, 28, [27, 11, 1, 5, 28], 2)
grid = sin(grid, 10, 29, 0.9754517253039042, 19.167473301905645)
grid = sin(grid, 12, 27, -2.8452733457996318, 79.15482610320453)
grid = transit(grid, 23, [23, 19, 27, 29, 5, 2, 20, 0, 6, 14, 28], [0.08798444677572927, 0.12029606201225304, 0.10947313847863878, 0.12859008053676993, 0.045403922186473065, 0.12432237963799758, 0.14016659493536382, 0.04300443803477972, 0.01734694652028419, 0.0981174832644981, 0.0852945076172126])
grid = magnitude(grid, 23, [12, 14, 19, 21, 9], 2)
grid = power(grid, 29, 5, 1.817083710798804)
grid = magnitude(grid, 2, [13, 6, 29, 24, 27, 21, 16, 14, 22, 4], 2)
grid = transit(grid, 8, [3], [1.0])
grid = sin(grid, 18, 29, 0.6389415083446274, 80.8749712491909)
grid = transit(grid, 14, [25, 2, 18, 8, 3, 15, 1, 16, 14, 5, 6, 13, 20, 0, 26, 9, 22, 28, 10, 17, 4, 29, 24, 11, 7, 23], [0.005871309937873514, 0.018003505494664063, 0.004367270790055393, 0.02131452056580914, 0.04892744693923117, 0.058471392811199306, 0.07048395159287357, 0.017318473991941724, 0.07994321420066938, 0.05394716631490228, 0.03519745217167912, 0.02235005759125536, 0.04715412242850838, 0.02747045659126205, 0.07155917229027325, 0.060822977475415284, 0.04361415578730645, 0.0354280883191885, 0.005739110717730895, 0.04491271555615977, 0.04349743113332699, 0.026095715559849145, 0.026899299768024996, 0.037327346757871395, 0.012878407330765023, 0.080405237882164])
grid = sin(grid, 10, 6, 1.615810670294585, 6.059030950147061)
grid = transit(grid, 4, [21], [1.0])
grid = transit(grid, 0, [9, 6, 21, 23, 10, 25, 5, 29], [0.008407947275370496, 0.22332672029591105, 0.17669528269181908, 0.061943871236512044, 0.00631614100316715, 0.04896761458648364, 0.219980694309923, 0.25436172860081346])
grid = transit(grid, 7, [28, 3, 8, 24, 5, 26, 16, 25, 20], [0.008076733658800364, 0.13788018636125093, 0.047745229040943256, 0.08590191794838145, 0.3359021344945731, 0.13627021446299625, 0.07199274602464636, 0.05807097072801425, 0.11815986728039397])
grid = magnitude(grid, 5, [13, 16], 2)
grid = sin(grid, 23, 19, -0.6582224493825697, 87.34462867779135)
grid = sin(grid, 28, 10, 2.201883125073067, 80.07621747819877)
grid = magnitude(grid, 10, [12, 0, 4, 21, 8, 20, 3, 27, 7, 13, 6], 2)
grid = magnitude(grid, 26, [12, 16, 6, 22, 27, 20, 14, 26, 19, 18, 8], 2)
grid = transit(grid, 21, [10, 1, 21, 23, 6, 22, 7, 24, 15], [0.14098959190193464, 0.16885649738040365, 0.21508280162861007, 0.08756191883026329, 0.06797141331659777, 0.040586793475855774, 0.07556426350656567, 0.13577049344872752, 0.06761622651104154])
grid = sin(grid, 19, 21, -1.7650391823704892, 89.53123311944441)
grid = sin(grid, 27, 20, 4.102907734657698, -18.506040345024942)
grid = sin(grid, 8, 16, -3.2047717871756047, 50.031074019769875)
grid = sin(grid, 1, 3, -1.2298315940257807, 67.92621901520556)
grid = transit(grid, 10, [5, 12, 6, 7, 21], [0.04017259629004803, 0.035935993838594436, 0.38852998766486463, 0.39464597353448644, 0.14071544867200647])
grid = transit(grid, 17, [9, 22, 0, 8], [0.07232347283831883, 0.23909484436189507, 0.16476406248235922, 0.523817620317427])
grid = sin(grid, 11, 23, 3.4291605572557367, 77.65408388973503)
grid = sin(grid, 20, 27, 1.9438852600878178, -69.26160333661483)
grid = transit(grid, 19, [13, 7, 14, 12, 29], [0.15078244000703844, 0.30329951250855647, 0.052826921314074654, 0.014009457594495888, 0.47908166857583445])
grid = magnitude(grid, 4, [13, 5, 9, 16, 11, 6, 24, 14, 12, 1, 3, 22, 20, 8, 7, 15], 2)
grid = power(grid, 5, 12, 0.32534340921919336)
grid = sin(grid, 10, 18, -1.5546372261064723, 79.40807200485779)
grid = transit(grid, 0, [2, 17, 23, 0, 9, 10, 6, 20, 26, 22, 25, 11, 27, 21, 13, 12, 1, 16], [0.04666741358409691, 0.10041235530271467, 0.13656890415482237, 0.0505888093029676, 0.07654195022307062, 0.18419004471071113, 0.05046245679380782, 0.007552503418946401, 0.0004481713476168337, 0.011885682921671083, 0.032088980266198504, 0.0023404498982659153, 0.03348183036453658, 0.045952614669238355, 0.10372072735870042, 0.01789271596791753, 0.04799484234941445, 0.05120954736530277])
grid = transit(grid, 5, [25, 27], [0.5725941398341273, 0.4274058601658726])
grid = transit(grid, 19, [15, 29, 23, 21, 11, 10, 20, 4, 17, 2, 0, 13, 3, 1, 5, 8], [0.031537796589242675, 0.02689608568216389, 0.006352970412025167, 0.02945197836138472, 0.004733254055032123, 0.031841731262449186, 0.09701814809592517, 0.021578470501320998, 0.0071680768933244385, 0.11952705984922679, 0.05536518282979036, 0.06581861347697791, 0.2343306944232139, 0.10779797912646302, 0.09300120880000046, 0.06758074964145924])
grid = sin(grid, 11, 0, 2.563909435379265, 0.4450018649816627)
grid = magnitude(grid, 27, [16, 11, 8, 0, 28, 12, 17, 15, 18, 20, 26], 2)
grid = sin(grid, 19, 21, -3.4132246278785883, 84.16701903091374)
grid = sin(grid, 28, 13, -2.3675718627702755, 41.175741022703875)
grid = transit(grid, 7, [10, 9, 27, 23, 19, 5, 1, 22, 7], [0.14363062171830737, 0.01687188812140151, 0.13196113638415463, 0.17866921525288296, 0.07172903294369104, 0.1268434984434265, 0.09262190525507281, 0.13843366504033602, 0.09923903684072709])
grid = sin(grid, 1, 17, 0.4190217510777763, -96.31752118334663)
grid = sin(grid, 4, 25, -3.0130782974573114, 0.045638670109738655)
grid = sin(grid, 23, 16, -6.362468253360612, 24.730444687537883)
grid = sin(grid, 17, 14, -2.3747658845203916, -57.23440657206675)
grid = sin(grid, 19, 19, 2.4592230816940326, -51.76853764043066)
grid = magnitude(grid, 26, [4], 2)
grid = sin(grid, 0, 26, 2.879410066683457, 5.223173332129804)
grid = sin(grid, 10, 6, -3.2555765761277127, -17.443575197843472)
grid = transit(grid, 25, [24, 2, 25, 0, 12, 4], [0.03568795619526225, 0.08500737200701228, 0.05240229364632595, 0.603980978240824, 0.17712678127987705, 0.04579461863069845])
grid = sin(grid, 6, 29, -0.12051802831906497, 89.64443842624468)
grid = transit(grid, 5, [27, 16, 23, 9, 17], [0.22131895230663756, 0.07144315447485797, 0.49736096611646524, 0.13402807138531572, 0.07584885571672335])
grid = transit(grid, 12, [22, 8, 29, 16], [0.25792429467789524, 0.25168913008212207, 0.38751847922685195, 0.10286809601313074])
grid = magnitude(grid, 24, [12], 2)
grid = sin(grid, 21, 18, 6.904882453110925, 43.76686597000625)
grid = sin(grid, 27, 12, -2.3149706703321784, 91.4634229451533)
grid = transit(grid, 11, [18, 8, 23], [0.9456048289219839, 0.02282944678495521, 0.031565724293060864])
grid = sin(grid, 3, 22, -5.427035197241231, 70.63770520279803)
grid = sin(grid, 27, 15, -0.8306409707765449, 16.388610614890496)
grid = magnitude(grid, 22, [19, 20, 0, 14, 29, 16, 13, 11, 12, 7], 2)
grid = sin(grid, 22, 2, -0.2131858223375026, 23.110302271816437)
grid = transit(grid, 15, [26, 17, 18, 3, 20, 6], [0.26298407289730785, 0.036929318879447975, 0.21956318893577373, 0.12140448131206344, 0.2932362214654605, 0.06588271650994641])
grid = transit(grid, 6, [10, 0, 14, 4, 16, 26, 29], [0.3266526570756889, 0.010367316493219989, 0.06038405155138366, 0.18542143850276785, 0.15350917236048142, 0.1066459060285463, 0.1570194579879119])
grid = transit(grid, 3, [29, 9, 12, 18, 10, 17], [0.30897372209328766, 0.10630103874152365, 0.15658027364196273, 0.29474023685015555, 0.1326016707345515, 0.0008030579385190207])
grid = magnitude(grid, 11, [16, 7, 25, 22, 3, 17, 13, 0, 12, 27], 2)
grid = sin(grid, 13, 1, 2.9658129882147084, -41.317540719432344)
grid = magnitude(grid, 2, [19, 13, 16, 27, 26], 2)
grid = sin(grid, 1, 8, 6.699130217836646, 5.293135687331116)
grid = sin(grid, 0, 27, -6.580745881619362, 70.25836976864827)
grid = transit(grid, 24, [5], [1.0])
grid = sin(grid, 10, 7, -3.7620909835549288, -17.85297224969564)
grid = sin(grid, 18, 17, 6.374775580070441, -82.34320143877852)
grid = transit(grid, 27, [21, 3, 19, 27, 10, 17, 13], [0.03817904844946292, 0.12948414697169902, 0.02053094019023183, 0.17470975944365325, 0.2705479342577574, 0.164395301382941, 0.20215286930425458])
#create color space
colors = np.zeros((6, 3))
colors[0] = [51, 169, 182]
colors[1] = [8, 23, 138]
colors[2] = [93, 97, 239]
colors[3] = [98, 25, 66]
colors[4] = [60, 71, 233]
colors[5] = [191, 187, 173]
res = np.zeros((SIZE, SIZE, 3))
res += (grid[:,:,0:0+1].repeat(3, -1) + 1) / 2 * colors[0]
res += (grid[:,:,1:1+1].repeat(3, -1) + 1) / 2 * colors[1]
res += (grid[:,:,2:2+1].repeat(3, -1) + 1) / 2 * colors[2]
res += (grid[:,:,3:3+1].repeat(3, -1) + 1) / 2 * colors[3]
res += (grid[:,:,4:4+1].repeat(3, -1) + 1) / 2 * colors[4]
res += (grid[:,:,5:5+1].repeat(3, -1) + 1) / 2 * colors[5]
res = res / colors.sum(0) * 255
#save results
im = Image.fromarray(np.uint8(res))
im.save(os.path.basename(__file__) + '.png')
'''
#save layers
img = np.zeros((SIZE * 6, SIZE * 6))
for j in range(GRID_CHANNELS):
x = j % 6
y = j // 6
img[x*SIZE:(x + 1)*SIZE, y*SIZE:(y+1)*SIZE] = grid[:,:,j]
img = (img + 1) * 127.5
im = Image.fromarray(np.uint8(img))
im.save(os.path.basename(__file__) + '_layers.png')
'''
| 71.879433 | 676 | 0.694919 |
import os
import numpy as np
from PIL import Image
SIZE = 768
GRID_CHANNELS = 30
def test_values(arr):
if np.isnan(arr).any():
raise Exception('Array has None elements!')
if np.amin(arr) < -1 or np.amax(arr) > 1:
raise Exception('Values went to far! [ %.2f : %.2f ]'%(np.amin(arr), np.amax(arr)) )
return arr
def transit(x, t_indx, s_indx, alphas):
res = x.copy()
res[:,:,t_indx] = np.sum(x[:,:,s_indx] * alphas, axis = -1)
return test_values(res.clip(-1,1))
def sin(x, t_indx, s_indx, scale = 1, shift = 0):
res = x.copy()
res[:,:,t_indx] = np.sin(x[:,:,s_indx] * 0.5 * np.pi * scale + shift)
return test_values(res)
def power(x, t_indx, s_indx, p = 1):
res = x.copy()
res[:,:,t_indx] = np.sign(x[:,:,s_indx]) * np.abs(x[:,:,s_indx]) ** p
return test_values(res)
def magnitude(x, t_indx, s_indx, ord = 2):
res = x.copy()
res[:,:,t_indx] = np.linalg.norm(x[:,:,s_indx], axis = -1, ord = ord) / np.sqrt(len(s_indx))
return test_values(res)
grid = np.zeros((SIZE, SIZE, GRID_CHANNELS))
x = ((np.arange(SIZE)/(SIZE-1) - 0.5) * 2).reshape((1, SIZE)).repeat(SIZE, 0)
y = ((np.arange(SIZE)/(SIZE-1) - 0.5) * 2).reshape((SIZE, 1)).repeat(SIZE, 1)
phi = np.pi / 4
rx = (x * np.cos(phi) - y * np.sin(phi)) / 1.5
ry = (x * np.cos(phi) + y * np.sin(phi)) / 1.5
for i in range(15):
grid[:,:,i * 2 + 0] = x
grid[:,:,i * 2 + 1] = y
grid = transit(grid, 29, [19, 22], [0.162570065071097, 0.837429934928903])
grid = magnitude(grid, 12, [27, 0, 11, 8, 21, 25, 22], 2)
grid = power(grid, 16, 19, 0.6832593210243594)
grid = magnitude(grid, 5, [12, 29, 19, 16, 25, 5, 22, 20, 13, 18, 2, 17, 1, 7], 2)
grid = transit(grid, 19, [29, 17, 18, 24, 6, 12, 13, 11, 8, 20, 0], [0.11956496211881872, 0.05013356366149157, 0.13305054541369926, 0.15246500360275328, 0.05802002562963354, 0.04238582486905315, 0.19554926469073888, 0.08033490218765624, 0.04525393101315875, 0.03341790476201002, 0.08982407205098653])
grid = sin(grid, 29, 4, -2.803277175027569, 43.39456443062289)
grid = sin(grid, 23, 24, 2.0425954175290886, 11.358624030534827)
grid = magnitude(grid, 24, [0, 5, 18, 17, 29, 1, 10, 11, 14], 2)
grid = sin(grid, 3, 16, 0.08116001882496733, 29.607535899235273)
grid = transit(grid, 9, [24, 19], [0.14255129423463317, 0.8574487057653668])
grid = sin(grid, 3, 12, -2.4201168205485177, -42.76220889484386)
grid = transit(grid, 23, [0, 17, 16, 10, 1, 29, 12, 24, 11, 6, 23, 14, 2, 3], [0.04185852737464411, 0.0718958562209562, 0.04144628951866288, 0.06440992347220259, 0.05504490607061211, 0.06002707501087633, 0.14086245922498628, 0.05953422331122396, 0.13174771512588848, 0.09554676788043852, 0.054710845949554616, 0.07072254937205642, 0.046008773998673064, 0.06618408746922441])
grid = sin(grid, 28, 4, -1.1167240340353377, -33.51597872481756)
grid = sin(grid, 6, 16, -5.309685991010303, -73.84592367786468)
grid = sin(grid, 9, 23, -2.6796169840930895, -40.5412027819841)
grid = sin(grid, 15, 6, 1.6192227145410403, 18.29042695814111)
grid = sin(grid, 27, 3, 3.67033472109074, -92.21663905335483)
grid = transit(grid, 3, [4, 7, 29], [0.3386535705350276, 0.5538253330679384, 0.10752109639703403])
grid = sin(grid, 12, 28, 3.3754171464010443, 72.59735794386486)
grid = sin(grid, 28, 23, 3.9592736322225717, -28.872965332555125)
grid = magnitude(grid, 23, [16, 3, 27, 0, 21], 2)
grid = transit(grid, 14, [12, 6, 0, 10], [0.030935598884591263, 0.5285731990934327, 0.3402810463602381, 0.10021015566173799])
grid = transit(grid, 22, [19, 7, 5, 29, 6, 26], [0.14337063648845627, 0.24142817057917984, 0.11685279951452533, 0.023075679344775824, 0.2754286905791286, 0.19984402349393413])
grid = sin(grid, 26, 6, -2.4091211781495296, -82.64154311894532)
grid = magnitude(grid, 14, [3, 10, 14, 23, 26, 21, 20, 13, 17], 2)
grid = magnitude(grid, 7, [23, 6, 2], 2)
grid = sin(grid, 10, 22, 2.050315504251981, 89.5744631928493)
grid = transit(grid, 20, [25, 29, 1, 6, 12, 8, 14, 17, 23], [0.004899579789304808, 0.09314319527557183, 0.03998202780338693, 0.19272244068492897, 0.34501737224324885, 0.17740036381342622, 0.06353176938513716, 0.03203715570193849, 0.05126609530305695])
grid = sin(grid, 11, 20, 4.390960200726324, -71.44216611954899)
grid = magnitude(grid, 7, [20, 16, 1, 25, 13], 2)
grid = magnitude(grid, 22, [20, 27, 16, 11, 23, 15, 29, 25, 0, 17, 2, 5, 1, 28, 12], 2)
grid = transit(grid, 19, [20, 4, 3, 16], [0.1526830825248949, 0.7081439352898777, 0.06775906310079587, 0.07141391908443141])
grid = sin(grid, 26, 26, -3.0877394923548906, 44.03402898112404)
grid = transit(grid, 23, [15, 24, 12, 22, 19, 23], [0.2974634270268527, 0.18571837778194167, 0.22122718321511456, 0.05894778995186554, 0.19810172312674557, 0.03854149889747992])
grid = sin(grid, 11, 9, 1.0239782977935226, 65.26260230502231)
grid = sin(grid, 9, 25, -7.566790267054586, -75.63082272426975)
grid = transit(grid, 15, [9, 15, 26, 2, 24, 28, 19, 12, 5, 13, 1, 4, 16, 23, 20, 6, 10, 18], [0.06616516411967799, 0.09777590819145016, 0.0002712313941477737, 0.09746330541437898, 0.005397802149808619, 0.07718643014961299, 0.13148160411157372, 0.09102197762563803, 0.03185803109227711, 0.0241181740051075, 0.034195430141271195, 0.03951611394918786, 0.0787746792428292, 0.03692743163524459, 0.002994098366014297, 0.062803596094192, 0.08020173317182712, 0.041847289145760795])
grid = transit(grid, 4, [25, 29, 22, 1, 6, 9, 19, 21, 18, 16, 17, 5, 20, 0, 12], [0.08432611754036021, 0.009140664239356671, 0.08060088757913146, 0.06166245074953199, 0.1638729657005139, 0.034479801216239156, 0.014854982191717304, 0.08772065521432443, 0.043708056308515354, 0.11043876578842901, 0.12519722186516116, 0.023175558417975587, 0.01018347967163066, 0.1176477109575453, 0.032990682559567955])
grid = sin(grid, 12, 7, -1.7660206046047084, -85.28896741511835)
grid = transit(grid, 14, [18, 28, 1, 25, 26, 15, 16, 19, 12, 14, 5, 8], [0.061399029075051736, 0.16128653484720623, 0.04238018700257984, 0.07363074210463408, 0.09276563659827074, 0.044181324827153534, 0.1458446676143112, 0.002814862772849515, 0.12681141102429905, 0.09165683421119886, 0.05453631469851343, 0.10269245522393174])
grid = power(grid, 28, 29, 0.3779782940331584)
grid = sin(grid, 19, 27, -2.5705555933884487, 80.63298070706631)
grid = transit(grid, 25, [0, 5, 12, 29, 27, 7, 2, 25, 18, 8], [0.13649589534415188, 0.014678989258920187, 0.07847237192131681, 0.11124864055343385, 0.08219416634833716, 0.04507998114443801, 0.1618879569706191, 0.2700382467691338, 0.05609092564881936, 0.043812826040829804])
grid = sin(grid, 27, 5, 2.9386845297236146, -8.401158648822786)
grid = transit(grid, 9, [29, 10, 7, 18, 25, 11, 16, 21], [0.029167418654736797, 0.003595910474137072, 0.29287241571346795, 0.07022515471929672, 0.10158497813413986, 0.059246305191391915, 0.32077383459013076, 0.12253398252269886])
grid = power(grid, 4, 27, 2.403332922052619)
grid = sin(grid, 12, 27, -6.706458142837571, -67.86915645909208)
grid = sin(grid, 12, 16, -2.708811323493574, 61.963825635946876)
grid = sin(grid, 27, 21, 2.979990355413568, 55.608556726719144)
grid = transit(grid, 10, [25, 12], [0.917610969357756, 0.08238903064224394])
grid = sin(grid, 7, 7, 1.3211939890122422, 72.76020788877838)
grid = sin(grid, 14, 4, 0.5306507527772861, 80.17684312260022)
grid = sin(grid, 3, 10, -0.5503493938868814, -5.962185131409427)
grid = transit(grid, 15, [25, 28, 1, 5], [0.321600534827853, 0.10542445081098709, 0.3304333599084767, 0.2425416544526832])
grid = magnitude(grid, 20, [6, 9], 2)
grid = sin(grid, 18, 25, 1.3894004680099752, 89.25662087299591)
grid = transit(grid, 20, [4, 17, 22, 9, 2, 3], [0.28470977015474064, 0.11783602510587528, 0.14947881924125034, 0.07641898119264072, 0.299966212618196, 0.07159019168729713])
grid = sin(grid, 2, 12, -0.5867291269053801, 28.167611255741008)
grid = transit(grid, 16, [17, 27, 6, 23, 19, 25, 16, 9, 14, 4, 0], [0.10447719623269758, 0.048407918591629864, 0.007763166119990237, 0.0022140470040574907, 0.24849459533294363, 0.12023286519064905, 0.17871974770364935, 0.05735132208762337, 0.036326220968249515, 0.10867143758787537, 0.08734148318063453])
grid = transit(grid, 1, [7, 23, 26, 14, 20, 19, 28, 8, 9, 17, 22, 0, 5], [0.013121733291342785, 0.11301130736686454, 0.01352586294053668, 0.15913305810858402, 0.11915145281306491, 0.0006517312319511964, 0.008548387417477, 0.0904394241347426, 0.01789058436883307, 0.07037753745683703, 0.07076335166856433, 0.07800578985114522, 0.24537977935005661])
grid = sin(grid, 18, 28, -1.9841443767920823, -69.03014320058003)
grid = transit(grid, 5, [22, 19, 25, 27, 20, 21, 18, 3], [0.13269119455929568, 0.13927419514581135, 0.2353360212273103, 0.05358973946883631, 0.11709248299854554, 0.05695975943841826, 0.03345046365270227, 0.2316061435090803])
grid = sin(grid, 1, 9, -3.8143949239238193, -90.24439830058608)
grid = magnitude(grid, 22, [13, 26, 6, 9, 7, 15, 8, 21, 12, 25, 29, 5, 23, 19], 2)
grid = magnitude(grid, 7, [15, 10, 23], 2)
grid = transit(grid, 18, [29, 8, 1, 11, 13, 9, 27, 6, 21, 4, 10, 15, 19, 5, 0], [0.1772994331856414, 0.068125216107354, 0.18999349792890638, 0.019818681800181075, 0.04415831002215933, 0.03035314916143176, 0.019871427752056706, 0.17461556101263392, 0.043587710771764196, 0.037194038159689476, 0.05795222455290955, 0.022431635860234538, 0.07352074703380847, 0.020324565267279607, 0.020753801383949444])
grid = magnitude(grid, 28, [27, 11, 1, 5, 28], 2)
grid = sin(grid, 10, 29, 0.9754517253039042, 19.167473301905645)
grid = sin(grid, 12, 27, -2.8452733457996318, 79.15482610320453)
grid = transit(grid, 23, [23, 19, 27, 29, 5, 2, 20, 0, 6, 14, 28], [0.08798444677572927, 0.12029606201225304, 0.10947313847863878, 0.12859008053676993, 0.045403922186473065, 0.12432237963799758, 0.14016659493536382, 0.04300443803477972, 0.01734694652028419, 0.0981174832644981, 0.0852945076172126])
grid = magnitude(grid, 23, [12, 14, 19, 21, 9], 2)
grid = power(grid, 29, 5, 1.817083710798804)
grid = magnitude(grid, 2, [13, 6, 29, 24, 27, 21, 16, 14, 22, 4], 2)
grid = transit(grid, 8, [3], [1.0])
grid = sin(grid, 18, 29, 0.6389415083446274, 80.8749712491909)
grid = transit(grid, 14, [25, 2, 18, 8, 3, 15, 1, 16, 14, 5, 6, 13, 20, 0, 26, 9, 22, 28, 10, 17, 4, 29, 24, 11, 7, 23], [0.005871309937873514, 0.018003505494664063, 0.004367270790055393, 0.02131452056580914, 0.04892744693923117, 0.058471392811199306, 0.07048395159287357, 0.017318473991941724, 0.07994321420066938, 0.05394716631490228, 0.03519745217167912, 0.02235005759125536, 0.04715412242850838, 0.02747045659126205, 0.07155917229027325, 0.060822977475415284, 0.04361415578730645, 0.0354280883191885, 0.005739110717730895, 0.04491271555615977, 0.04349743113332699, 0.026095715559849145, 0.026899299768024996, 0.037327346757871395, 0.012878407330765023, 0.080405237882164])
grid = sin(grid, 10, 6, 1.615810670294585, 6.059030950147061)
grid = transit(grid, 4, [21], [1.0])
grid = transit(grid, 0, [9, 6, 21, 23, 10, 25, 5, 29], [0.008407947275370496, 0.22332672029591105, 0.17669528269181908, 0.061943871236512044, 0.00631614100316715, 0.04896761458648364, 0.219980694309923, 0.25436172860081346])
grid = transit(grid, 7, [28, 3, 8, 24, 5, 26, 16, 25, 20], [0.008076733658800364, 0.13788018636125093, 0.047745229040943256, 0.08590191794838145, 0.3359021344945731, 0.13627021446299625, 0.07199274602464636, 0.05807097072801425, 0.11815986728039397])
grid = magnitude(grid, 5, [13, 16], 2)
grid = sin(grid, 23, 19, -0.6582224493825697, 87.34462867779135)
grid = sin(grid, 28, 10, 2.201883125073067, 80.07621747819877)
grid = magnitude(grid, 10, [12, 0, 4, 21, 8, 20, 3, 27, 7, 13, 6], 2)
grid = magnitude(grid, 26, [12, 16, 6, 22, 27, 20, 14, 26, 19, 18, 8], 2)
grid = transit(grid, 21, [10, 1, 21, 23, 6, 22, 7, 24, 15], [0.14098959190193464, 0.16885649738040365, 0.21508280162861007, 0.08756191883026329, 0.06797141331659777, 0.040586793475855774, 0.07556426350656567, 0.13577049344872752, 0.06761622651104154])
grid = sin(grid, 19, 21, -1.7650391823704892, 89.53123311944441)
grid = sin(grid, 27, 20, 4.102907734657698, -18.506040345024942)
grid = sin(grid, 8, 16, -3.2047717871756047, 50.031074019769875)
grid = sin(grid, 1, 3, -1.2298315940257807, 67.92621901520556)
grid = transit(grid, 10, [5, 12, 6, 7, 21], [0.04017259629004803, 0.035935993838594436, 0.38852998766486463, 0.39464597353448644, 0.14071544867200647])
grid = transit(grid, 17, [9, 22, 0, 8], [0.07232347283831883, 0.23909484436189507, 0.16476406248235922, 0.523817620317427])
grid = sin(grid, 11, 23, 3.4291605572557367, 77.65408388973503)
grid = sin(grid, 20, 27, 1.9438852600878178, -69.26160333661483)
grid = transit(grid, 19, [13, 7, 14, 12, 29], [0.15078244000703844, 0.30329951250855647, 0.052826921314074654, 0.014009457594495888, 0.47908166857583445])
grid = magnitude(grid, 4, [13, 5, 9, 16, 11, 6, 24, 14, 12, 1, 3, 22, 20, 8, 7, 15], 2)
grid = power(grid, 5, 12, 0.32534340921919336)
grid = sin(grid, 10, 18, -1.5546372261064723, 79.40807200485779)
grid = transit(grid, 0, [2, 17, 23, 0, 9, 10, 6, 20, 26, 22, 25, 11, 27, 21, 13, 12, 1, 16], [0.04666741358409691, 0.10041235530271467, 0.13656890415482237, 0.0505888093029676, 0.07654195022307062, 0.18419004471071113, 0.05046245679380782, 0.007552503418946401, 0.0004481713476168337, 0.011885682921671083, 0.032088980266198504, 0.0023404498982659153, 0.03348183036453658, 0.045952614669238355, 0.10372072735870042, 0.01789271596791753, 0.04799484234941445, 0.05120954736530277])
grid = transit(grid, 5, [25, 27], [0.5725941398341273, 0.4274058601658726])
grid = transit(grid, 19, [15, 29, 23, 21, 11, 10, 20, 4, 17, 2, 0, 13, 3, 1, 5, 8], [0.031537796589242675, 0.02689608568216389, 0.006352970412025167, 0.02945197836138472, 0.004733254055032123, 0.031841731262449186, 0.09701814809592517, 0.021578470501320998, 0.0071680768933244385, 0.11952705984922679, 0.05536518282979036, 0.06581861347697791, 0.2343306944232139, 0.10779797912646302, 0.09300120880000046, 0.06758074964145924])
grid = sin(grid, 11, 0, 2.563909435379265, 0.4450018649816627)
grid = magnitude(grid, 27, [16, 11, 8, 0, 28, 12, 17, 15, 18, 20, 26], 2)
grid = sin(grid, 19, 21, -3.4132246278785883, 84.16701903091374)
grid = sin(grid, 28, 13, -2.3675718627702755, 41.175741022703875)
grid = transit(grid, 7, [10, 9, 27, 23, 19, 5, 1, 22, 7], [0.14363062171830737, 0.01687188812140151, 0.13196113638415463, 0.17866921525288296, 0.07172903294369104, 0.1268434984434265, 0.09262190525507281, 0.13843366504033602, 0.09923903684072709])
grid = sin(grid, 1, 17, 0.4190217510777763, -96.31752118334663)
grid = sin(grid, 4, 25, -3.0130782974573114, 0.045638670109738655)
grid = sin(grid, 23, 16, -6.362468253360612, 24.730444687537883)
grid = sin(grid, 17, 14, -2.3747658845203916, -57.23440657206675)
grid = sin(grid, 19, 19, 2.4592230816940326, -51.76853764043066)
grid = magnitude(grid, 26, [4], 2)
grid = sin(grid, 0, 26, 2.879410066683457, 5.223173332129804)
grid = sin(grid, 10, 6, -3.2555765761277127, -17.443575197843472)
grid = transit(grid, 25, [24, 2, 25, 0, 12, 4], [0.03568795619526225, 0.08500737200701228, 0.05240229364632595, 0.603980978240824, 0.17712678127987705, 0.04579461863069845])
grid = sin(grid, 6, 29, -0.12051802831906497, 89.64443842624468)
grid = transit(grid, 5, [27, 16, 23, 9, 17], [0.22131895230663756, 0.07144315447485797, 0.49736096611646524, 0.13402807138531572, 0.07584885571672335])
grid = transit(grid, 12, [22, 8, 29, 16], [0.25792429467789524, 0.25168913008212207, 0.38751847922685195, 0.10286809601313074])
grid = magnitude(grid, 24, [12], 2)
grid = sin(grid, 21, 18, 6.904882453110925, 43.76686597000625)
grid = sin(grid, 27, 12, -2.3149706703321784, 91.4634229451533)
grid = transit(grid, 11, [18, 8, 23], [0.9456048289219839, 0.02282944678495521, 0.031565724293060864])
grid = sin(grid, 3, 22, -5.427035197241231, 70.63770520279803)
grid = sin(grid, 27, 15, -0.8306409707765449, 16.388610614890496)
grid = magnitude(grid, 22, [19, 20, 0, 14, 29, 16, 13, 11, 12, 7], 2)
grid = sin(grid, 22, 2, -0.2131858223375026, 23.110302271816437)
grid = transit(grid, 15, [26, 17, 18, 3, 20, 6], [0.26298407289730785, 0.036929318879447975, 0.21956318893577373, 0.12140448131206344, 0.2932362214654605, 0.06588271650994641])
grid = transit(grid, 6, [10, 0, 14, 4, 16, 26, 29], [0.3266526570756889, 0.010367316493219989, 0.06038405155138366, 0.18542143850276785, 0.15350917236048142, 0.1066459060285463, 0.1570194579879119])
grid = transit(grid, 3, [29, 9, 12, 18, 10, 17], [0.30897372209328766, 0.10630103874152365, 0.15658027364196273, 0.29474023685015555, 0.1326016707345515, 0.0008030579385190207])
grid = magnitude(grid, 11, [16, 7, 25, 22, 3, 17, 13, 0, 12, 27], 2)
grid = sin(grid, 13, 1, 2.9658129882147084, -41.317540719432344)
grid = magnitude(grid, 2, [19, 13, 16, 27, 26], 2)
grid = sin(grid, 1, 8, 6.699130217836646, 5.293135687331116)
grid = sin(grid, 0, 27, -6.580745881619362, 70.25836976864827)
grid = transit(grid, 24, [5], [1.0])
grid = sin(grid, 10, 7, -3.7620909835549288, -17.85297224969564)
grid = sin(grid, 18, 17, 6.374775580070441, -82.34320143877852)
grid = transit(grid, 27, [21, 3, 19, 27, 10, 17, 13], [0.03817904844946292, 0.12948414697169902, 0.02053094019023183, 0.17470975944365325, 0.2705479342577574, 0.164395301382941, 0.20215286930425458])
colors = np.zeros((6, 3))
colors[0] = [51, 169, 182]
colors[1] = [8, 23, 138]
colors[2] = [93, 97, 239]
colors[3] = [98, 25, 66]
colors[4] = [60, 71, 233]
colors[5] = [191, 187, 173]
res = np.zeros((SIZE, SIZE, 3))
res += (grid[:,:,0:0+1].repeat(3, -1) + 1) / 2 * colors[0]
res += (grid[:,:,1:1+1].repeat(3, -1) + 1) / 2 * colors[1]
res += (grid[:,:,2:2+1].repeat(3, -1) + 1) / 2 * colors[2]
res += (grid[:,:,3:3+1].repeat(3, -1) + 1) / 2 * colors[3]
res += (grid[:,:,4:4+1].repeat(3, -1) + 1) / 2 * colors[4]
res += (grid[:,:,5:5+1].repeat(3, -1) + 1) / 2 * colors[5]
res = res / colors.sum(0) * 255
im = Image.fromarray(np.uint8(res))
im.save(os.path.basename(__file__) + '.png')
| true | true |
1c3b1cd38bdc17c30ae3aa866f74ca00c594e86d | 863 | py | Python | exercises/en/test_03_12.py | Jette16/spacy-course | 32df0c8f6192de6c9daba89740a28c0537e4d6a0 | [
"MIT"
] | 2,085 | 2019-04-17T13:10:40.000Z | 2022-03-30T21:51:46.000Z | exercises/en/test_03_12.py | Jette16/spacy-course | 32df0c8f6192de6c9daba89740a28c0537e4d6a0 | [
"MIT"
] | 79 | 2019-04-18T14:42:55.000Z | 2022-03-07T08:15:43.000Z | exercises/en/test_03_12.py | Jette16/spacy-course | 32df0c8f6192de6c9daba89740a28c0537e4d6a0 | [
"MIT"
] | 361 | 2019-04-17T13:34:32.000Z | 2022-03-28T04:42:45.000Z | def test():
assert (
len(nlp.pipeline) == 1 and nlp.pipe_names[0] == "countries_component"
), "Did you add the component correctly?"
assert Span.has_extension("capital"), "Did you set the extension on the span?"
ext = Span.get_extension("capital")
assert ext[2] is not None, "Did you register get_capital as the getter?"
assert (
"(ent.text, ent.label_, ent._.capital)" in __solution__
), "Are you printing the correct attributes?"
assert len(doc.ents) == 2, "Looks like the entities didn't get set correctly?"
assert (
doc.ents[0]._.capital == "Prague" and doc.ents[1]._.capital == "Bratislava"
), "Looks like the capital attribute isn't working correctly."
__msg__.good(
"Well done! This is a great example of how you can add structured "
"data to your spaCy pipeline."
)
| 43.15 | 83 | 0.653534 | def test():
assert (
len(nlp.pipeline) == 1 and nlp.pipe_names[0] == "countries_component"
), "Did you add the component correctly?"
assert Span.has_extension("capital"), "Did you set the extension on the span?"
ext = Span.get_extension("capital")
assert ext[2] is not None, "Did you register get_capital as the getter?"
assert (
"(ent.text, ent.label_, ent._.capital)" in __solution__
), "Are you printing the correct attributes?"
assert len(doc.ents) == 2, "Looks like the entities didn't get set correctly?"
assert (
doc.ents[0]._.capital == "Prague" and doc.ents[1]._.capital == "Bratislava"
), "Looks like the capital attribute isn't working correctly."
__msg__.good(
"Well done! This is a great example of how you can add structured "
"data to your spaCy pipeline."
)
| true | true |
1c3b1dca3d55ec0370a15a8cf85954b428319f53 | 2,231 | py | Python | projects/convai/convai_bot.py | markr-fu-berlin/ParlAI | 23f014c38ee502091fdd8623f5c8a6f2c3216e92 | [
"BSD-3-Clause"
] | 2 | 2020-03-22T10:18:09.000Z | 2020-05-06T21:48:47.000Z | projects/convai/convai_bot.py | urvishdesai/dialogue-encoding-tasks-parlai | 29743cc7b47c413c2181f68c0b7ef40a6f06a40f | [
"BSD-3-Clause"
] | null | null | null | projects/convai/convai_bot.py | urvishdesai/dialogue-encoding-tasks-parlai | 29743cc7b47c413c2181f68c0b7ef40a6f06a40f | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) 2017-present, Moscow Institute of Physics and Technology.
# All rights reserved.
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
from projects.convai.convai_world import ConvAIWorld
from parlai.core.params import ParlaiParser
from parlai.core.agents import Agent, create_agent
from parlai.core.utils import display_messages
import random
class ConvAISampleAgent(Agent):
def __init__(self, opt, shared=None):
super().__init__(opt, shared)
self.id = 'ConvAISampleAgent'
self.text = 'Nothing to say yet!'
self.episode_done = False
def observe(self, observation):
print('\t' + display_messages([observation]))
self.observation = observation
self.episode_done = observation['episode_done']
if self.episode_done:
self.text = '/end'
else:
self.text = random.choice([
'I love you!',
'Wow!',
'Really?',
'Nice!',
'Hi',
'Hello',
'This is not very interesting. Let\'s change the subject of the '
'conversation and talk about cats.',
'/end'])
def act(self):
reply = {
'id': self.getID(),
'text': self.text,
'episode_done': self.episode_done
}
print('\t' + display_messages([reply]))
return reply
def setup_args():
parser = ParlaiParser(True, True)
ConvAIWorld.add_cmdline_args(parser)
return parser
def run_convai_bot(opt):
agent = create_agent(opt)
world = ConvAIWorld(opt, [agent])
while True:
try:
world.parley()
except Exception as e:
print('Exception: {}'.format(e))
def main():
parser = setup_args()
parser.set_params(model='projects.convai.convai_bot:ConvAISampleAgent')
opt = parser.parse_args()
print('Run ConvAI bot in inifinite loop...')
run_convai_bot(opt)
if __name__ == '__main__':
main()
| 27.8875 | 81 | 0.612281 |
from projects.convai.convai_world import ConvAIWorld
from parlai.core.params import ParlaiParser
from parlai.core.agents import Agent, create_agent
from parlai.core.utils import display_messages
import random
class ConvAISampleAgent(Agent):
def __init__(self, opt, shared=None):
super().__init__(opt, shared)
self.id = 'ConvAISampleAgent'
self.text = 'Nothing to say yet!'
self.episode_done = False
def observe(self, observation):
print('\t' + display_messages([observation]))
self.observation = observation
self.episode_done = observation['episode_done']
if self.episode_done:
self.text = '/end'
else:
self.text = random.choice([
'I love you!',
'Wow!',
'Really?',
'Nice!',
'Hi',
'Hello',
'This is not very interesting. Let\'s change the subject of the '
'conversation and talk about cats.',
'/end'])
def act(self):
reply = {
'id': self.getID(),
'text': self.text,
'episode_done': self.episode_done
}
print('\t' + display_messages([reply]))
return reply
def setup_args():
parser = ParlaiParser(True, True)
ConvAIWorld.add_cmdline_args(parser)
return parser
def run_convai_bot(opt):
agent = create_agent(opt)
world = ConvAIWorld(opt, [agent])
while True:
try:
world.parley()
except Exception as e:
print('Exception: {}'.format(e))
def main():
parser = setup_args()
parser.set_params(model='projects.convai.convai_bot:ConvAISampleAgent')
opt = parser.parse_args()
print('Run ConvAI bot in inifinite loop...')
run_convai_bot(opt)
if __name__ == '__main__':
main()
| true | true |
1c3b1e0c3cc5687f4dad6a7c5ee3273236a0217a | 707 | py | Python | plugin/src/py/android_screenshot_tests/no_op_device_name_calculator.py | AlexBeggs/screenshot-tests-for-android | 0e0212232e349d63e7f84d07c7680449b8a72120 | [
"Apache-2.0"
] | 1,747 | 2015-10-06T17:01:21.000Z | 2022-03-27T00:37:41.000Z | plugin/src/py/android_screenshot_tests/no_op_device_name_calculator.py | AlexBeggs/screenshot-tests-for-android | 0e0212232e349d63e7f84d07c7680449b8a72120 | [
"Apache-2.0"
] | 257 | 2015-10-06T19:41:43.000Z | 2022-03-25T09:40:48.000Z | plugin/src/py/android_screenshot_tests/no_op_device_name_calculator.py | AlexBeggs/screenshot-tests-for-android | 0e0212232e349d63e7f84d07c7680449b8a72120 | [
"Apache-2.0"
] | 258 | 2015-10-06T18:02:28.000Z | 2022-03-26T19:32:28.000Z | # Copyright (c) Facebook, Inc. and its affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class NoOpDeviceNameCalculator:
def __init__(self):
pass
def name(self):
return ""
| 32.136364 | 74 | 0.732673 |
class NoOpDeviceNameCalculator:
def __init__(self):
pass
def name(self):
return ""
| true | true |
1c3b1e47587865d29b076ad3b6b78fd1a463bbd0 | 975 | py | Python | python/2/intersection.py | idimaster/playard | 647d813f5f0d21efd98cb7ba8b23fa3a00f58cd4 | [
"MIT"
] | null | null | null | python/2/intersection.py | idimaster/playard | 647d813f5f0d21efd98cb7ba8b23fa3a00f58cd4 | [
"MIT"
] | null | null | null | python/2/intersection.py | idimaster/playard | 647d813f5f0d21efd98cb7ba8b23fa3a00f58cd4 | [
"MIT"
] | null | null | null | class Node:
def __init__(self, data=None, next=None):
self.data = data
self.next = next
def check(lst1, lst2):
while lst1:
head = lst2
while head:
if head == lst1:
return True
head = head.next
lst1 = lst1.next
return False
def check2(lst1, lst2):
s = set()
while lst1:
s.add(lst1)
lst1 = lst1.next
while lst2:
if lst2 in s:
return True
lst2 = lst2.next
return False
def check3(lst1, lst2):
while lst1.next:
lst1 = lst1.next
while lst2.next:
lst2 = lst2.next
return lst1 == lst2
lst = Node('a', Node('b', Node('c')))
lst0 = Node('a', Node('b', Node('c')))
lst1 = Node('d', Node('f', lst))
lst2 = Node('k', Node('l', Node('m', lst)))
print(check(lst1, lst2))
print(check(lst, lst0))
print(check2(lst1, lst2))
print(check2(lst, lst0))
print(check3(lst1, lst2))
print(check3(lst, lst0))
| 18.75 | 45 | 0.541538 | class Node:
def __init__(self, data=None, next=None):
self.data = data
self.next = next
def check(lst1, lst2):
while lst1:
head = lst2
while head:
if head == lst1:
return True
head = head.next
lst1 = lst1.next
return False
def check2(lst1, lst2):
s = set()
while lst1:
s.add(lst1)
lst1 = lst1.next
while lst2:
if lst2 in s:
return True
lst2 = lst2.next
return False
def check3(lst1, lst2):
while lst1.next:
lst1 = lst1.next
while lst2.next:
lst2 = lst2.next
return lst1 == lst2
lst = Node('a', Node('b', Node('c')))
lst0 = Node('a', Node('b', Node('c')))
lst1 = Node('d', Node('f', lst))
lst2 = Node('k', Node('l', Node('m', lst)))
print(check(lst1, lst2))
print(check(lst, lst0))
print(check2(lst1, lst2))
print(check2(lst, lst0))
print(check3(lst1, lst2))
print(check3(lst, lst0))
| true | true |
1c3b1f65a17c4d8548367cba52b1989d07f1860e | 1,982 | py | Python | tests/integration/examples/test_config_secure_example.py | matilda-me/neo4j-python-driver | 4fb25a266841bf2a861f00d5dcf257bd5ae5c686 | [
"Apache-2.0"
] | null | null | null | tests/integration/examples/test_config_secure_example.py | matilda-me/neo4j-python-driver | 4fb25a266841bf2a861f00d5dcf257bd5ae5c686 | [
"Apache-2.0"
] | null | null | null | tests/integration/examples/test_config_secure_example.py | matilda-me/neo4j-python-driver | 4fb25a266841bf2a861f00d5dcf257bd5ae5c686 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) "Neo4j"
# Neo4j Sweden AB [http://neo4j.com]
#
# This file is part of Neo4j.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from neo4j._exceptions import BoltHandshakeError
from neo4j.exceptions import ServiceUnavailable
from tests.integration.examples import DriverSetupExample
# isort: off
# tag::config-secure-import[]
import neo4j
from neo4j import GraphDatabase
# end::config-secure-import[]
# isort: off
# python -m pytest tests/integration/examples/test_config_secure_example.py -s -v
class ConfigSecureExample(DriverSetupExample):
# tag::config-secure[]
def __init__(self, uri, auth):
# trusted_certificates:
# neo4j.TrustSystemCAs()
# (default) trust certificates from system store)
# neo4j.TrustAll()
# trust all certificates
# neo4j.TrustCustomCAs("<path>", ...)
# specify a list of paths to certificates to trust
self.driver = GraphDatabase.driver(
uri, auth=auth, encrypted=True,
trusted_certificates=neo4j.TrustSystemCAs(),
)
# or omit trusted_certificates as None is the default
# end::config-secure[]
def test_example(uri, auth):
pytest.skip("re-enable when we can test with encrypted=True on Docker")
try:
ConfigSecureExample.test(uri, auth)
except ServiceUnavailable as error:
if isinstance(error.__cause__, BoltHandshakeError):
pytest.skip(error.args[0])
| 31.967742 | 81 | 0.707366 |
import pytest
from neo4j._exceptions import BoltHandshakeError
from neo4j.exceptions import ServiceUnavailable
from tests.integration.examples import DriverSetupExample
import neo4j
from neo4j import GraphDatabase
class ConfigSecureExample(DriverSetupExample):
def __init__(self, uri, auth):
self.driver = GraphDatabase.driver(
uri, auth=auth, encrypted=True,
trusted_certificates=neo4j.TrustSystemCAs(),
)
def test_example(uri, auth):
pytest.skip("re-enable when we can test with encrypted=True on Docker")
try:
ConfigSecureExample.test(uri, auth)
except ServiceUnavailable as error:
if isinstance(error.__cause__, BoltHandshakeError):
pytest.skip(error.args[0])
| true | true |
1c3b1fd2feb6059341825a17cd39398395bdea6d | 864 | py | Python | python/509.fibonacci-number.py | fengbaoheng/leetcode | 2b6ec9adea383503acc23622ca5623161f7ca520 | [
"MIT"
] | 1 | 2019-04-11T12:34:55.000Z | 2019-04-11T12:34:55.000Z | python/509.fibonacci-number.py | fengbaoheng/leetcode | 2b6ec9adea383503acc23622ca5623161f7ca520 | [
"MIT"
] | null | null | null | python/509.fibonacci-number.py | fengbaoheng/leetcode | 2b6ec9adea383503acc23622ca5623161f7ca520 | [
"MIT"
] | null | null | null | #
# @lc app=leetcode.cn id=509 lang=python3
#
# [509] Fibonacci Number
#
# https://leetcode-cn.com/problems/fibonacci-number/description/
#
# algorithms
# Easy (65.10%)
# Total Accepted: 5.9K
# Total Submissions: 9K
# Testcase Example: '2'
#
# 斐波那契数,通常用 F(n) 表示,形成的序列称为斐波那契数列。该数列由 0 和 1 开始,后面的每一项数字都是前面两项数字的和。也就是:
#
# F(0) = 0, F(1) = 1
# F(N) = F(N - 1) + F(N - 2), 其中 N > 1.
#
#
# 给定 N,计算 F(N)。
#
#
#
# 示例 1:
#
# 输入:2
# 输出:1
# 解释:F(2) = F(1) + F(0) = 1 + 0 = 1.
#
#
# 示例 2:
#
# 输入:3
# 输出:2
# 解释:F(3) = F(2) + F(1) = 1 + 1 = 2.
#
#
# 示例 3:
#
# 输入:4
# 输出:3
# 解释:F(4) = F(3) + F(2) = 2 + 1 = 3.
#
#
#
#
# 提示:
#
#
# 0 ≤ N ≤ 30
#
#
#
class Solution:
# 正向计算, 逆向递归会超时
def fib(self, N: int) -> int:
f = [0,1]
for i in range(2, N+1):
f.append(f[i-1] + f[i-2])
return f[N]
| 13.292308 | 71 | 0.456019 |
class Solution:
def fib(self, N: int) -> int:
f = [0,1]
for i in range(2, N+1):
f.append(f[i-1] + f[i-2])
return f[N]
| true | true |
1c3b200bebb7e070e6f4149dbd331eb1f225d018 | 3,156 | py | Python | Project/serve/predict.py | PankajPatil1/SageMaker-Deployment | be608dd09e82098fc87f2522a380472773dd9a37 | [
"MIT"
] | 1 | 2021-01-09T12:03:23.000Z | 2021-01-09T12:03:23.000Z | Project/serve/predict.py | PankajPatil1/SageMaker-Deployment | be608dd09e82098fc87f2522a380472773dd9a37 | [
"MIT"
] | null | null | null | Project/serve/predict.py | PankajPatil1/SageMaker-Deployment | be608dd09e82098fc87f2522a380472773dd9a37 | [
"MIT"
] | null | null | null | import argparse
import json
import os
import pickle
import sys
import sagemaker_containers
import pandas as pd
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import torch.utils.data
from model import LSTMClassifier
from utils import review_to_words, convert_and_pad
def model_fn(model_dir):
    """Load the trained LSTMClassifier and its word dictionary from *model_dir*.

    Expects three artifacts written by the training job:
    ``model_info.pth`` (hyperparameters), ``model.pth`` (weights) and
    ``word_dict.pkl`` (vocabulary). Returns the model in eval mode on the
    available device.
    """
    print("Loading model.")

    # Hyperparameters saved alongside the weights at training time.
    with open(os.path.join(model_dir, 'model_info.pth'), 'rb') as info_file:
        model_info = torch.load(info_file)
    print("model_info: {}".format(model_info))

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model = LSTMClassifier(model_info['embedding_dim'],
                           model_info['hidden_dim'],
                           model_info['vocab_size'])

    # Restore the learned parameters.
    with open(os.path.join(model_dir, 'model.pth'), 'rb') as weights_file:
        model.load_state_dict(torch.load(weights_file))

    # Vocabulary mapping used to turn reviews into integer sequences.
    with open(os.path.join(model_dir, 'word_dict.pkl'), 'rb') as dict_file:
        model.word_dict = pickle.load(dict_file)

    model.to(device).eval()
    print("Done loading model.")
    return model
def input_fn(serialized_input_data, content_type):
    """Deserialize a ``text/plain`` request body into a unicode string."""
    print('Deserializing the input data.')
    if content_type != 'text/plain':
        raise Exception('Requested unsupported ContentType in content_type: ' + content_type)
    return serialized_input_data.decode('utf-8')
def output_fn(prediction_output, accept):
    """Serialize the prediction as its plain string representation."""
    print('Serializing the generated output.')
    serialized = str(prediction_output)
    return serialized
def predict_fn(input_data, model):
    """Run sentiment inference on one raw review string.

    :param input_data: raw review text
    :param model: loaded LSTMClassifier with an attached ``word_dict``
    :return: numpy array holding a single rounded 0/1 sentiment label
    :raises Exception: if the model has no word dictionary attached
    """
    print('Inferring sentiment of input data.')

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    if model.word_dict is None:
        raise Exception('Model has not been loaded properly, no word_dict.')

    # Convert the raw review into a fixed-length integer sequence.
    processed_review = review_to_words(input_data)
    data_X, data_len = convert_and_pad(model.word_dict, processed_review)

    # The model expects rows of the form [length, review[500]].
    data_pack = np.hstack((data_len, data_X)).reshape(1, -1)
    data = torch.from_numpy(data_pack).to(device)

    # Evaluation mode: no dropout, no gradients.
    model.eval()
    with torch.no_grad():
        output = model(data)

    # FIX: .cpu() makes the .numpy() call safe when running on GPU, and the
    # original's duplicated trailing `return result` (dead code) is removed.
    return np.round(output.cpu().numpy())
| 32.875 | 107 | 0.700253 | import argparse
import json
import os
import pickle
import sys
import sagemaker_containers
import pandas as pd
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import torch.utils.data
from model import LSTMClassifier
from utils import review_to_words, convert_and_pad
def model_fn(model_dir):
    """Load the PyTorch model and its word dictionary from `model_dir`."""
    print("Loading model.")
    # Hyperparameters persisted by the training job.
    model_info = {}
    model_info_path = os.path.join(model_dir, 'model_info.pth')
    with open(model_info_path, 'rb') as f:
        model_info = torch.load(f)
    print("model_info: {}".format(model_info))
    # Rebuild the network; run on GPU when available.
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model = LSTMClassifier(model_info['embedding_dim'], model_info['hidden_dim'], model_info['vocab_size'])
    # Restore the trained weights.
    model_path = os.path.join(model_dir, 'model.pth')
    with open(model_path, 'rb') as f:
        model.load_state_dict(torch.load(f))
    # Vocabulary used to encode reviews as integer sequences.
    word_dict_path = os.path.join(model_dir, 'word_dict.pkl')
    with open(word_dict_path, 'rb') as f:
        model.word_dict = pickle.load(f)
    model.to(device).eval()
    print("Done loading model.")
    return model
def input_fn(serialized_input_data, content_type):
    """Decode a 'text/plain' request body to text; reject all other types."""
    print('Deserializing the input data.')
    if content_type == 'text/plain':
        data = serialized_input_data.decode('utf-8')
        return data
    raise Exception('Requested unsupported ContentType in content_type: ' + content_type)
def output_fn(prediction_output, accept):
    """Serialize the prediction result as a plain string."""
    print('Serializing the generated output.')
    return str(prediction_output)
def predict_fn(input_data, model):
    """Run sentiment inference on one raw review string.

    :param input_data: raw review text
    :param model: loaded LSTMClassifier with an attached ``word_dict``
    :return: numpy array holding a single rounded 0/1 sentiment label
    :raises Exception: if the model has no word dictionary attached
    """
    print('Inferring sentiment of input data.')

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    if model.word_dict is None:
        raise Exception('Model has not been loaded properly, no word_dict.')

    # Convert the raw review into a fixed-length integer sequence.
    processed_review = review_to_words(input_data)
    data_X, data_len = convert_and_pad(model.word_dict, processed_review)

    # The model expects rows of the form [length, review[500]].
    data_pack = np.hstack((data_len, data_X)).reshape(1, -1)
    data = torch.from_numpy(data_pack).to(device)

    # Evaluation mode: no dropout, no gradients.
    model.eval()
    with torch.no_grad():
        output = model(data)

    # FIX: .cpu() makes the .numpy() call safe when running on GPU, and the
    # original's duplicated trailing `return result` (dead code) is removed.
    return np.round(output.cpu().numpy())
| true | true |
1c3b2017513b36ddd6ef8b39858c5b73e4170761 | 11,055 | py | Python | fedora_utilities/forms.py | jermnelson/aristotle-library-apps | f742847cd20c5b5c3b46dd53dfc395a2e1caa240 | [
"Apache-2.0"
] | 2 | 2015-03-30T16:36:51.000Z | 2016-06-15T01:39:47.000Z | fedora_utilities/forms.py | jermnelson/aristotle-library-apps | f742847cd20c5b5c3b46dd53dfc395a2e1caa240 | [
"Apache-2.0"
] | 2 | 2021-06-10T17:43:54.000Z | 2021-12-13T19:40:08.000Z | fedora_utilities/forms.py | jermnelson/aristotle-library-apps | f742847cd20c5b5c3b46dd53dfc395a2e1caa240 | [
"Apache-2.0"
] | 1 | 2015-11-08T00:40:11.000Z | 2015-11-08T00:40:11.000Z | __author__ = "Jeremy Nelson"
import datetime
from django import forms
from fedora_utilities.models import *
from eulfedora.server import Repository
from eulfedora.util import RequestFailed
repository = Repository()
DIGITAL_ORIGIN = [(1, 'born digital'),
(2, 'reformatted digital'),
(3, 'digitized microfilm'),
(4, 'digitized other analog')]
GENRE = [('choose', 'Choose...')]
INSTITUTION_NAME = 'Colorado College'
MARC_FREQUENCY = [('choose', 'Choose...'),
('Semiweekly', 'Semiweekly - 2 times a week'),
('Three times a week', 'Three times a week'),
('Weekly', 'Weekly'),
('Biweekly', 'Biweekly - every 2 weeks'),
('Three times a month', 'Three times a month'),
('Semimonthly', 'Semimonthly - 2 times a month'),
('Monthly', 'Monthly'),
('Bimonthly', 'Bimonthly - every 2 months'),
('Quarterly', 'Quarterly'),
('Three times a year', 'Three times a year'),
('Semiannual', 'Semiannual - 2 times a year'),
('Annual', 'Annual'),
('Biennial', 'Biennial - every 2 years'),
('Triennial', 'Triennial - every 3 years'),
('Completely irregular', 'Completely irregular')]
OBJECT_TEMPLATES = [(0, 'Choose model'),
(1, 'Meeting Minutes'),
(2, 'Newsletter'),
(3, 'Podcast'),
(4, 'Video'),
(5, 'Master (All fields)')]
RIGHTS_STATEMENT = "Copyright restrictions apply. Contact Colorado College for permission to publish."
PLACE = 'Colorado Springs (Colo.)'
PUBLISHER = "Colorado College"
PUBLICATION_PLACE = 'Colorado Springs, Colorado'
class AddFedoraObjectFromTemplate(forms.Form):
    """Form for creating stub Fedora objects from a content-model template.

    Widgets are Bootstrap-styled; several carry ``data-bind`` attributes
    consumed by knockout.js on the client side.
    """
    admin_note = forms.CharField(label='Administrative Notes',
                                 max_length=1500,
                                 required=False,
                                 widget=forms.Textarea(
                                     attrs={'rows':5,
                                            'class': 'form-control'}))

    alt_title = forms.CharField(label='Alternative Title',
                                required=False,
                                widget=forms.TextInput(
                                    attrs={'class': 'form-control'}))

    collection_pid = forms.CharField(max_length=20,
                                     label="PID of Parent Collection",
                                     widget=forms.TextInput(
                                         attrs={'class': 'form-control'}))

    contributors = forms.CharField(required=False,
                                   widget=forms.TextInput(
                                       attrs={'class': 'form-control'}))

    corporate_contributors = forms.CharField(
        required=False,
        widget=forms.TextInput(
            attrs={'class': 'form-control'}))

    corporate_creators = forms.CharField(
        required=False,
        widget=forms.TextInput(
            attrs={'class': 'form-control'}))

    creators = forms.CharField(required=False,
                               widget=forms.TextInput(
                                   attrs={'class': 'form-control'}))

    date_created = forms.CharField(label='Date Created',
                                   required=False,
                                   widget=forms.TextInput(
                                       attrs={'class': 'form-control'}))

    digital_origin = forms.ChoiceField(choices=DIGITAL_ORIGIN,
                                       label='Digital Origin',
                                       initial=1,
                                       widget=forms.Select(
                                           attrs={
                                               'class': 'form-control'}))

    description = forms.CharField(label='Description',
                                  max_length=1500,
                                  widget=forms.Textarea(
                                      attrs={'class': 'form-control',
                                             'rows':5}),
                                  required=False)

    extent = forms.CharField(label='Extent',
                             max_length=1500,
                             widget=forms.Textarea(
                                 attrs={'rows':5,
                                        'class': 'form-control',
                                        'data-bind': 'value: extentValue'}),
                             required=False)

    form = forms.CharField(label='Form',
                           required=False,
                           widget=forms.TextInput(
                               attrs={
                                   'class': 'form-control',
                                   'data-bind': 'value: formValue'}))

    frequency_free_form = forms.CharField(label='Other',
                                          required=False,
                                          widget=forms.TextInput(
                                              attrs={'class': 'form-control'}))

    frequency = forms.ChoiceField(choices=MARC_FREQUENCY,
                                  label='Frequency',
                                  required=False,
                                  widget=forms.Select(
                                      attrs={'class': 'form-control'}))

    genre = forms.ChoiceField(
        label='Genre',
        required=False,
        widget=forms.Select(
            attrs={'data-bind': "options: genreOptions, optionsText: 'name', optionsValue: 'value'",
                   'class': 'form-control'}))

    genre_free_form = forms.CharField(label='Other',
                                      required=False,
                                      widget=forms.TextInput(
                                          attrs={'class': 'form-control'}))

    number_objects = forms.CharField(initial=1,
                                     label='Number of stub records',
                                     max_length=5,
                                     widget=forms.TextInput(
                                         attrs={'class': 'form-control'}))

    object_template = forms.ChoiceField(label='Content Model Template',
                                        choices=OBJECT_TEMPLATES,
                                        widget=forms.Select(
                                            attrs={
                                                'class': 'form-control',
                                                'data-bind':'value: chosenContentModel, click: displayContentModel'}))

    organizations = forms.CharField(max_length=255,
                                    required=False,
                                    initial=INSTITUTION_NAME,
                                    widget=forms.TextInput(
                                        attrs={'class': 'form-control'}))

    rights_holder = forms.CharField(max_length=255,
                                    label='Rights Statement',
                                    initial=RIGHTS_STATEMENT,
                                    widget=forms.Textarea(
                                        attrs={'rows': 3,
                                               'class': 'form-control'}))

    subject_dates = forms.CharField(label='Subject -- Dates',
                                    required=False,
                                    widget=forms.TextInput(
                                        {'class': 'form-control'}))

    subject_people = forms.CharField(label='Subject -- People',
                                     required=False,
                                     widget=forms.TextInput(
                                         {'class': 'form-control'}))

    subject_places = forms.CharField(label='Subject -- Places',
                                     required=False,
                                     initial=PLACE,
                                     widget=forms.TextInput(
                                         {'class': 'form-control'}))

    subject_topics = forms.CharField(
        label='Subject -- Topic',
        required=False,
        widget=forms.TextInput(
            attrs={'data-bind': 'value: topicOne',
                   'class': 'form-control'}))

    title = forms.CharField(max_length=120,
                            label='Title',
                            widget=forms.TextInput(
                                attrs={'class': 'form-control'}))

    type_of_resource = forms.CharField(
        label='Type of Resource',
        required=False,
        widget=forms.TextInput(
            attrs={'data-bind': 'value: typeOfResource',
                   'class': 'form-control'}))

    def clean(self):
        # Drop any validation error on `genre`: its choices are populated
        # client-side (knockout.js), so server-side ChoiceField validation
        # would always reject the submitted value.
        # FIX: dict.has_key() was removed in Python 3; use `in` instead.
        if 'genre' in self._errors:
            del self._errors['genre']
        return self.cleaned_data
class BatchIngestForm(forms.Form):
    """Upload form for batch-ingesting a compressed archive into a
    target Fedora collection.

    (A commented-out `target_directory` directory-upload field was removed
    as dead code.)
    """
    collection_pid = forms.CharField(max_length=20)
    compressed_file = forms.FileField(label="A .tar or .zip file",
                                      required=False)
class BatchModifyMetadataForm(forms.ModelForm):
    """ModelForm over BatchModifyMetadataLog, hiding the timestamp field."""

    class Meta:
        model = BatchModifyMetadataLog
        # FIX: must be a tuple/list. The original bare ('created_on') is just
        # a parenthesized string, which Django would iterate character by
        # character instead of excluding the field.
        exclude = ('created_on',)
class ObjectMovementForm(forms.Form):
    """Collects a source object PID and a target collection PID so a
    Fedora object can be moved between collections."""

    collection_pid = forms.CharField(max_length=20,
                                     label="PID of target collection",
                                     help_text='PID of target collection')

    source_pid = forms.CharField(max_length=20,
                                 label="PID of source PID",
                                 help_text='PID of source Fedora Object')

    def clean_collection_pid(self):
        """Validate that the target collection PID exists in the repository.

        Raises ``forms.ValidationError`` when the repository lookup fails.
        """
        pid = self.cleaned_data['collection_pid']
        if pid is None:
            return pid
        try:
            # Lookup only; the returned history is not needed.
            repository.api.getObjectHistory(pid=pid)
        except RequestFailed:
            raise forms.ValidationError("Collection PID %s not found in repository" % pid)
        return pid
| 48.065217 | 119 | 0.44758 | __author__ = "Jeremy Nelson"
import datetime
from django import forms
from fedora_utilities.models import *
from eulfedora.server import Repository
from eulfedora.util import RequestFailed
repository = Repository()
DIGITAL_ORIGIN = [(1, 'born digital'),
(2, 'reformatted digital'),
(3, 'digitized microfilm'),
(4, 'digitized other analog')]
GENRE = [('choose', 'Choose...')]
INSTITUTION_NAME = 'Colorado College'
MARC_FREQUENCY = [('choose', 'Choose...'),
('Semiweekly', 'Semiweekly - 2 times a week'),
('Three times a week', 'Three times a week'),
('Weekly', 'Weekly'),
('Biweekly', 'Biweekly - every 2 weeks'),
('Three times a month', 'Three times a month'),
('Semimonthly', 'Semimonthly - 2 times a month'),
('Monthly', 'Monthly'),
('Bimonthly', 'Bimonthly - every 2 months'),
('Quarterly', 'Quarterly'),
('Three times a year', 'Three times a year'),
('Semiannual', 'Semiannual - 2 times a year'),
('Annual', 'Annual'),
('Biennial', 'Biennial - every 2 years'),
('Triennial', 'Triennial - every 3 years'),
('Completely irregular', 'Completely irregular')]
OBJECT_TEMPLATES = [(0, 'Choose model'),
(1, 'Meeting Minutes'),
(2, 'Newsletter'),
(3, 'Podcast'),
(4, 'Video'),
(5, 'Master (All fields)')]
RIGHTS_STATEMENT = "Copyright restrictions apply. Contact Colorado College for permission to publish."
PLACE = 'Colorado Springs (Colo.)'
PUBLISHER = "Colorado College"
PUBLICATION_PLACE = 'Colorado Springs, Colorado'
class AddFedoraObjectFromTemplate(forms.Form):
    """Form for creating stub Fedora objects from a content-model template.

    Widgets are Bootstrap-styled; several carry ``data-bind`` attributes
    consumed by knockout.js on the client side.
    """
    admin_note = forms.CharField(label='Administrative Notes',
                                 max_length=1500,
                                 required=False,
                                 widget=forms.Textarea(
                                     attrs={'rows':5,
                                            'class': 'form-control'}))

    alt_title = forms.CharField(label='Alternative Title',
                                required=False,
                                widget=forms.TextInput(
                                    attrs={'class': 'form-control'}))

    collection_pid = forms.CharField(max_length=20,
                                     label="PID of Parent Collection",
                                     widget=forms.TextInput(
                                         attrs={'class': 'form-control'}))

    contributors = forms.CharField(required=False,
                                   widget=forms.TextInput(
                                       attrs={'class': 'form-control'}))

    corporate_contributors = forms.CharField(
        required=False,
        widget=forms.TextInput(
            attrs={'class': 'form-control'}))

    corporate_creators = forms.CharField(
        required=False,
        widget=forms.TextInput(
            attrs={'class': 'form-control'}))

    creators = forms.CharField(required=False,
                               widget=forms.TextInput(
                                   attrs={'class': 'form-control'}))

    date_created = forms.CharField(label='Date Created',
                                   required=False,
                                   widget=forms.TextInput(
                                       attrs={'class': 'form-control'}))

    digital_origin = forms.ChoiceField(choices=DIGITAL_ORIGIN,
                                       label='Digital Origin',
                                       initial=1,
                                       widget=forms.Select(
                                           attrs={
                                               'class': 'form-control'}))

    description = forms.CharField(label='Description',
                                  max_length=1500,
                                  widget=forms.Textarea(
                                      attrs={'class': 'form-control',
                                             'rows':5}),
                                  required=False)

    extent = forms.CharField(label='Extent',
                             max_length=1500,
                             widget=forms.Textarea(
                                 attrs={'rows':5,
                                        'class': 'form-control',
                                        'data-bind': 'value: extentValue'}),
                             required=False)

    form = forms.CharField(label='Form',
                           required=False,
                           widget=forms.TextInput(
                               attrs={
                                   'class': 'form-control',
                                   'data-bind': 'value: formValue'}))

    frequency_free_form = forms.CharField(label='Other',
                                          required=False,
                                          widget=forms.TextInput(
                                              attrs={'class': 'form-control'}))

    frequency = forms.ChoiceField(choices=MARC_FREQUENCY,
                                  label='Frequency',
                                  required=False,
                                  widget=forms.Select(
                                      attrs={'class': 'form-control'}))

    genre = forms.ChoiceField(
        label='Genre',
        required=False,
        widget=forms.Select(
            attrs={'data-bind': "options: genreOptions, optionsText: 'name', optionsValue: 'value'",
                   'class': 'form-control'}))

    genre_free_form = forms.CharField(label='Other',
                                      required=False,
                                      widget=forms.TextInput(
                                          attrs={'class': 'form-control'}))

    number_objects = forms.CharField(initial=1,
                                     label='Number of stub records',
                                     max_length=5,
                                     widget=forms.TextInput(
                                         attrs={'class': 'form-control'}))

    object_template = forms.ChoiceField(label='Content Model Template',
                                        choices=OBJECT_TEMPLATES,
                                        widget=forms.Select(
                                            attrs={
                                                'class': 'form-control',
                                                'data-bind':'value: chosenContentModel, click: displayContentModel'}))

    organizations = forms.CharField(max_length=255,
                                    required=False,
                                    initial=INSTITUTION_NAME,
                                    widget=forms.TextInput(
                                        attrs={'class': 'form-control'}))

    rights_holder = forms.CharField(max_length=255,
                                    label='Rights Statement',
                                    initial=RIGHTS_STATEMENT,
                                    widget=forms.Textarea(
                                        attrs={'rows': 3,
                                               'class': 'form-control'}))

    subject_dates = forms.CharField(label='Subject -- Dates',
                                    required=False,
                                    widget=forms.TextInput(
                                        {'class': 'form-control'}))

    subject_people = forms.CharField(label='Subject -- People',
                                     required=False,
                                     widget=forms.TextInput(
                                         {'class': 'form-control'}))

    subject_places = forms.CharField(label='Subject -- Places',
                                     required=False,
                                     initial=PLACE,
                                     widget=forms.TextInput(
                                         {'class': 'form-control'}))

    subject_topics = forms.CharField(
        label='Subject -- Topic',
        required=False,
        widget=forms.TextInput(
            attrs={'data-bind': 'value: topicOne',
                   'class': 'form-control'}))

    title = forms.CharField(max_length=120,
                            label='Title',
                            widget=forms.TextInput(
                                attrs={'class': 'form-control'}))

    type_of_resource = forms.CharField(
        label='Type of Resource',
        required=False,
        widget=forms.TextInput(
            attrs={'data-bind': 'value: typeOfResource',
                   'class': 'form-control'}))

    def clean(self):
        # Drop any validation error on `genre`: its choices are populated
        # client-side (knockout.js), so server-side ChoiceField validation
        # would always reject the submitted value.
        # FIX: dict.has_key() was removed in Python 3; use `in` instead.
        if 'genre' in self._errors:
            del self._errors['genre']
        return self.cleaned_data
class BatchIngestForm(forms.Form):
collection_pid = forms.CharField(max_length=20)
compressed_file = forms.FileField(label="A .tar or .zip file",
required=False)
ength=20,
label="PID of source PID",
help_text='PID of source Fedora Object')
def clean_collection_pid(self):
data = self.cleaned_data['collection_pid']
if data is not None:
try:
collection_object = repository.api.getObjectHistory(pid=data)
except RequestFailed:
raise forms.ValidationError("Collection PID %s not found in repository" % data)
return data
| true | true |
1c3b2035cec8ad04c7bf5c57c278d4962c099b4b | 2,597 | py | Python | device_loop.py | gve-sw/gve_devnet_webexclouddevices_reporting | c11e730b31cfff4ea8d6aa28aa1018802284945a | [
"RSA-MD"
] | null | null | null | device_loop.py | gve-sw/gve_devnet_webexclouddevices_reporting | c11e730b31cfff4ea8d6aa28aa1018802284945a | [
"RSA-MD"
] | null | null | null | device_loop.py | gve-sw/gve_devnet_webexclouddevices_reporting | c11e730b31cfff4ea8d6aa28aa1018802284945a | [
"RSA-MD"
] | null | null | null | '''
Copyright (c) 2020 Cisco and/or its affiliates.
This software is licensed to you under the terms of the Cisco Sample
Code License, Version 1.1 (the "License"). You may obtain a copy of the
License at
https://developer.cisco.com/docs/licenses
All use of the material herein must be in accordance with the terms of
the License. All rights not expressly granted by the License are
reserved. Unless required by applicable law or agreed to separately in
writing, software distributed under the License is distributed on an "AS
IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
or implied.
'''
# Gets two lists: A personal Devices List and a place device list
import requests
from credentials import token
place_device_list = []
place_device_name = []
personal_device_list = []
personal_device_name = []

url = "https://webexapis.com/v1/devices"
params = {"max": 100}
payload = {}  # unused; kept for backward compatibility
headers = {'Authorization': f'Bearer {token}'}


def _sort_devices(page_json):
    """File one page of /devices results into the place/personal lists."""
    for item in page_json['items']:
        # A record may carry 'placeId' (shared/workspace device) and/or
        # 'personId' (personal device). Matching the original behaviour,
        # place devices store their id, personal devices their ip.
        if 'placeId' in item:
            place_device_list.append(item['id'])
            place_device_name.append(item['displayName'])
        if 'personId' in item:
            personal_device_list.append(item['ip'])
            personal_device_name.append(item['displayName'])


# First page of results.
response = requests.get(url, headers=headers, params=params)
_sort_devices(response.json())

# Follow RFC 5988 'Link: rel="next"' pagination.
# FIX: the original only processed a fetched page when it *also* carried a
# Link header, silently dropping every device on the final page; it could
# also KeyError when a Link header had no 'next' relation.
while 'next' in response.links:
    next_url = response.links['next']['url']
    print(f"NEXT: {next_url}")
    response = requests.get(next_url, headers=headers)
    _sort_devices(response.json())

print('No Link header, finished!')
| 33.727273 | 78 | 0.67732 |
import requests
from credentials import token
place_device_list = []
place_device_name = []
personal_device_list = []
personal_device_name = []

url = "https://webexapis.com/v1/devices"
params = {"max": 100}
payload = {}  # unused; kept for backward compatibility
headers = {'Authorization': f'Bearer {token}'}


def _file_devices(page_json):
    """File one page of /devices results into the place/personal lists."""
    for item in page_json['items']:
        # A record may carry 'placeId' (shared/workspace device) and/or
        # 'personId' (personal device). Matching the original behaviour,
        # place devices store their id, personal devices their ip.
        if 'placeId' in item:
            place_device_list.append(item['id'])
            place_device_name.append(item['displayName'])
        if 'personId' in item:
            personal_device_list.append(item['ip'])
            personal_device_name.append(item['displayName'])


# First page of results.
response = requests.get(url, headers=headers, params=params)
_file_devices(response.json())

# Follow RFC 5988 'Link: rel="next"' pagination.
# FIX: the original only processed a fetched page when it *also* carried a
# Link header, silently dropping every device on the final page; it could
# also KeyError when a Link header had no 'next' relation.
while 'next' in response.links:
    next_url = response.links['next']['url']
    print(f"NEXT: {next_url}")
    response = requests.get(next_url, headers=headers)
    _file_devices(response.json())

print('No Link header, finished!')
| true | true |
1c3b20476bf26b3e3eb7476884ad5716e216e0ac | 25,871 | py | Python | flexget/plugins/clients/rtorrent.py | davidcollom/Flexget | cd763e04afdf6da8f1673dd567a42d55d4cb3b6c | [
"MIT"
] | 1 | 2021-03-24T11:54:01.000Z | 2021-03-24T11:54:01.000Z | flexget/plugins/clients/rtorrent.py | davidcollom/Flexget | cd763e04afdf6da8f1673dd567a42d55d4cb3b6c | [
"MIT"
] | null | null | null | flexget/plugins/clients/rtorrent.py | davidcollom/Flexget | cd763e04afdf6da8f1673dd567a42d55d4cb3b6c | [
"MIT"
] | null | null | null | from __future__ import unicode_literals, division, absolute_import
from builtins import * # noqa pylint: disable=unused-import, redefined-builtin
from future.moves.xmlrpc import client as xmlrpc_client
from future.moves.urllib.parse import urlparse, urljoin
from future.utils import native_str
import logging
import os
import socket
import re
from time import sleep
from flexget.utils.template import RenderError
from flexget.utils.pathscrub import pathscrub
from flexget import plugin
from flexget.event import event
from flexget.entry import Entry
from flexget.config_schema import one_or_more
from flexget.utils.bittorrent import Torrent, is_torrent_file
from requests.auth import HTTPDigestAuth, HTTPBasicAuth
log = logging.getLogger('rtorrent')
class _Method(object):
# some magic to bind an XML-RPC method to an RPC server.
# supports "nested" methods (e.g. examples.getStateName)
def __init__(self, send, name):
self.__send = send
self.__name = name
def __getattr__(self, name):
return _Method(self.__send, "%s.%s" % (self.__name, name))
def __call__(self, *args):
return self.__send(self.__name, args)
class HTTPDigestTransport(xmlrpc_client.Transport):
    """xmlrpclib transport backed by a requests session with Basic/Digest auth.

    If the server answers 401, the other auth flavour is tried once and a
    warning asks the user to correct their configuration.
    """

    def __init__(self, scheme, digest_auth, username, password, session, *args, **kwargs):
        self.__scheme = scheme
        self.__session = session
        self.__digest_auth = digest_auth
        self.__username = username
        self.__password = password
        self.verbose = 0
        xmlrpc_client.Transport.__init__(self, *args, **kwargs)  # old style class

    def request(self, host, handler, request_body, verbose=False):
        return self.single_request(host, handler, request_body, verbose)

    def single_request(self, host, handler, request_body, verbose=0):
        url = urljoin('{0}://{1}'.format(self.__scheme, host), handler)
        response = self.send_request(url, self.get_auth(), request_body)

        # A 401 means the configured auth method is wrong; flip
        # Basic <-> Digest and retry once.
        if response.status_code == 401:
            log.warning('%s auth failed. Retrying with %s. Please change your config.',
                        'Digest' if self.__digest_auth else 'Basic',
                        'Basic' if self.__digest_auth else 'Digest')
            self.__digest_auth = not self.__digest_auth
            response = self.send_request(url, self.get_auth(), request_body)

        response.raise_for_status()
        return self.parse_response(response)

    def get_auth(self):
        auth_cls = HTTPDigestAuth if self.__digest_auth else HTTPBasicAuth
        return auth_cls(self.__username, self.__password)

    def send_request(self, url, auth, data):
        return self.__session.post(url, auth=auth, data=data, raise_status=False)

    def parse_response(self, response):
        parser, unmarshaller = self.getparser()
        if self.verbose:
            log.info('body: %s', repr(response))
        parser.feed(response.content)
        parser.close()
        return unmarshaller.close()
class SCGITransport(xmlrpc_client.Transport):
    """xmlrpclib transport that speaks SCGI over TCP or a unix domain socket."""

    def __init__(self, *args, **kwargs):
        self.verbose = 0
        xmlrpc_client.Transport.__init__(self, *args, **kwargs)

    def request(self, host, handler, request_body, verbose=False):
        return self.single_request(host, handler, request_body, verbose)

    def single_request(self, host, handler, request_body, verbose=0):
        # Frame the XML-RPC payload as an SCGI request: a netstring holding
        # NUL-separated headers, then a comma, then the body.
        headers = [('CONTENT_LENGTH', native_str(len(request_body))), ('SCGI', '1')]
        header = '\x00'.join('%s\x00%s' % (key, value) for key, value in headers) + '\x00'
        header = '%d:%s' % (len(header), header)
        request_body = '%s,%s' % (header, request_body)

        sock = None
        try:
            if host:
                # A host was supplied: connect over TCP.
                parsed = urlparse(host)
                addr_info = socket.getaddrinfo(parsed.hostname, parsed.port,
                                               socket.AF_INET, socket.SOCK_STREAM)
                sock = socket.socket(*addr_info[0][:3])
                sock.connect(addr_info[0][4])
            else:
                # No host: the handler is a unix domain socket path.
                sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
                sock.connect(handler)

            self.verbose = verbose
            sock.send(request_body.encode())
            return self.parse_response(sock.makefile())
        finally:
            if sock:
                sock.close()

    def parse_response(self, response):
        parser, unmarshaller = self.getparser()

        # Drain the socket file object.
        chunks = []
        while True:
            data = response.read(1024)
            if not data:
                break
            chunks.append(data)
        response_body = ''.join(chunks)

        if self.verbose:
            log.info('body: %s', repr(response_body))

        # Drop the SCGI response headers (everything up to the first blank line).
        _, response_body = re.split(r'\n\s*?\n', response_body, maxsplit=1)

        parser.feed(response_body)
        parser.close()
        return unmarshaller.close()
class SCGIServerProxy(object):
    """Drop-in replacement for ``xmlrpclib.ServerProxy`` that talks SCGI."""

    def __init__(self, uri, transport=None, encoding=None,
                 verbose=False, allow_none=False, use_datetime=False):
        parsed_url = urlparse(uri)
        # A bare path (no scheme) means a unix socket: host stays None and
        # the path alone becomes the handler.
        self.__host = uri if parsed_url.scheme else None
        self.__handler = parsed_url.path or '/'
        if transport is None:
            transport = SCGITransport(use_datetime=use_datetime)
        self.__transport = transport
        self.__encoding = encoding or 'utf-8'
        self.__verbose = verbose
        self.__allow_none = allow_none

    def __close(self):
        self.__transport.close()

    def __request(self, method_name, params):
        # Marshal the call, send it, and unwrap single-value replies.
        request = xmlrpc_client.dumps(params, method_name, encoding=self.__encoding,
                                      allow_none=self.__allow_none).encode(self.__encoding)

        response = self.__transport.request(
            self.__host,
            self.__handler,
            request.decode('utf-8'),
            verbose=self.__verbose
        )

        return response[0] if len(response) == 1 else response

    def __repr__(self):
        return "<ServerProxy for %s%s>" % (self.__host, self.__handler)

    __str__ = __repr__

    def __getattr__(self, name):
        # Attribute access builds dotted method proxies (see _Method).
        return _Method(self.__request, name)

    def __call__(self, attr):
        """A workaround to get special attributes on the ServerProxy
        without interfering with the magic __getattr__
        """
        if attr == "close":
            return self.__close
        if attr == "transport":
            return self.__transport
        raise AttributeError("Attribute %r not found" % (attr,))
class RTorrent(object):
""" rTorrent API client """
default_fields = (
'hash',
'name',
'up_total', 'down_total', 'down_rate',
'is_open', 'is_active',
'custom1', 'custom2', 'custom3', 'custom4', 'custom5',
'state', 'complete',
'bytes_done', 'down.rate', 'left_bytes',
'ratio',
'base_path',
)
required_fields = (
'hash',
'name',
'base_path'
)
def __init__(self, uri, username=None, password=None, digest_auth=None, session=None):
"""
New connection to rTorrent
:param uri: RTorrent URL. Supports both http(s) and scgi
:param username: Username for basic auth over http(s)
:param password: Password for basic auth over http(s)
"""
self.uri = uri
self.username = username
self.password = password
self.digest_auth = digest_auth
self._version = None
parsed_uri = urlparse(uri)
if self.username and self.password and parsed_uri.scheme not in ['http', 'https']:
raise IOError('Username and password only supported on http(s)')
# Determine the proxy server
if parsed_uri.scheme in ['http', 'https']:
sp = xmlrpc_client.ServerProxy
elif parsed_uri.scheme == 'scgi':
sp = SCGIServerProxy
elif parsed_uri.scheme == '' and parsed_uri.path:
self.uri = parsed_uri.path
sp = SCGIServerProxy
else:
raise IOError('Unsupported scheme %s for uri %s' % (parsed_uri.scheme, self.uri))
# Use a special transport if http(s)
if parsed_uri.scheme in ['http', 'https']:
self._server = sp(self.uri, transport=HTTPDigestTransport(parsed_uri.scheme, self.digest_auth,
self.username, self.password, session))
else:
self._server = sp(self.uri)
def _clean_fields(self, fields, reverse=False):
if not fields:
fields = list(self.default_fields)
if reverse:
for field in ['up.total', 'down.total', 'down.rate']:
if field in fields:
fields[fields.index(field)] = native_str(field.replace('.', '_'))
return fields
for required_field in self.required_fields:
if required_field not in fields:
fields.insert(0, required_field)
for field in ['up_total', 'down_total', 'down_rate']:
if field in fields:
fields[fields.index(field)] = native_str(field.replace('_', '.'))
return fields
@property
def version(self):
return [int(v) for v in self._server.system.client_version().split('.')]
def load(self, raw_torrent, fields=None, start=False, mkdir=True):
if fields is None:
fields = {}
# First param is empty 'target'
params = ['', xmlrpc_client.Binary(raw_torrent)]
# Additional fields to set
for key, val in fields.items():
# Values must be escaped if within params
params.append('d.%s.set=%s' % (key, re.escape(native_str(val))))
if mkdir and 'directory' in fields:
result = self._server.execute.throw('', 'mkdir', '-p', fields['directory'])
if result != 0:
raise xmlrpc_client.Error('Failed creating directory %s' % fields['directory'])
# by default rtorrent won't allow calls over 512kb in size.
xmlrpc_size = len(xmlrpc_client.dumps(tuple(params), 'raw_start')) + 71680 # Add 70kb for buffer
if xmlrpc_size > 524288:
prev_size = self._server.network.xmlrpc.size_limit()
self._server.network.xmlrpc.size_limit.set('', xmlrpc_size)
# Call load method and return the response
if start:
result = self._server.load.raw_start(*params)
else:
result = self._server.load.raw(*params)
if xmlrpc_size > 524288:
self._server.network.xmlrpc.size_limit.set('', prev_size)
return result
def torrent(self, info_hash, fields=None):
""" Get the details of a torrent """
info_hash = native_str(info_hash)
if not fields:
fields = list(self.default_fields)
fields = self._clean_fields(fields)
multi_call = xmlrpc_client.MultiCall(self._server)
for field in fields:
method_name = 'd.%s' % field
getattr(multi_call, method_name)(info_hash)
resp = multi_call()
# TODO: Maybe we should return a named tuple or a Torrent class?
return dict(list(zip(self._clean_fields(fields, reverse=True), [val for val in resp])))
def torrents(self, view='main', fields=None):
if not fields:
fields = list(self.default_fields)
fields = self._clean_fields(fields)
params = ['d.%s=' % field for field in fields]
params.insert(0, view)
resp = self._server.d.multicall(params)
# Response is formatted as a list of lists, with just the values
return [dict(list(zip(self._clean_fields(fields, reverse=True), val))) for val in resp]
def update(self, info_hash, fields):
    """Set one or more d.* properties on an existing torrent in one batch."""
    batched = xmlrpc_client.MultiCall(self._server)
    clean_hash = native_str(info_hash)
    for name, value in fields.items():
        setter = getattr(batched, 'd.%s.set' % name)
        setter(clean_hash, native_str(value))
    # All setters behave the same; only the first response is of interest.
    return batched()[0]
def delete(self, info_hash):
    """Erase the torrent identified by *info_hash* from the client."""
    clean_hash = native_str(info_hash)
    return self._server.d.erase(clean_hash)
def stop(self, info_hash):
    """Stop and close an active torrent.

    rTorrent needs both d.stop (halt transfers) and d.close (release the
    session/files) to fully stop a download.
    """
    # Normalize the hash once, like every sibling method does; previously
    # d.stop received the raw value while d.close got the native_str form.
    info_hash = native_str(info_hash)
    self._server.d.stop(info_hash)
    return self._server.d.close(info_hash)
def start(self, info_hash):
    """Start (resume) the torrent identified by *info_hash*."""
    clean_hash = native_str(info_hash)
    return self._server.d.start(clean_hash)
def move(self, info_hash, dst_path):
    """Move a torrent's data to *dst_path* and point rTorrent at the new location.

    The torrent is stopped for the duration of the move and restarted afterwards.
    :raises xmlrpc_client.Error: if the destination directory cannot be created
    """
    info_hash = native_str(info_hash)
    self.stop(info_hash)
    # Need the current on-disk location so we know what to move.
    torrent = self.torrent(info_hash, fields=['base_path'])
    try:
        log.verbose('Creating destination directory `%s`' % dst_path)
        self._server.execute.throw('', 'mkdir', '-p', dst_path)
    except xmlrpc_client.Error:
        raise xmlrpc_client.Error("unable to create folder %s" % dst_path)
    # mv -u runs on the rTorrent host, skipping files that are already up to date.
    self._server.execute.throw('', 'mv', '-u', torrent['base_path'], dst_path)
    self._server.d.set_directory(info_hash, dst_path)
    self.start(info_hash)
class RTorrentPluginBase(object):
    """Shared behaviour for the rTorrent output and input plugins."""

    # Symbolic priority names accepted in config -> rTorrent numeric levels.
    priority_map = {
        'high': 3,
        'medium': 2,
        'low': 1,
        'off': 0,
    }

    def _build_options(self, config, entry, entry_first=True):
        """Assemble the d.* options dict for a torrent.

        Entry and config values are never merged; the preferred source
        (entry when *entry_first* is True, otherwise config) wins outright.
        """
        options = {}
        for opt_key in ('path', 'message', 'priority',
                        'custom1', 'custom2', 'custom3', 'custom4', 'custom5'):
            entry_value = entry.get(opt_key)
            config_value = config.get(opt_key)
            if entry_first:
                preferred = entry_value or config_value
            else:
                preferred = config_value or entry_value
            if preferred:
                options[opt_key] = entry.render(preferred)

        # Translate a symbolic priority into rTorrent's numeric value.
        priority = options.get('priority')
        if priority and priority in self.priority_map:
            options['priority'] = self.priority_map[priority]

        # Flexget's 'path' maps onto rTorrent's 'directory'.
        if options.get('path'):
            options['directory'] = options.pop('path')
        if 'directory' in options:
            options['directory'] = pathscrub(options['directory'])

        return options

    def on_task_start(self, task, config):
        """Abort the task early when rTorrent is unreachable or older than 0.9.2."""
        try:
            client = RTorrent(os.path.expanduser(config['uri']),
                              username=config.get('username'),
                              password=config.get('password'),
                              digest_auth=config['digest_auth'],
                              session=task.requests)
            if client.version < [0, 9, 2]:
                message = 'rtorrent version >=0.9.2 required, found {0}'.format(
                    '.'.join(map(str, client.version)))
                log.error(message)
                task.abort(message)
        except (IOError, xmlrpc_client.Error) as e:
            raise plugin.PluginError("Couldn't connect to rTorrent: %s" % str(e))
class RTorrentOutputPlugin(RTorrentPluginBase):
    """Output plugin: add, update or delete torrents in rTorrent."""

    schema = {
        'type': 'object',
        'properties': {
            # connection info
            'uri': {'type': 'string'},
            'username': {'type': 'string'},
            'password': {'type': 'string'},
            'digest_auth': {'type': 'boolean', 'default': False},
            'start': {'type': 'boolean', 'default': True},
            'mkdir': {'type': 'boolean', 'default': True},
            # BUG FIX: key was misspelled 'emun', so the action value was never validated.
            'action': {'type': 'string', 'enum': ['update', 'delete', 'add'], 'default': 'add'},
            # properties to set on rtorrent download object
            'message': {'type': 'string'},
            'priority': {'type': 'string'},
            'path': {'type': 'string'},
            'custom1': {'type': 'string'},
            'custom2': {'type': 'string'},
            'custom3': {'type': 'string'},
            'custom4': {'type': 'string'},
            'custom5': {'type': 'string'},
        },
        'required': ['uri'],
        'additionalProperties': False,
    }

    def _verify_load(self, client, info_hash):
        """Poll rTorrent until the freshly loaded torrent is queryable.

        Retries up to 5 times, 0.5s apart; re-raises the last error on failure.
        """
        last_error = IOError()
        for _ in range(0, 5):
            try:
                return client.torrent(info_hash, fields=['hash'])
            except (IOError, xmlrpc_client.Error) as e:
                # BUG FIX: Python 3 deletes the 'as e' binding when the except
                # block ends, so the old `raise e` after the loop raised
                # NameError instead of the last error. Keep our own reference.
                last_error = e
                sleep(0.5)
        raise last_error

    @plugin.priority(120)
    def on_task_download(self, task, config):
        """Ensure temp .torrent files exist when we are going to add them."""
        # If the download plugin is not enabled, we need to call it to get
        # our temp .torrent files
        if config['action'] == 'add' and 'download' not in task.config:
            download = plugin.get_plugin_by_name('download')
            download.instance.get_temp_files(task, handle_magnets=True, fail_html=True)

    @plugin.priority(135)
    def on_task_output(self, task, config):
        """Apply the configured action (add/update/delete) to every accepted entry."""
        client = RTorrent(os.path.expanduser(config['uri']),
                          username=config.get('username'),
                          password=config.get('password'),
                          digest_auth=config['digest_auth'],
                          session=task.requests)
        for entry in task.accepted:
            if task.options.test:
                log.info('Would add %s to rTorrent' % entry['url'])
                continue
            if config['action'] == 'add':
                try:
                    options = self._build_options(config, entry)
                except RenderError as e:
                    entry.fail("failed to render properties %s" % str(e))
                    continue
                self.add_entry(client, entry, options, start=config['start'], mkdir=config['mkdir'])
            # update/delete require an existing info hash on the entry.
            info_hash = entry.get('torrent_info_hash')
            if not info_hash:
                entry.fail('Failed to %s as no info_hash found' % config['action'])
                continue
            if config['action'] == 'delete':
                self.delete_entry(client, entry)
            if config['action'] == 'update':
                self.update_entry(client, entry, config)

    def delete_entry(self, client, entry):
        """Erase the entry's torrent from rTorrent; fail the entry on error."""
        try:
            client.delete(entry['torrent_info_hash'])
            log.verbose('Deleted %s (%s) in rtorrent ' % (entry['title'], entry['torrent_info_hash']))
        except (IOError, xmlrpc_client.Error) as e:
            entry.fail('Failed to delete: %s' % str(e))
            return

    def update_entry(self, client, entry, config):
        """Update d.* properties of a torrent, moving its data first if the path changed."""
        info_hash = entry['torrent_info_hash']

        # First check if it already exists
        try:
            existing = client.torrent(info_hash, fields=['base_path'])
        except IOError as e:
            entry.fail("Error updating torrent %s" % str(e))
            return
        except xmlrpc_client.Error:
            # Not loaded in the client; there is nothing to move.
            existing = False

        # Build options but make config values override entry values
        try:
            options = self._build_options(config, entry, entry_first=False)
        except RenderError as e:
            entry.fail("failed to render properties %s" % str(e))
            return

        if existing and 'directory' in options:
            # Check if changing to another directory which requires a move
            if options['directory'] != existing['base_path'] \
                    and options['directory'] != os.path.dirname(existing['base_path']):
                try:
                    log.verbose("Path is changing, moving files from '%s' to '%s'"
                                % (existing['base_path'], options['directory']))
                    client.move(info_hash, options['directory'])
                except (IOError, xmlrpc_client.Error) as e:
                    entry.fail('Failed moving torrent: %s' % str(e))
                    return

        # Remove directory from update otherwise rTorrent will append the title to the directory path
        if 'directory' in options:
            del options['directory']

        try:
            client.update(info_hash, options)
            log.verbose('Updated %s (%s) in rtorrent ' % (entry['title'], info_hash))
        except (IOError, xmlrpc_client.Error) as e:
            entry.fail('Failed to update: %s' % str(e))
            return

    def add_entry(self, client, entry, options, start=True, mkdir=False):
        """Load a torrent (magnet URI or downloaded file) into rTorrent and verify it took."""
        if 'torrent_info_hash' not in entry:
            entry.fail('missing torrent_info_hash')
            return

        if entry['url'].startswith('magnet:'):
            # Synthesize the minimal bencoded wrapper rTorrent understands for magnets.
            torrent_raw = 'd10:magnet-uri%d:%se' % (len(entry['url']), entry['url'])
            torrent_raw = torrent_raw.encode('ascii')
        else:
            # Check that file is downloaded
            if 'file' not in entry:
                raise plugin.PluginError('Temporary download file is missing from entry')
            # Verify the temp file exists
            if not os.path.exists(entry['file']):
                raise plugin.PluginError('Temporary download file is missing from disk')
            # Verify valid torrent file
            if not is_torrent_file(entry['file']):
                entry.fail("Downloaded temp file '%s' is not a torrent file" % entry['file'])
                return
            try:
                with open(entry['file'], 'rb') as f:
                    torrent_raw = f.read()
            except IOError as e:
                entry.fail('Failed to add to rTorrent %s' % str(e))
                return
            try:
                Torrent(torrent_raw)
            except SyntaxError as e:
                entry.fail('Strange, unable to decode torrent, raise a BUG: %s' % str(e))
                return

        # First check if it already exists
        try:
            if client.torrent(entry['torrent_info_hash']):
                log.warning("Torrent %s already exists, won't add" % entry['title'])
                return
        except IOError as e:
            entry.fail("Error checking if torrent already exists %s" % str(e))
        except xmlrpc_client.Error:
            # No existing found
            pass

        try:
            resp = client.load(torrent_raw, fields=options, start=start, mkdir=mkdir)
            if resp != 0:
                entry.fail('Failed to add to rTorrent invalid return value %s' % resp)
        except (IOError, xmlrpc_client.Error) as e:
            log.exception(e)
            entry.fail('Failed to add to rTorrent %s' % str(e))
            return

        # Verify the torrent loaded
        try:
            self._verify_load(client, entry['torrent_info_hash'])
            log.info('%s added to rtorrent' % entry['title'])
        except (IOError, xmlrpc_client.Error) as e:
            entry.fail('Failed to verify torrent loaded: %s' % str(e))

    def on_task_exit(self, task, config):
        """Make sure all temp files are cleaned up when the task exits."""
        # If download plugin is enabled, it will handle cleanup.
        if 'download' not in task.config:
            download = plugin.get_plugin_by_name('download')
            download.instance.cleanup_temp_files(task)

    on_task_abort = on_task_exit
class RTorrentInputPlugin(RTorrentPluginBase):
    """Input plugin: generate entries from the torrents in an rTorrent view."""

    schema = {
        'type': 'object',
        'properties': {
            'uri': {'type': 'string'},
            'username': {'type': 'string'},
            'password': {'type': 'string'},
            'digest_auth': {'type': 'boolean', 'default': False},
            'view': {'type': 'string', 'default': 'main'},
            'fields': one_or_more({'type': 'string', 'enum': list(RTorrent.default_fields)}),
        },
        'required': ['uri'],
        'additionalProperties': False
    }

    def on_task_input(self, task, config):
        """Return one entry per torrent in the configured rTorrent view."""
        uri = os.path.expanduser(config['uri'])
        client = RTorrent(uri,
                          username=config.get('username'),
                          password=config.get('password'),
                          digest_auth=config['digest_auth'],
                          session=task.requests)
        try:
            torrents = client.torrents(config['view'], fields=config.get('fields'))
        except (IOError, xmlrpc_client.Error) as e:
            task.abort('Could not get torrents (%s): %s' % (config['view'], e))
            return

        entries = []
        for torrent in torrents:
            entry = Entry(
                title=torrent['name'],
                url='%s/%s' % (uri, torrent['hash']),
                path=torrent['base_path'],
                torrent_info_hash=torrent['hash'],
            )
            # Expose every fetched field on the entry as-is.
            for attr, value in torrent.items():
                entry[attr] = value
            entries.append(entry)
        return entries
@event('plugin.register')
def register_plugin():
    # Register the output plugin as 'rtorrent' and the input plugin as 'from_rtorrent'.
    plugin.register(RTorrentOutputPlugin, 'rtorrent', api_ver=2)
    plugin.register(RTorrentInputPlugin, 'from_rtorrent', api_ver=2)
| 35.931944 | 117 | 0.577983 | from __future__ import unicode_literals, division, absolute_import
from builtins import *
from future.moves.xmlrpc import client as xmlrpc_client
from future.moves.urllib.parse import urlparse, urljoin
from future.utils import native_str
import logging
import os
import socket
import re
from time import sleep
from flexget.utils.template import RenderError
from flexget.utils.pathscrub import pathscrub
from flexget import plugin
from flexget.event import event
from flexget.entry import Entry
from flexget.config_schema import one_or_more
from flexget.utils.bittorrent import Torrent, is_torrent_file
from requests.auth import HTTPDigestAuth, HTTPBasicAuth
log = logging.getLogger('rtorrent')
class _Method(object):
def __init__(self, send, name):
self.__send = send
self.__name = name
def __getattr__(self, name):
return _Method(self.__send, "%s.%s" % (self.__name, name))
def __call__(self, *args):
return self.__send(self.__name, args)
class HTTPDigestTransport(xmlrpc_client.Transport):
def __init__(self, scheme, digest_auth, username, password, session, *args, **kwargs):
self.__scheme = scheme
self.__session = session
self.__digest_auth = digest_auth
self.__username = username
self.__password = password
self.verbose = 0
xmlrpc_client.Transport.__init__(self, *args, **kwargs)
def request(self, host, handler, request_body, verbose=False):
return self.single_request(host, handler, request_body, verbose)
def single_request(self, host, handler, request_body, verbose=0):
url = urljoin('{0}://{1}'.format(self.__scheme, host), handler)
auth = self.get_auth()
response = self.send_request(url, auth, request_body)
if response.status_code == 401:
log.warning('%s auth failed. Retrying with %s. Please change your config.',
'Digest' if self.__digest_auth else 'Basic',
'Basic' if self.__digest_auth else 'Digest')
self.__digest_auth = not self.__digest_auth
auth = self.get_auth()
response = self.send_request(url, auth, request_body)
response.raise_for_status()
return self.parse_response(response)
def get_auth(self):
if self.__digest_auth:
return HTTPDigestAuth(self.__username, self.__password)
return HTTPBasicAuth(self.__username, self.__password)
def send_request(self, url, auth, data):
return self.__session.post(url, auth=auth, data=data, raise_status=False)
def parse_response(self, response):
p, u = self.getparser()
if self.verbose:
log.info('body: %s', repr(response))
p.feed(response.content)
p.close()
return u.close()
class SCGITransport(xmlrpc_client.Transport):
def __init__(self, *args, **kwargs):
self.verbose = 0
xmlrpc_client.Transport.__init__(self, *args, **kwargs)
def request(self, host, handler, request_body, verbose=False):
return self.single_request(host, handler, request_body, verbose)
def single_request(self, host, handler, request_body, verbose=0):
headers = [('CONTENT_LENGTH', native_str(len(request_body))), ('SCGI', '1')]
header = '\x00'.join(['%s\x00%s' % (key, value) for key, value in headers]) + '\x00'
header = '%d:%s' % (len(header), header)
request_body = '%s,%s' % (header, request_body)
sock = None
try:
if host:
parsed_host = urlparse(host)
host = parsed_host.hostname
port = parsed_host.port
addr_info = socket.getaddrinfo(host, port, socket.AF_INET, socket.SOCK_STREAM)
sock = socket.socket(*addr_info[0][:3])
sock.connect(addr_info[0][4])
else:
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
sock.connect(handler)
self.verbose = verbose
sock.send(request_body.encode())
return self.parse_response(sock.makefile())
finally:
if sock:
sock.close()
def parse_response(self, response):
p, u = self.getparser()
response_body = ''
while True:
data = response.read(1024)
if not data:
break
response_body += data
if self.verbose:
log.info('body: %s', repr(response_body))
_, response_body = re.split(r'\n\s*?\n', response_body, maxsplit=1)
p.feed(response_body)
p.close()
return u.close()
class SCGIServerProxy(object):
def __init__(self, uri, transport=None, encoding=None,
verbose=False, allow_none=False, use_datetime=False):
parsed_url = urlparse(uri)
self.__host = uri if parsed_url.scheme else None
self.__handler = parsed_url.path
if not self.__handler:
self.__handler = '/'
if not transport:
transport = SCGITransport(use_datetime=use_datetime)
self.__transport = transport
self.__encoding = encoding or 'utf-8'
self.__verbose = verbose
self.__allow_none = allow_none
def __close(self):
self.__transport.close()
def __request(self, method_name, params):
request = xmlrpc_client.dumps(params, method_name, encoding=self.__encoding,
allow_none=self.__allow_none).encode(self.__encoding)
response = self.__transport.request(
self.__host,
self.__handler,
request.decode('utf-8'),
verbose=self.__verbose
)
if len(response) == 1:
response = response[0]
return response
def __repr__(self):
return (
"<ServerProxy for %s%s>" %
(self.__host, self.__handler)
)
__str__ = __repr__
def __getattr__(self, name):
return _Method(self.__request, name)
def __call__(self, attr):
if attr == "close":
return self.__close
elif attr == "transport":
return self.__transport
raise AttributeError("Attribute %r not found" % (attr,))
class RTorrent(object):
default_fields = (
'hash',
'name',
'up_total', 'down_total', 'down_rate',
'is_open', 'is_active',
'custom1', 'custom2', 'custom3', 'custom4', 'custom5',
'state', 'complete',
'bytes_done', 'down.rate', 'left_bytes',
'ratio',
'base_path',
)
required_fields = (
'hash',
'name',
'base_path'
)
def __init__(self, uri, username=None, password=None, digest_auth=None, session=None):
self.uri = uri
self.username = username
self.password = password
self.digest_auth = digest_auth
self._version = None
parsed_uri = urlparse(uri)
if self.username and self.password and parsed_uri.scheme not in ['http', 'https']:
raise IOError('Username and password only supported on http(s)')
if parsed_uri.scheme in ['http', 'https']:
sp = xmlrpc_client.ServerProxy
elif parsed_uri.scheme == 'scgi':
sp = SCGIServerProxy
elif parsed_uri.scheme == '' and parsed_uri.path:
self.uri = parsed_uri.path
sp = SCGIServerProxy
else:
raise IOError('Unsupported scheme %s for uri %s' % (parsed_uri.scheme, self.uri))
if parsed_uri.scheme in ['http', 'https']:
self._server = sp(self.uri, transport=HTTPDigestTransport(parsed_uri.scheme, self.digest_auth,
self.username, self.password, session))
else:
self._server = sp(self.uri)
def _clean_fields(self, fields, reverse=False):
if not fields:
fields = list(self.default_fields)
if reverse:
for field in ['up.total', 'down.total', 'down.rate']:
if field in fields:
fields[fields.index(field)] = native_str(field.replace('.', '_'))
return fields
for required_field in self.required_fields:
if required_field not in fields:
fields.insert(0, required_field)
for field in ['up_total', 'down_total', 'down_rate']:
if field in fields:
fields[fields.index(field)] = native_str(field.replace('_', '.'))
return fields
@property
def version(self):
return [int(v) for v in self._server.system.client_version().split('.')]
def load(self, raw_torrent, fields=None, start=False, mkdir=True):
if fields is None:
fields = {}
params = ['', xmlrpc_client.Binary(raw_torrent)]
for key, val in fields.items():
params.append('d.%s.set=%s' % (key, re.escape(native_str(val))))
if mkdir and 'directory' in fields:
result = self._server.execute.throw('', 'mkdir', '-p', fields['directory'])
if result != 0:
raise xmlrpc_client.Error('Failed creating directory %s' % fields['directory'])
xmlrpc_size = len(xmlrpc_client.dumps(tuple(params), 'raw_start')) + 71680 # Add 70kb for buffer
if xmlrpc_size > 524288:
prev_size = self._server.network.xmlrpc.size_limit()
self._server.network.xmlrpc.size_limit.set('', xmlrpc_size)
# Call load method and return the response
if start:
result = self._server.load.raw_start(*params)
else:
result = self._server.load.raw(*params)
if xmlrpc_size > 524288:
self._server.network.xmlrpc.size_limit.set('', prev_size)
return result
def torrent(self, info_hash, fields=None):
info_hash = native_str(info_hash)
if not fields:
fields = list(self.default_fields)
fields = self._clean_fields(fields)
multi_call = xmlrpc_client.MultiCall(self._server)
for field in fields:
method_name = 'd.%s' % field
getattr(multi_call, method_name)(info_hash)
resp = multi_call()
# TODO: Maybe we should return a named tuple or a Torrent class?
return dict(list(zip(self._clean_fields(fields, reverse=True), [val for val in resp])))
def torrents(self, view='main', fields=None):
if not fields:
fields = list(self.default_fields)
fields = self._clean_fields(fields)
params = ['d.%s=' % field for field in fields]
params.insert(0, view)
resp = self._server.d.multicall(params)
# Response is formatted as a list of lists, with just the values
return [dict(list(zip(self._clean_fields(fields, reverse=True), val))) for val in resp]
def update(self, info_hash, fields):
multi_call = xmlrpc_client.MultiCall(self._server)
for key, val in fields.items():
method_name = 'd.%s.set' % key
getattr(multi_call, method_name)(native_str(info_hash), native_str(val))
return multi_call()[0]
def delete(self, info_hash):
return self._server.d.erase(native_str(info_hash))
def stop(self, info_hash):
self._server.d.stop(info_hash)
return self._server.d.close(native_str(info_hash))
def start(self, info_hash):
return self._server.d.start(native_str(info_hash))
def move(self, info_hash, dst_path):
info_hash = native_str(info_hash)
self.stop(info_hash)
torrent = self.torrent(info_hash, fields=['base_path'])
try:
log.verbose('Creating destination directory `%s`' % dst_path)
self._server.execute.throw('', 'mkdir', '-p', dst_path)
except xmlrpc_client.Error:
raise xmlrpc_client.Error("unable to create folder %s" % dst_path)
self._server.execute.throw('', 'mv', '-u', torrent['base_path'], dst_path)
self._server.d.set_directory(info_hash, dst_path)
self.start(info_hash)
class RTorrentPluginBase(object):
priority_map = {
'high': 3,
'medium': 2,
'low': 1,
'off': 0,
}
def _build_options(self, config, entry, entry_first=True):
options = {}
for opt_key in ('path', 'message', 'priority',
'custom1', 'custom2', 'custom3', 'custom4', 'custom5'):
# Values do not merge config with task
# Task takes priority then config is used
entry_value = entry.get(opt_key)
config_value = config.get(opt_key)
if entry_first:
if entry_value:
options[opt_key] = entry.render(entry_value)
elif config_value:
options[opt_key] = entry.render(config_value)
else:
if config_value:
options[opt_key] = entry.render(config_value)
elif entry_value:
options[opt_key] = entry.render(entry_value)
# Convert priority from string to int
priority = options.get('priority')
if priority and priority in self.priority_map:
options['priority'] = self.priority_map[priority]
# Map Flexget path to directory in rTorrent
if options.get('path'):
options['directory'] = options['path']
del options['path']
if 'directory' in options:
options['directory'] = pathscrub(options['directory'])
return options
def on_task_start(self, task, config):
try:
client = RTorrent(os.path.expanduser(config['uri']),
username=config.get('username'),
password=config.get('password'),
digest_auth=config['digest_auth'],
session=task.requests)
if client.version < [0, 9, 2]:
log.error('rtorrent version >=0.9.2 required, found {0}'.format('.'.join(map(str, client.version))))
task.abort('rtorrent version >=0.9.2 required, found {0}'.format('.'.join(map(str, client.version))))
except (IOError, xmlrpc_client.Error) as e:
raise plugin.PluginError("Couldn't connect to rTorrent: %s" % str(e))
class RTorrentOutputPlugin(RTorrentPluginBase):
schema = {
'type': 'object',
'properties': {
'uri': {'type': 'string'},
'username': {'type': 'string'},
'password': {'type': 'string'},
'digest_auth': {'type': 'boolean', 'default': False},
'start': {'type': 'boolean', 'default': True},
'mkdir': {'type': 'boolean', 'default': True},
'action': {'type': 'string', 'emun': ['update', 'delete', 'add'], 'default': 'add'},
'message': {'type': 'string'},
'priority': {'type': 'string'},
'path': {'type': 'string'},
'custom1': {'type': 'string'},
'custom2': {'type': 'string'},
'custom3': {'type': 'string'},
'custom4': {'type': 'string'},
'custom5': {'type': 'string'},
},
'required': ['uri'],
'additionalProperties': False,
}
def _verify_load(self, client, info_hash):
e = IOError()
for _ in range(0, 5):
try:
return client.torrent(info_hash, fields=['hash'])
except (IOError, xmlrpc_client.Error) as e:
sleep(0.5)
raise e
@plugin.priority(120)
def on_task_download(self, task, config):
if config['action'] == 'add' and 'download' not in task.config:
download = plugin.get_plugin_by_name('download')
download.instance.get_temp_files(task, handle_magnets=True, fail_html=True)
@plugin.priority(135)
def on_task_output(self, task, config):
client = RTorrent(os.path.expanduser(config['uri']),
username=config.get('username'),
password=config.get('password'),
digest_auth=config['digest_auth'],
session=task.requests)
for entry in task.accepted:
if task.options.test:
log.info('Would add %s to rTorrent' % entry['url'])
continue
if config['action'] == 'add':
try:
options = self._build_options(config, entry)
except RenderError as e:
entry.fail("failed to render properties %s" % str(e))
continue
self.add_entry(client, entry, options, start=config['start'], mkdir=config['mkdir'])
info_hash = entry.get('torrent_info_hash')
if not info_hash:
entry.fail('Failed to %s as no info_hash found' % config['action'])
continue
if config['action'] == 'delete':
self.delete_entry(client, entry)
if config['action'] == 'update':
self.update_entry(client, entry, config)
def delete_entry(self, client, entry):
try:
client.delete(entry['torrent_info_hash'])
log.verbose('Deleted %s (%s) in rtorrent ' % (entry['title'], entry['torrent_info_hash']))
except (IOError, xmlrpc_client.Error) as e:
entry.fail('Failed to delete: %s' % str(e))
return
def update_entry(self, client, entry, config):
info_hash = entry['torrent_info_hash']
try:
existing = client.torrent(info_hash, fields=['base_path'])
except IOError as e:
entry.fail("Error updating torrent %s" % str(e))
return
except xmlrpc_client.Error as e:
existing = False
try:
options = self._build_options(config, entry, entry_first=False)
except RenderError as e:
entry.fail("failed to render properties %s" % str(e))
return
if existing and 'directory' in options:
if options['directory'] != existing['base_path'] \
and options['directory'] != os.path.dirname(existing['base_path']):
try:
log.verbose("Path is changing, moving files from '%s' to '%s'"
% (existing['base_path'], options['directory']))
client.move(info_hash, options['directory'])
except (IOError, xmlrpc_client.Error) as e:
entry.fail('Failed moving torrent: %s' % str(e))
return
if 'directory' in options:
del options['directory']
try:
client.update(info_hash, options)
log.verbose('Updated %s (%s) in rtorrent ' % (entry['title'], info_hash))
except (IOError, xmlrpc_client.Error) as e:
entry.fail('Failed to update: %s' % str(e))
return
def add_entry(self, client, entry, options, start=True, mkdir=False):
if 'torrent_info_hash' not in entry:
entry.fail('missing torrent_info_hash')
return
if entry['url'].startswith('magnet:'):
torrent_raw = 'd10:magnet-uri%d:%se' % (len(entry['url']), entry['url'])
torrent_raw = torrent_raw.encode('ascii')
else:
if 'file' not in entry:
raise plugin.PluginError('Temporary download file is missing from entry')
if not os.path.exists(entry['file']):
raise plugin.PluginError('Temporary download file is missing from disk')
if not is_torrent_file(entry['file']):
entry.fail("Downloaded temp file '%s' is not a torrent file" % entry['file'])
return
try:
with open(entry['file'], 'rb') as f:
torrent_raw = f.read()
except IOError as e:
entry.fail('Failed to add to rTorrent %s' % str(e))
return
try:
Torrent(torrent_raw)
except SyntaxError as e:
entry.fail('Strange, unable to decode torrent, raise a BUG: %s' % str(e))
return
try:
if client.torrent(entry['torrent_info_hash']):
log.warning("Torrent %s already exists, won't add" % entry['title'])
return
except IOError as e:
entry.fail("Error checking if torrent already exists %s" % str(e))
except xmlrpc_client.Error:
# No existing found
pass
try:
resp = client.load(torrent_raw, fields=options, start=start, mkdir=mkdir)
if resp != 0:
entry.fail('Failed to add to rTorrent invalid return value %s' % resp)
except (IOError, xmlrpc_client.Error) as e:
log.exception(e)
entry.fail('Failed to add to rTorrent %s' % str(e))
return
# Verify the torrent loaded
try:
self._verify_load(client, entry['torrent_info_hash'])
log.info('%s added to rtorrent' % entry['title'])
except (IOError, xmlrpc_client.Error) as e:
entry.fail('Failed to verify torrent loaded: %s' % str(e))
def on_task_exit(self, task, config):
# If download plugin is enabled, it will handle cleanup.
if 'download' not in task.config:
download = plugin.get_plugin_by_name('download')
download.instance.cleanup_temp_files(task)
on_task_abort = on_task_exit
class RTorrentInputPlugin(RTorrentPluginBase):
schema = {
'type': 'object',
'properties': {
'uri': {'type': 'string'},
'username': {'type': 'string'},
'password': {'type': 'string'},
'digest_auth': {'type': 'boolean', 'default': False},
'view': {'type': 'string', 'default': 'main'},
'fields': one_or_more({'type': 'string', 'enum': list(RTorrent.default_fields)}),
},
'required': ['uri'],
'additionalProperties': False
}
def on_task_input(self, task, config):
client = RTorrent(os.path.expanduser(config['uri']),
username=config.get('username'),
password=config.get('password'),
digest_auth=config['digest_auth'],
session=task.requests)
fields = config.get('fields')
try:
torrents = client.torrents(config['view'], fields=fields)
except (IOError, xmlrpc_client.Error) as e:
task.abort('Could not get torrents (%s): %s' % (config['view'], e))
return
entries = []
for torrent in torrents:
entry = Entry(
title=torrent['name'],
url='%s/%s' % (os.path.expanduser(config['uri']),
torrent['hash']),
path=torrent['base_path'],
torrent_info_hash=torrent['hash'],
)
for attr, value in torrent.items():
entry[attr] = value
entries.append(entry)
return entries
@event('plugin.register')
def register_plugin():
plugin.register(RTorrentOutputPlugin, 'rtorrent', api_ver=2)
plugin.register(RTorrentInputPlugin, 'from_rtorrent', api_ver=2)
| true | true |
1c3b207e2356cbcd1d34b8c086d0f5583457a91d | 7,011 | py | Python | pygcn/layers.py | NightmareNyx/pygcn | 3972f167ce7fcc41cb21284d75816dfd9a15f7ef | [
"MIT"
] | null | null | null | pygcn/layers.py | NightmareNyx/pygcn | 3972f167ce7fcc41cb21284d75816dfd9a15f7ef | [
"MIT"
] | null | null | null | pygcn/layers.py | NightmareNyx/pygcn | 3972f167ce7fcc41cb21284d75816dfd9a15f7ef | [
"MIT"
] | null | null | null | import math
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.modules.module import Module
from torch.nn.parameter import Parameter
class GraphConvolution(Module):
    """Basic graph convolution layer (Kipf & Welling, https://arxiv.org/abs/1609.02907).

    Computes adj @ (input @ weight) + bias, where adj may be a sparse tensor.
    """

    def __init__(self, in_features, out_features, bias=True, init_method='xavier'):
        super(GraphConvolution, self).__init__()
        self.in_features = in_features
        self.out_features = out_features
        self.weight = Parameter(torch.FloatTensor(in_features, out_features))
        if bias:
            self.bias = Parameter(torch.FloatTensor(out_features))
        else:
            # Register a None bias so the layer still reports it as a parameter slot.
            self.register_parameter('bias', None)
        self.reset_parameters(method=init_method)

    def reset_parameters(self, method='xavier'):
        """(Re-)initialize weight and bias using the chosen scheme."""
        has_bias = self.bias is not None
        if method == 'uniform':
            bound = 1. / math.sqrt(self.weight.size(1))
            self.weight.data.uniform_(-bound, bound)
            if has_bias:
                self.bias.data.uniform_(-bound, bound)
        elif method == 'kaiming':
            nn.init.kaiming_normal_(self.weight.data, a=0, mode='fan_in')
            if has_bias:
                nn.init.constant_(self.bias.data, 0.0)
        elif method == 'xavier':
            # NOTE(review): uses xavier_normal_ although the original comment
            # said "Xavier Uniform" — kept as-is to preserve behaviour.
            nn.init.xavier_normal_(self.weight.data, gain=0.02)
            if has_bias:
                nn.init.constant_(self.bias.data, 0.0)
        else:
            raise NotImplementedError

    def forward(self, input, adj):
        """Propagate features: out = adj @ (input @ W) (+ bias)."""
        transformed = torch.mm(input, self.weight)
        aggregated = torch.spmm(adj, transformed)
        return aggregated + self.bias if self.bias is not None else aggregated

    def __repr__(self):
        return '%s (%s -> %s)' % (self.__class__.__name__, self.in_features, self.out_features)
class GraphAttention(nn.Module):
    """Dense graph attention layer (GAT, https://arxiv.org/abs/1710.10903)."""

    def __init__(self, in_features, out_features, dropout, alpha, concat=True):
        super(GraphAttention, self).__init__()
        self.dropout = dropout
        self.in_features = in_features
        self.out_features = out_features
        self.alpha = alpha
        self.concat = concat

        param_type = torch.cuda.FloatTensor if torch.cuda.is_available() else torch.FloatTensor

        def _xavier(rows, cols):
            # Xavier-normal init with ReLU gain, placed on the right device type.
            tensor = torch.Tensor(rows, cols).type(param_type)
            return nn.Parameter(nn.init.xavier_normal_(tensor, gain=np.sqrt(2.0)),
                                requires_grad=True)

        self.W = _xavier(in_features, out_features)
        self.a1 = _xavier(out_features, 1)
        self.a2 = _xavier(out_features, 1)
        self.leaky_relu = nn.LeakyReLU(self.alpha)

    def forward(self, input, adj):
        """Attend over neighbours given a dense adjacency (non-zero == edge)."""
        h = torch.mm(input, self.W)
        # Additive attention logits: e_ij = leaky_relu(a1·h_i + a2·h_j).
        f_1 = torch.mm(h, self.a1)
        f_2 = torch.mm(h, self.a2)
        logits = self.leaky_relu(f_1 + f_2.transpose(0, 1))
        # Mask non-edges with a large negative so softmax gives them ~0 weight.
        masked = torch.where(adj > 0, logits, -9e15 * torch.ones_like(logits))
        attention = F.softmax(masked, dim=1)
        attention = F.dropout(attention, self.dropout, training=self.training)
        h_prime = torch.matmul(attention, h)
        return F.elu(h_prime) if self.concat else h_prime

    def __repr__(self):
        return '%s (%s -> %s)' % (self.__class__.__name__, self.in_features, self.out_features)
class SpecialSpmmFunction(torch.autograd.Function):
    """Sparse @ dense matmul whose backward only produces gradients for the
    sparse values (and the dense operand), never for the index structure."""

    @staticmethod
    def forward(ctx, indices, values, shape, b):
        """Return (sparse(indices, values, shape)) @ b.

        indices must not require grad — only `values` and `b` are differentiable.
        """
        assert indices.requires_grad == False
        a = torch.sparse_coo_tensor(indices, values, shape)
        ctx.save_for_backward(a, b)
        # Row count is needed in backward to flatten (row, col) into a linear index.
        ctx.N = shape[0]
        return torch.matmul(a, b)

    @staticmethod
    def backward(ctx, grad_output):
        a, b = ctx.saved_tensors
        grad_values = grad_b = None
        if ctx.needs_input_grad[1]:
            # d(out)/d(values): pick out of the dense gradient only the entries
            # at the sparse matrix's nonzero positions.
            grad_a_dense = grad_output.matmul(b.t())
            edge_idx = a._indices()[0, :] * ctx.N + a._indices()[1, :]
            grad_values = grad_a_dense.view(-1)[edge_idx]
        if ctx.needs_input_grad[3]:
            grad_b = a.t().matmul(grad_output)
        # One gradient slot per forward argument: indices, values, shape, b.
        return None, grad_values, None, grad_b
class SpecialSpmm(nn.Module):
def forward(self, indices, values, shape, b):
return SpecialSpmmFunction.apply(indices, values, shape, b)
class SpGraphAttentionLayer(nn.Module):
"""
Sparse version GAT layer, similar to https://arxiv.org/abs/1710.10903
"""
def __init__(self, in_features, out_features, dropout, alpha, concat=True):
super(SpGraphAttentionLayer, self).__init__()
self.in_features = in_features
self.out_features = out_features
self.alpha = alpha
self.concat = concat
self.W = nn.Parameter(torch.zeros(size=(in_features, out_features)))
nn.init.xavier_normal_(self.W.data, gain=1.414)
self.a = nn.Parameter(torch.zeros(size=(1, 2 * out_features)))
nn.init.xavier_normal_(self.a.data, gain=1.414)
self.dropout = nn.Dropout(dropout)
self.leaky_relu = nn.LeakyReLU(self.alpha)
self.special_spmm = SpecialSpmm()
def forward(self, input, adj):
N = input.size()[0]
edge = adj.nonzero().t()
h = torch.mm(input, self.W)
# h: N x out
assert not torch.isnan(h).any()
# Self-attention on the nodes - Shared attention mechanism
edge_h = torch.cat((h[edge[0, :], :], h[edge[1, :], :]), dim=1).t()
# edge: 2*D x E
edge_e = torch.exp(-self.leaky_relu(self.a.mm(edge_h).squeeze()))
assert not torch.isnan(edge_e).any()
# edge_e: E
e_rowsum = self.special_spmm(edge, edge_e, torch.Size([N, N]), torch.ones(size=(N, 1)).cuda())
# e_rowsum: N x 1
edge_e = self.dropout(edge_e)
# edge_e: E
h_prime = self.special_spmm(edge, edge_e, torch.Size([N, N]), h)
assert not torch.isnan(h_prime).any()
# h_prime: N x out
h_prime = h_prime.div(e_rowsum)
# h_prime: N x out
assert not torch.isnan(h_prime).any()
if self.concat:
# if this layer is not last layer,
return F.elu(h_prime)
else:
# if this layer is last layer,
return h_prime
def __repr__(self):
return self.__class__.__name__ + ' (' + str(self.in_features) + ' -> ' + str(self.out_features) + ')'
| 35.055 | 110 | 0.59535 | import math
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.modules.module import Module
from torch.nn.parameter import Parameter
class GraphConvolution(Module):
def __init__(self, in_features, out_features, bias=True, init_method='xavier'):
super(GraphConvolution, self).__init__()
self.in_features = in_features
self.out_features = out_features
self.weight = Parameter(torch.FloatTensor(in_features, out_features))
if bias:
self.bias = Parameter(torch.FloatTensor(out_features))
else:
self.register_parameter('bias', None)
self.reset_parameters(method=init_method)
def reset_parameters(self, method='xavier'):
if method == 'uniform':
stdv = 1. / math.sqrt(self.weight.size(1))
self.weight.data.uniform_(-stdv, stdv)
if self.bias is not None:
self.bias.data.uniform_(-stdv, stdv)
elif method == 'kaiming':
nn.init.kaiming_normal_(self.weight.data, a=0, mode='fan_in')
if self.bias is not None:
nn.init.constant_(self.bias.data, 0.0)
elif method == 'xavier':
nn.init.xavier_normal_(self.weight.data, gain=0.02)
if self.bias is not None:
nn.init.constant_(self.bias.data, 0.0)
else:
raise NotImplementedError
def forward(self, input, adj):
support = torch.mm(input, self.weight)
output = torch.spmm(adj, support)
if self.bias is not None:
return output + self.bias
else:
return output
def __repr__(self):
return self.__class__.__name__ + ' (' \
+ str(self.in_features) + ' -> ' \
+ str(self.out_features) + ')'
class GraphAttention(nn.Module):
def __init__(self, in_features, out_features, dropout, alpha, concat=True):
super(GraphAttention, self).__init__()
self.dropout = dropout
self.in_features = in_features
self.out_features = out_features
self.alpha = alpha
self.concat = concat
if torch.cuda.is_available():
param_type = torch.cuda.FloatTensor
else:
param_type = torch.FloatTensor
self.W = nn.Parameter(nn.init.xavier_normal_(torch.Tensor(in_features, out_features).type(param_type),
gain=np.sqrt(2.0)), requires_grad=True)
self.a1 = nn.Parameter(nn.init.xavier_normal_(torch.Tensor(out_features, 1).type(param_type),
gain=np.sqrt(2.0)), requires_grad=True)
self.a2 = nn.Parameter(nn.init.xavier_normal_(torch.Tensor(out_features, 1).type(param_type),
gain=np.sqrt(2.0)), requires_grad=True)
self.leaky_relu = nn.LeakyReLU(self.alpha)
def forward(self, input, adj):
h = torch.mm(input, self.W)
N = h.size()[0]
f_1 = torch.mm(h, self.a1)
f_2 = torch.mm(h, self.a2)
e = self.leaky_relu(f_1 + f_2.transpose(0, 1))
zero_vec = -9e15 * torch.ones_like(e)
attention = torch.where(adj > 0, e, zero_vec)
attention = F.softmax(attention, dim=1)
attention = F.dropout(attention, self.dropout, training=self.training)
h_prime = torch.matmul(attention, h)
if self.concat:
return F.elu(h_prime)
else:
return h_prime
def __repr__(self):
return self.__class__.__name__ + ' (' \
+ str(self.in_features) + ' -> ' \
+ str(self.out_features) + ')'
class SpecialSpmmFunction(torch.autograd.Function):
@staticmethod
def forward(ctx, indices, values, shape, b):
assert indices.requires_grad == False
a = torch.sparse_coo_tensor(indices, values, shape)
ctx.save_for_backward(a, b)
ctx.N = shape[0]
return torch.matmul(a, b)
@staticmethod
def backward(ctx, grad_output):
a, b = ctx.saved_tensors
grad_values = grad_b = None
if ctx.needs_input_grad[1]:
grad_a_dense = grad_output.matmul(b.t())
edge_idx = a._indices()[0, :] * ctx.N + a._indices()[1, :]
grad_values = grad_a_dense.view(-1)[edge_idx]
if ctx.needs_input_grad[3]:
grad_b = a.t().matmul(grad_output)
return None, grad_values, None, grad_b
class SpecialSpmm(nn.Module):
def forward(self, indices, values, shape, b):
return SpecialSpmmFunction.apply(indices, values, shape, b)
class SpGraphAttentionLayer(nn.Module):
def __init__(self, in_features, out_features, dropout, alpha, concat=True):
super(SpGraphAttentionLayer, self).__init__()
self.in_features = in_features
self.out_features = out_features
self.alpha = alpha
self.concat = concat
self.W = nn.Parameter(torch.zeros(size=(in_features, out_features)))
nn.init.xavier_normal_(self.W.data, gain=1.414)
self.a = nn.Parameter(torch.zeros(size=(1, 2 * out_features)))
nn.init.xavier_normal_(self.a.data, gain=1.414)
self.dropout = nn.Dropout(dropout)
self.leaky_relu = nn.LeakyReLU(self.alpha)
self.special_spmm = SpecialSpmm()
def forward(self, input, adj):
N = input.size()[0]
edge = adj.nonzero().t()
h = torch.mm(input, self.W)
assert not torch.isnan(h).any()
edge_h = torch.cat((h[edge[0, :], :], h[edge[1, :], :]), dim=1).t()
edge_e = torch.exp(-self.leaky_relu(self.a.mm(edge_h).squeeze()))
assert not torch.isnan(edge_e).any()
e_rowsum = self.special_spmm(edge, edge_e, torch.Size([N, N]), torch.ones(size=(N, 1)).cuda())
edge_e = self.dropout(edge_e)
h_prime = self.special_spmm(edge, edge_e, torch.Size([N, N]), h)
assert not torch.isnan(h_prime).any()
h_prime = h_prime.div(e_rowsum)
assert not torch.isnan(h_prime).any()
if self.concat:
return F.elu(h_prime)
else:
return h_prime
def __repr__(self):
return self.__class__.__name__ + ' (' + str(self.in_features) + ' -> ' + str(self.out_features) + ')'
| true | true |
1c3b21a6051b643b15a54070521ba8535cdd779f | 963 | py | Python | serpent/templates/SerpentGamePlugin/files/serpent_game.py | DylanSpicker/SerpentAI | c48c4b072e0d1084a52eac569ad1c7fa02ac7348 | [
"MIT"
] | null | null | null | serpent/templates/SerpentGamePlugin/files/serpent_game.py | DylanSpicker/SerpentAI | c48c4b072e0d1084a52eac569ad1c7fa02ac7348 | [
"MIT"
] | null | null | null | serpent/templates/SerpentGamePlugin/files/serpent_game.py | DylanSpicker/SerpentAI | c48c4b072e0d1084a52eac569ad1c7fa02ac7348 | [
"MIT"
] | null | null | null | from serpent.game import Game
from .api.api import MyGameAPI
from serpent.utilities import Singleton
from serpent.game_launchers.web_browser_game_launcher import WebBrowser
import time
class SerpentGame(Game, metaclass=Singleton):
def __init__(self, **kwargs):
kwargs["platform"] = "PLATFORM"
kwargs["window_name"] = "WINDOW_NAME"
kwargs["app_id"] = "APP_ID"
kwargs["app_args"] = None
kwargs["executable_path"] = "EXECUTABLE_PATH"
kwargs["url"] = "URL"
kwargs["browser"] = WebBrowser.DEFAULT
kwargs["rom_path"] = "ROM_PATH"
kwargs["core_path"] = "CORE_PATH"
super().__init__(**kwargs)
self.api_class = MyGameAPI
self.api_instance = None
self.environments = dict()
self.environment_data = dict()
@property
def screen_regions(self):
regions = {
"SAMPLE_REGION": (0, 0, 0, 0)
}
return regions | 23.487805 | 71 | 0.624091 | from serpent.game import Game
from .api.api import MyGameAPI
from serpent.utilities import Singleton
from serpent.game_launchers.web_browser_game_launcher import WebBrowser
import time
class SerpentGame(Game, metaclass=Singleton):
def __init__(self, **kwargs):
kwargs["platform"] = "PLATFORM"
kwargs["window_name"] = "WINDOW_NAME"
kwargs["app_id"] = "APP_ID"
kwargs["app_args"] = None
kwargs["executable_path"] = "EXECUTABLE_PATH"
kwargs["url"] = "URL"
kwargs["browser"] = WebBrowser.DEFAULT
kwargs["rom_path"] = "ROM_PATH"
kwargs["core_path"] = "CORE_PATH"
super().__init__(**kwargs)
self.api_class = MyGameAPI
self.api_instance = None
self.environments = dict()
self.environment_data = dict()
@property
def screen_regions(self):
regions = {
"SAMPLE_REGION": (0, 0, 0, 0)
}
return regions | true | true |
1c3b234df1cdd611ccd280e733b2d1026d86edb6 | 4,266 | py | Python | openstack_dashboard/dashboards/sdscontroller/bandwidth_differentiation/slas/forms.py | iostackproject/SDS-dashboard | efa3d7968c738bfb10bc19776f24f2937d5802d8 | [
"Apache-2.0"
] | 1 | 2021-01-20T00:14:15.000Z | 2021-01-20T00:14:15.000Z | openstack_dashboard/dashboards/sdscontroller/bandwidth_differentiation/slas/forms.py | iostackproject/SDS-dashboard | efa3d7968c738bfb10bc19776f24f2937d5802d8 | [
"Apache-2.0"
] | null | null | null | openstack_dashboard/dashboards/sdscontroller/bandwidth_differentiation/slas/forms.py | iostackproject/SDS-dashboard | efa3d7968c738bfb10bc19776f24f2937d5802d8 | [
"Apache-2.0"
] | null | null | null | from django.core.urlresolvers import reverse
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import forms
from horizon import messages
from openstack_dashboard.api import sds_controller as api
from openstack_dashboard.dashboards.sdscontroller import common
from openstack_dashboard.dashboards.sdscontroller import exceptions as sdsexception
class CreateSLA(forms.SelfHandlingForm):
project_choices = []
project_id = forms.ChoiceField(choices=project_choices,
label=_("Project"),
help_text=_("The project where the rule will be applied."),
required=True)
policy_choices = []
policy_id = forms.ChoiceField(choices=policy_choices,
label=_("Storage Policy (Ring)"),
help_text=_("The storage policy that you want to assign to the specific project."),
required=True)
bandwidth = forms.CharField(max_length=255,
label=_("Bandwidth"),
help_text=_("The bandwidth that you want to assign to the specific project."),
widget=forms.TextInput(
attrs={"ng-model": "bandwidth", "not-blank": ""}
))
def __init__(self, request, *args, **kwargs):
# Obtain list of projects
self.project_choices = common.get_project_list_choices(request)
# Obtain list of storage policies
self.storage_policy_choices = common.get_storage_policy_list_choices(request)
# Initialization
super(CreateSLA, self).__init__(request, *args, **kwargs)
# Overwrite target_id input form
self.fields['project_id'] = forms.ChoiceField(choices=self.project_choices,
label=_("Project"),
help_text=_("The project where the rule will be apply."),
required=True)
self.fields['policy_id'] = forms.ChoiceField(choices=self.storage_policy_choices,
label=_("Storage Policy (Ring)"),
help_text=_("The storage policy that you want to assign to the specific project."),
required=True)
@staticmethod
def handle(request, data):
try:
response = api.bw_add_sla(request, data)
if 200 <= response.status_code < 300:
messages.success(request, _("Successfully SLA creation."))
return data
else:
raise sdsexception.SdsException(response.text)
except Exception as ex:
redirect = reverse("horizon:sdscontroller:bandwidth_differentiation:index")
error_message = "Unable to create sla.\t %s" % ex.message
exceptions.handle(request, _(error_message), redirect=redirect)
class UpdateSLA(forms.SelfHandlingForm):
bandwidth = forms.CharField(max_length=255,
label=_("Bandwidth"),
required=False,
help_text=_("The new bandwidth that you want to assign to the specific project."))
def __init__(self, request, *args, **kwargs):
super(UpdateSLA, self).__init__(request, *args, **kwargs)
def handle(self, request, data):
try:
sla_id = self.initial["id"]
response = api.bw_update_sla(request, sla_id, data)
if 200 <= response.status_code < 300:
messages.success(request, _("Successfully sla update."))
return data
else:
raise sdsexception.SdsException(response.text)
except Exception as ex:
redirect = reverse("horizon:sdscontroller:bandwidth_differentiation:index")
error_message = "Unable to update sla.\t %s" % ex.message
exceptions.handle(request, _(error_message), redirect=redirect)
| 47.4 | 136 | 0.568214 | from django.core.urlresolvers import reverse
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import forms
from horizon import messages
from openstack_dashboard.api import sds_controller as api
from openstack_dashboard.dashboards.sdscontroller import common
from openstack_dashboard.dashboards.sdscontroller import exceptions as sdsexception
class CreateSLA(forms.SelfHandlingForm):
project_choices = []
project_id = forms.ChoiceField(choices=project_choices,
label=_("Project"),
help_text=_("The project where the rule will be applied."),
required=True)
policy_choices = []
policy_id = forms.ChoiceField(choices=policy_choices,
label=_("Storage Policy (Ring)"),
help_text=_("The storage policy that you want to assign to the specific project."),
required=True)
bandwidth = forms.CharField(max_length=255,
label=_("Bandwidth"),
help_text=_("The bandwidth that you want to assign to the specific project."),
widget=forms.TextInput(
attrs={"ng-model": "bandwidth", "not-blank": ""}
))
def __init__(self, request, *args, **kwargs):
self.project_choices = common.get_project_list_choices(request)
self.storage_policy_choices = common.get_storage_policy_list_choices(request)
super(CreateSLA, self).__init__(request, *args, **kwargs)
self.fields['project_id'] = forms.ChoiceField(choices=self.project_choices,
label=_("Project"),
help_text=_("The project where the rule will be apply."),
required=True)
self.fields['policy_id'] = forms.ChoiceField(choices=self.storage_policy_choices,
label=_("Storage Policy (Ring)"),
help_text=_("The storage policy that you want to assign to the specific project."),
required=True)
@staticmethod
def handle(request, data):
try:
response = api.bw_add_sla(request, data)
if 200 <= response.status_code < 300:
messages.success(request, _("Successfully SLA creation."))
return data
else:
raise sdsexception.SdsException(response.text)
except Exception as ex:
redirect = reverse("horizon:sdscontroller:bandwidth_differentiation:index")
error_message = "Unable to create sla.\t %s" % ex.message
exceptions.handle(request, _(error_message), redirect=redirect)
class UpdateSLA(forms.SelfHandlingForm):
bandwidth = forms.CharField(max_length=255,
label=_("Bandwidth"),
required=False,
help_text=_("The new bandwidth that you want to assign to the specific project."))
def __init__(self, request, *args, **kwargs):
super(UpdateSLA, self).__init__(request, *args, **kwargs)
def handle(self, request, data):
try:
sla_id = self.initial["id"]
response = api.bw_update_sla(request, sla_id, data)
if 200 <= response.status_code < 300:
messages.success(request, _("Successfully sla update."))
return data
else:
raise sdsexception.SdsException(response.text)
except Exception as ex:
redirect = reverse("horizon:sdscontroller:bandwidth_differentiation:index")
error_message = "Unable to update sla.\t %s" % ex.message
exceptions.handle(request, _(error_message), redirect=redirect)
| true | true |
1c3b24f48ef9c579975997fead4bf975d5a3a08e | 27,381 | py | Python | scipy/optimize/tests/test_optimize.py | dlax/scipy | 221cb8fa31c45d08ec6d9f946ebf9476bdc1fccd | [
"BSD-3-Clause"
] | null | null | null | scipy/optimize/tests/test_optimize.py | dlax/scipy | 221cb8fa31c45d08ec6d9f946ebf9476bdc1fccd | [
"BSD-3-Clause"
] | null | null | null | scipy/optimize/tests/test_optimize.py | dlax/scipy | 221cb8fa31c45d08ec6d9f946ebf9476bdc1fccd | [
"BSD-3-Clause"
] | null | null | null | """
Unit tests for optimization routines from optimize.py and tnc.py
Authors:
Ed Schofield, Nov 2005
Andrew Straw, April 2008
To run it in its simplest form::
nosetests test_optimize.py
"""
from numpy.testing import assert_raises, assert_allclose, \
assert_equal, assert_, TestCase, run_module_suite
from scipy import optimize
import numpy as np
from math import pow
class TestOptimize(TestCase):
""" Test case for a simple constrained entropy maximization problem
(the machine translation example of Berger et al in
Computational Linguistics, vol 22, num 1, pp 39--72, 1996.)
"""
def setUp(self):
self.F = np.array([[1,1,1],[1,1,0],[1,0,1],[1,0,0],[1,0,0]])
self.K = np.array([1., 0.3, 0.5])
self.startparams = np.zeros(3, np.float64)
self.solution = np.array([0., -0.524869316, 0.487525860])
self.maxiter = 1000
self.funccalls = 0
self.gradcalls = 0
self.trace = []
def func(self, x):
self.funccalls += 1
if self.funccalls > 6000:
raise RuntimeError("too many iterations in optimization routine")
log_pdot = np.dot(self.F, x)
logZ = np.log(sum(np.exp(log_pdot)))
f = logZ - np.dot(self.K, x)
self.trace.append(x)
return f
def grad(self, x):
self.gradcalls += 1
log_pdot = np.dot(self.F, x)
logZ = np.log(sum(np.exp(log_pdot)))
p = np.exp(log_pdot - logZ)
return np.dot(self.F.transpose(), p) - self.K
def hess(self, x):
log_pdot = np.dot(self.F, x)
logZ = np.log(sum(np.exp(log_pdot)))
p = np.exp(log_pdot - logZ)
return np.dot(self.F.T,
np.dot(np.diag(p), self.F - np.dot(self.F.T, p)))
def hessp(self, x, p):
return np.dot(self.hess(x), p)
def test_cg(self, use_wrapper=False):
""" conjugate gradient optimization routine """
if use_wrapper:
opts = {'maxit': self.maxiter, 'disp': False}
params, info = optimize.minimize(self.func, self.startparams,
args=(), method='CG',
jac=self.grad, options=opts,
full_output=True,
retall=False)
fopt, func_calls, grad_calls, warnflag = \
info['fun'], info['nfev'], info['njev'], info['status']
else:
retval = optimize.fmin_cg(self.func, self.startparams, self.grad, (),
maxiter=self.maxiter,
full_output=True, disp=False, retall=False)
(params, fopt, func_calls, grad_calls, warnflag) = retval
assert_allclose(self.func(params), self.func(self.solution),
atol=1e-6)
# Ensure that function call counts are 'known good'; these are from
# Scipy 0.7.0. Don't allow them to increase.
assert_(self.funccalls == 9, self.funccalls)
assert_(self.gradcalls == 7, self.gradcalls)
# Ensure that the function behaves the same; this is from Scipy 0.7.0
assert_allclose(self.trace[2:4],
[[0, -0.5, 0.5],
[0, -5.05700028e-01, 4.95985862e-01]],
atol=1e-14, rtol=1e-7)
def test_bfgs(self, use_wrapper=False):
""" Broyden-Fletcher-Goldfarb-Shanno optimization routine """
if use_wrapper:
opts = {'maxit': self.maxiter, 'disp': False}
params, info = optimize.minimize(self.func, self.startparams,
jac=self.grad, method='BFGS',
args=(), options=opts,
full_output=True,
retall=False)
fopt, gopt, Hopt, func_calls, grad_calls, warnflag = \
info['fun'], info['jac'], info['hess'], info['nfev'], \
info['njev'], info['status']
else:
retval = optimize.fmin_bfgs(self.func, self.startparams, self.grad,
args=(), maxiter=self.maxiter,
full_output=True, disp=False, retall=False)
(params, fopt, gopt, Hopt, func_calls, grad_calls, warnflag) = retval
assert_allclose(self.func(params), self.func(self.solution),
atol=1e-6)
# Ensure that function call counts are 'known good'; these are from
# Scipy 0.7.0. Don't allow them to increase.
assert_(self.funccalls == 10, self.funccalls)
assert_(self.gradcalls == 8, self.gradcalls)
# Ensure that the function behaves the same; this is from Scipy 0.7.0
assert_allclose(self.trace[6:8],
[[0, -5.25060743e-01, 4.87748473e-01],
[0, -5.24885582e-01, 4.87530347e-01]],
atol=1e-14, rtol=1e-7)
def test_bfgs_nan(self):
"""Test corner case where nan is fed to optimizer. See #1542."""
func = lambda x: x
fprime = lambda x: np.ones_like(x)
x0 = [np.nan]
olderr = np.seterr(over='ignore')
try:
x = optimize.fmin_bfgs(func, x0, fprime, disp=False)
assert_(np.isnan(func(x)))
finally:
np.seterr(**olderr)
def test_bfgs_numerical_jacobian(self):
""" BFGS with numerical jacobian and a vector epsilon parameter """
# define the epsilon parameter using a random vector
epsilon = np.sqrt(np.finfo(float).eps) * np.random.rand(len(self.solution))
params = optimize.fmin_bfgs(self.func, self.startparams,
epsilon=epsilon, args=(),
maxiter=self.maxiter, disp=False)
assert_allclose(self.func(params), self.func(self.solution),
atol=1e-6)
def test_bfgs_infinite(self, use_wrapper=False):
"""Test corner case where -Inf is the minimum. See #1494."""
func = lambda x: -np.e**-x
fprime = lambda x: -func(x)
x0 = [0]
olderr = np.seterr(over='ignore')
try:
if use_wrapper:
opts = {'disp': False}
x = optimize.minimize(func, x0, jac=fprime, method='BFGS',
args=(), options=opts)
else:
x = optimize.fmin_bfgs(func, x0, fprime, disp=False)
assert_(not np.isfinite(func(x)))
finally:
np.seterr(**olderr)
def test_powell(self, use_wrapper=False):
""" Powell (direction set) optimization routine
"""
if use_wrapper:
opts = {'maxit': self.maxiter, 'disp': False}
params, info = optimize.minimize(self.func, self.startparams,
args=(), method='Powell',
options=opts,
full_output=True,
retall=False)
fopt, direc, numiter, func_calls, warnflag = \
info['fun'], info['direc'], info['nit'], info['nfev'], \
info['status']
else:
retval = optimize.fmin_powell(self.func, self.startparams,
args=(), maxiter=self.maxiter,
full_output=True, disp=False, retall=False)
(params, fopt, direc, numiter, func_calls, warnflag) = retval
assert_allclose(self.func(params), self.func(self.solution),
atol=1e-6)
# Ensure that function call counts are 'known good'; these are from
# Scipy 0.7.0. Don't allow them to increase.
#
# However, some leeway must be added: the exact evaluation
# count is sensitive to numerical error, and floating-point
# computations are not bit-for-bit reproducible across
# machines, and when using e.g. MKL, data alignment
# etc. affect the rounding error.
#
assert_(self.funccalls <= 116 + 20, self.funccalls)
assert_(self.gradcalls == 0, self.gradcalls)
# Ensure that the function behaves the same; this is from Scipy 0.7.0
assert_allclose(self.trace[34:39],
[[ 0.72949016, -0.44156936, 0.47100962],
[ 0.72949016, -0.44156936, 0.48052496],
[ 1.45898031, -0.88313872, 0.95153458],
[ 0.72949016, -0.44156936, 0.47576729],
[ 1.72949016, -0.44156936, 0.47576729]],
atol=1e-14, rtol=1e-7)
def test_neldermead(self, use_wrapper=False):
""" Nelder-Mead simplex algorithm
"""
if use_wrapper:
opts = {'maxit': self.maxiter, 'disp': False}
params, info = optimize.minimize(self.func, self.startparams,
args=(), method='Nelder-mead',
options=opts,
full_output=True,
retall=False)
fopt, numiter, func_calls, warnflag = \
info['fun'], info['nit'], info['nfev'], info['status']
else:
retval = optimize.fmin(self.func, self.startparams,
args=(), maxiter=self.maxiter,
full_output=True, disp=False, retall=False)
(params, fopt, numiter, func_calls, warnflag) = retval
assert_allclose(self.func(params), self.func(self.solution),
atol=1e-6)
# Ensure that function call counts are 'known good'; these are from
# Scipy 0.7.0. Don't allow them to increase.
assert_(self.funccalls == 167, self.funccalls)
assert_(self.gradcalls == 0, self.gradcalls)
# Ensure that the function behaves the same; this is from Scipy 0.7.0
assert_allclose(self.trace[76:78],
[[0.1928968 , -0.62780447, 0.35166118],
[0.19572515, -0.63648426, 0.35838135]],
atol=1e-14, rtol=1e-7)
def test_ncg(self, use_wrapper=False):
""" line-search Newton conjugate gradient optimization routine
"""
if use_wrapper:
opts = {'maxit': self.maxiter, 'disp': False}
retval = optimize.minimize(self.func, self.startparams,
method='Newton-CG', jac=self.grad,
args=(), options=opts,
full_output=False, retall=False)
else:
retval = optimize.fmin_ncg(self.func, self.startparams, self.grad,
args=(), maxiter=self.maxiter,
full_output=False, disp=False,
retall=False)
params = retval
assert_allclose(self.func(params), self.func(self.solution),
atol=1e-6)
# Ensure that function call counts are 'known good'; these are from
# Scipy 0.7.0. Don't allow them to increase.
assert_(self.funccalls == 7, self.funccalls)
assert_(self.gradcalls <= 18, self.gradcalls) # 0.9.0
#assert_(self.gradcalls == 18, self.gradcalls) # 0.8.0
#assert_(self.gradcalls == 22, self.gradcalls) # 0.7.0
# Ensure that the function behaves the same; this is from Scipy 0.7.0
assert_allclose(self.trace[3:5],
[[-4.35700753e-07, -5.24869435e-01, 4.87527480e-01],
[-4.35700753e-07, -5.24869401e-01, 4.87527774e-01]],
atol=1e-6, rtol=1e-7)
def test_ncg_hess(self, use_wrapper=False):
""" Newton conjugate gradient with Hessian """
if use_wrapper:
opts = {'maxit': self.maxiter, 'disp': False}
retval = optimize.minimize(self.func, self.startparams,
method='Newton-CG', jac=self.grad,
hess = self.hess,
args=(), options=opts,
full_output=False, retall=False)
else:
retval = optimize.fmin_ncg(self.func, self.startparams, self.grad,
fhess = self.hess,
args=(), maxiter=self.maxiter,
full_output=False, disp=False,
retall=False)
params = retval
assert_allclose(self.func(params), self.func(self.solution),
atol=1e-6)
# Ensure that function call counts are 'known good'; these are from
# Scipy 0.7.0. Don't allow them to increase.
assert_(self.funccalls == 7, self.funccalls)
assert_(self.gradcalls <= 18, self.gradcalls) # 0.9.0
#assert_(self.gradcalls == 18, self.gradcalls) # 0.8.0
#assert_(self.gradcalls == 22, self.gradcalls) # 0.7.0
# Ensure that the function behaves the same; this is from Scipy 0.7.0
assert_allclose(self.trace[3:5],
[[-4.35700753e-07, -5.24869435e-01, 4.87527480e-01],
[-4.35700753e-07, -5.24869401e-01, 4.87527774e-01]],
atol=1e-6, rtol=1e-7)
def test_ncg_hessp(self, use_wrapper=False):
""" Newton conjugate gradient with Hessian times a vector p """
if use_wrapper:
opts = {'maxit': self.maxiter, 'disp': False}
retval = optimize.minimize(self.func, self.startparams,
method='Newton-CG', jac=self.grad,
hess = self.hessp,
args=(), options=opts,
full_output=False, retall=False)
else:
retval = optimize.fmin_ncg(self.func, self.startparams, self.grad,
fhess_p = self.hessp,
args=(), maxiter=self.maxiter,
full_output=False, disp=False,
retall=False)
params = retval
assert_allclose(self.func(params), self.func(self.solution),
atol=1e-6)
# Ensure that function call counts are 'known good'; these are from
# Scipy 0.7.0. Don't allow them to increase.
assert_(self.funccalls == 7, self.funccalls)
assert_(self.gradcalls <= 18, self.gradcalls) # 0.9.0
#assert_(self.gradcalls == 18, self.gradcalls) # 0.8.0
#assert_(self.gradcalls == 22, self.gradcalls) # 0.7.0
# Ensure that the function behaves the same; this is from Scipy 0.7.0
assert_allclose(self.trace[3:5],
[[-4.35700753e-07, -5.24869435e-01, 4.87527480e-01],
[-4.35700753e-07, -5.24869401e-01, 4.87527774e-01]],
atol=1e-6, rtol=1e-7)
def test_l_bfgs_b(self):
""" limited-memory bound-constrained BFGS algorithm
"""
retval = optimize.fmin_l_bfgs_b(self.func, self.startparams,
self.grad, args=(),
maxfun=self.maxiter)
(params, fopt, d) = retval
assert_allclose(self.func(params), self.func(self.solution),
atol=1e-6)
# Ensure that function call counts are 'known good'; these are from
# Scipy 0.7.0. Don't allow them to increase.
assert_(self.funccalls == 7, self.funccalls)
assert_(self.gradcalls == 5, self.gradcalls)
# Ensure that the function behaves the same; this is from Scipy 0.7.0
assert_allclose(self.trace[3:5],
[[0. , -0.52489628, 0.48753042],
[0. , -0.52489628, 0.48753042]],
atol=1e-14, rtol=1e-7)
def test_l_bfgs_b_numjac(self):
""" L-BFGS-B with numerical jacobian """
retval = optimize.fmin_l_bfgs_b(self.func, self.startparams,
approx_grad=True,
maxfun=self.maxiter)
(params, fopt, d) = retval
assert_allclose(self.func(params), self.func(self.solution),
atol=1e-6)
def test_l_bfgs_b_funjac(self):
""" L-BFGS-B with combined objective function and jacobian """
def fun(x):
return self.func(x), self.grad(x)
retval = optimize.fmin_l_bfgs_b(fun, self.startparams,
maxfun=self.maxiter)
(params, fopt, d) = retval
assert_allclose(self.func(params), self.func(self.solution),
atol=1e-6)
def test_minimize(self):
"""Tests for the minimize wrapper."""
self.setUp()
self.test_bfgs(True)
self.setUp()
self.test_bfgs_infinite(True)
self.setUp()
self.test_cg(True)
self.setUp()
self.test_ncg(True)
self.setUp()
self.test_ncg_hess(True)
self.setUp()
self.test_ncg_hessp(True)
self.setUp()
self.test_neldermead(True)
self.setUp()
self.test_powell(True)
class TestOptimizeScalar(TestCase):
"""Tests for scalar optimizers"""
def setUp(self):
self.solution = 1.5
def fun(self, x):
"""Objective function"""
return (x - 1.5)**2 - 0.8
def test_brent(self):
""" brent algorithm """
x = optimize.brent(self.fun)
assert_allclose(x, self.solution, atol=1e-6)
x = optimize.brent(self.fun, brack = (-3, -2))
assert_allclose(x, self.solution, atol=1e-6)
x = optimize.brent(self.fun, full_output=True)
assert_allclose(x[0], self.solution, atol=1e-6)
x = optimize.brent(self.fun, brack = (-15, -1, 15))
assert_allclose(x, self.solution, atol=1e-6)
def test_fminbound(self):
"""Test fminbound """
x = optimize.fminbound(self.fun, 0, 1)
assert_allclose(x, 1, atol=1e-4)
x = optimize.fminbound(self.fun, 1, 5)
assert_allclose(x, self.solution, atol=1e-6)
x = optimize.fminbound(self.fun, np.array([1]), np.array([5]))
assert_allclose(x, self.solution, atol=1e-6)
assert_raises(ValueError, optimize.fminbound, self.fun, 5, 1)
def test_fminbound_scalar(self):
assert_raises(ValueError, optimize.fminbound, self.fun,
np.zeros(2), 1)
x = optimize.fminbound(self.fun, 1, np.array(5))
assert_allclose(x, self.solution, atol=1e-6)
class TestTnc(TestCase):
"""TNC non-linear optimization.
These tests are taken from Prof. K. Schittkowski's test examples
for constrained non-linear programming.
http://www.uni-bayreuth.de/departments/math/~kschittkowski/home.htm
"""
# objective functions and jacobian for each test
def f1(self, x):
return 100.0 * pow((x[1] - pow(x[0], 2)), 2) + pow(1.0 - x[0], 2)
def g1(self, x):
dif = [0, 0]
dif[1] = 200.0*(x[1] - pow(x[0], 2))
dif[0] = -2.0 * (x[0] * (dif[1] - 1.0) + 1.0)
return dif
def fg1(self, x):
return self.f1(x), self.g1(x)
def f3(self, x):
return x[1] + pow(x[1] - x[0], 2) * 1.0e-5
def g3(self, x):
dif = [0,0]
dif[0] = -2.0 * (x[1] - x[0]) * 1.0e-5
dif[1] = 1.0 - dif[0]
return dif
def fg3(self, x):
return self.f3(x), self.g3(x)
def f4(self, x):
return pow(x[0] + 1.0, 3) / 3.0 + x[1]
def g4(self, x):
dif = [0,0]
dif[0] = pow(x[0] + 1.0, 2)
dif[1] = 1.0
return dif
def fg4(self, x):
return self.f4(x), self.g4(x)
def f5(self, x):
return np.sin(x[0] + x[1]) + pow(x[0] - x[1], 2) - \
1.5 * x[0] + 2.5 * x[1] + 1.0
def g5(self, x):
dif = [0,0]
v1 = np.cos(x[0] + x[1])
v2 = 2.0*(x[0] - x[1])
dif[0] = v1 + v2 - 1.5
dif[1] = v1 - v2 + 2.5
return dif
def fg5(self, x):
return self.f5(x), self.g5(x)
def f38(self, x):
return (100.0 * pow(x[1] - pow(x[0], 2), 2) +
pow(1.0 - x[0], 2) + 90.0 * pow(x[3] - pow(x[2], 2), 2) +
pow(1.0 - x[2], 2) + 10.1 * (pow(x[1] - 1.0, 2) +
pow(x[3] - 1.0, 2)) +
19.8 * (x[1] - 1.0) * (x[3] - 1.0)) * 1.0e-5
def g38(self, x):
dif = [0, 0, 0, 0]
dif[0] = (-400.0 * x[0] * (x[1] - pow(x[0], 2)) -
2.0 * (1.0 - x[0])) * 1.0e-5
dif[1] = (200.0 * (x[1] - pow(x[0], 2)) + 20.2 * (x[1] - 1.0) +
19.8 * (x[3] - 1.0)) * 1.0e-5
dif[2] = ( - 360.0 * x[2] * (x[3] - pow(x[2], 2)) -
2.0 * (1.0 - x[2])) * 1.0e-5
dif[3] = (180.0 * (x[3] - pow(x[2], 2)) + 20.2 * (x[3] - 1.0) +
19.8 * (x[1] - 1.0)) * 1.0e-5
return dif
def fg38(self, x):
return self.f38(x), self.g38(x)
def f45(self, x):
return 2.0 - x[0] * x[1] * x[2] * x[3] * x[4] / 120.0
def g45(self, x):
dif = [0] * 5
dif[0] = - x[1] * x[2] * x[3] * x[4] / 120.0
dif[1] = - x[0] * x[2] * x[3] * x[4] / 120.0
dif[2] = - x[0] * x[1] * x[3] * x[4] / 120.0
dif[3] = - x[0] * x[1] * x[2] * x[4] / 120.0
dif[4] = - x[0] * x[1] * x[2] * x[3] / 120.0
return dif
def fg45(self, x):
return self.f45(x), self.g45(x)
# tests
def test_tnc1(self):
" TNC: test 1"
fg, x, bounds = self.fg1, [-2, 1], ([-np.inf, None],[-1.5, None])
xopt = [1, 1]
x, nf, rc = optimize.fmin_tnc(fg, x, bounds=bounds,
messages=optimize.tnc.MSG_NONE,
maxfun=200)
assert_allclose(self.f1(x), self.f1(xopt), atol=1e-8,
err_msg="TNC failed with status: " +
optimize.tnc.RCSTRINGS[rc])
def test_tnc1b(self):
" TNC: test 1 (approx. gradient)"
x, bounds = [-2, 1], ([-np.inf, None],[-1.5, None])
xopt = [1, 1]
x, nf, rc = optimize.fmin_tnc(self.f1, x, approx_grad=True,
bounds=bounds,
messages=optimize.tnc.MSG_NONE,
maxfun=200)
assert_allclose(self.f1(x), self.f1(xopt), atol=1e-4,
err_msg="TNC failed with status: " +
optimize.tnc.RCSTRINGS[rc])
def test_tnc1c(self):
" TNC: test 1 (separate fprime)"
x, bounds = [-2, 1], ([-np.inf, None],[-1.5, None])
xopt = [1, 1]
x, nf, rc = optimize.fmin_tnc(self.f1, x, fprime=self.g1,
bounds=bounds,
messages=optimize.tnc.MSG_NONE,
maxfun=200)
assert_allclose(self.f1(x), self.f1(xopt), atol=1e-8,
err_msg="TNC failed with status: " +
optimize.tnc.RCSTRINGS[rc])
def test_tnc2(self):
" TNC: test 2"
fg, x, bounds = self.fg1, [-2, 1], ([-np.inf, None], [1.5, None])
xopt = [-1.2210262419616387, 1.5]
x, nf, rc = optimize.fmin_tnc(fg, x, bounds=bounds,
messages=optimize.tnc.MSG_NONE,
maxfun=200)
assert_allclose(self.f1(x), self.f1(xopt), atol=1e-8,
err_msg="TNC failed with status: " +
optimize.tnc.RCSTRINGS[rc])
def test_tnc3(self):
" TNC: test 3"
fg, x, bounds = self.fg3, [10, 1], ([-np.inf, None], [0.0, None])
xopt = [0, 0]
x, nf, rc = optimize.fmin_tnc(fg, x, bounds=bounds,
messages=optimize.tnc.MSG_NONE,
maxfun=200)
assert_allclose(self.f3(x), self.f3(xopt), atol=1e-8,
err_msg="TNC failed with status: " +
optimize.tnc.RCSTRINGS[rc])
def test_tnc4(self):
" TNC: test 4"
fg, x, bounds = self.fg4, [1.125,0.125], [(1, None), (0, None)]
xopt = [1, 0]
x, nf, rc = optimize.fmin_tnc(fg, x, bounds=bounds,
messages=optimize.tnc.MSG_NONE,
maxfun=200)
assert_allclose(self.f4(x), self.f4(xopt), atol=1e-8,
err_msg="TNC failed with status: " +
optimize.tnc.RCSTRINGS[rc])
def test_tnc5(self):
" TNC: test 5"
fg, x, bounds = self.fg5, [0, 0], [(-1.5, 4),(-3, 3)]
xopt = [-0.54719755119659763, -1.5471975511965976]
x, nf, rc = optimize.fmin_tnc(fg, x, bounds=bounds,
messages=optimize.tnc.MSG_NONE,
maxfun=200)
assert_allclose(self.f5(x), self.f5(xopt), atol=1e-8,
err_msg="TNC failed with status: " +
optimize.tnc.RCSTRINGS[rc])
def test_tnc38(self):
" TNC: test 38"
fg, x, bounds = self.fg38, np.array([-3, -1, -3, -1]), [(-10, 10)]*4
xopt = [1]*4
x, nf, rc = optimize.fmin_tnc(fg, x, bounds=bounds,
messages=optimize.tnc.MSG_NONE,
maxfun=200)
assert_allclose(self.f38(x), self.f38(xopt), atol=1e-8,
err_msg="TNC failed with status: " +
optimize.tnc.RCSTRINGS[rc])
def test_tnc45(self):
" TNC: test 45"
fg, x, bounds = self.fg45, [2] * 5, [(0, 1), (0, 2), (0, 3),
(0, 4), (0, 5)]
xopt = [1, 2, 3, 4, 5]
x, nf, rc = optimize.fmin_tnc(fg, x, bounds=bounds,
messages=optimize.tnc.MSG_NONE,
maxfun=200)
assert_allclose(self.f45(x), self.f45(xopt), atol=1e-8,
err_msg="TNC failed with status: " +
optimize.tnc.RCSTRINGS[rc])
class TestRosen(TestCase):
def test_hess(self):
"""Compare rosen_hess(x) times p with rosen_hess_prod(x,p) (ticket #1248)"""
x = np.array([3, 4, 5])
p = np.array([2, 2, 2])
hp = optimize.rosen_hess_prod(x, p)
dothp = np.dot(optimize.rosen_hess(x), p)
assert_equal(hp, dothp)
if __name__ == "__main__":
run_module_suite()
| 39.171674 | 84 | 0.491326 |
from numpy.testing import assert_raises, assert_allclose, \
assert_equal, assert_, TestCase, run_module_suite
from scipy import optimize
import numpy as np
from math import pow
class TestOptimize(TestCase):
def setUp(self):
self.F = np.array([[1,1,1],[1,1,0],[1,0,1],[1,0,0],[1,0,0]])
self.K = np.array([1., 0.3, 0.5])
self.startparams = np.zeros(3, np.float64)
self.solution = np.array([0., -0.524869316, 0.487525860])
self.maxiter = 1000
self.funccalls = 0
self.gradcalls = 0
self.trace = []
def func(self, x):
self.funccalls += 1
if self.funccalls > 6000:
raise RuntimeError("too many iterations in optimization routine")
log_pdot = np.dot(self.F, x)
logZ = np.log(sum(np.exp(log_pdot)))
f = logZ - np.dot(self.K, x)
self.trace.append(x)
return f
def grad(self, x):
self.gradcalls += 1
log_pdot = np.dot(self.F, x)
logZ = np.log(sum(np.exp(log_pdot)))
p = np.exp(log_pdot - logZ)
return np.dot(self.F.transpose(), p) - self.K
def hess(self, x):
log_pdot = np.dot(self.F, x)
logZ = np.log(sum(np.exp(log_pdot)))
p = np.exp(log_pdot - logZ)
return np.dot(self.F.T,
np.dot(np.diag(p), self.F - np.dot(self.F.T, p)))
def hessp(self, x, p):
return np.dot(self.hess(x), p)
def test_cg(self, use_wrapper=False):
if use_wrapper:
opts = {'maxit': self.maxiter, 'disp': False}
params, info = optimize.minimize(self.func, self.startparams,
args=(), method='CG',
jac=self.grad, options=opts,
full_output=True,
retall=False)
fopt, func_calls, grad_calls, warnflag = \
info['fun'], info['nfev'], info['njev'], info['status']
else:
retval = optimize.fmin_cg(self.func, self.startparams, self.grad, (),
maxiter=self.maxiter,
full_output=True, disp=False, retall=False)
(params, fopt, func_calls, grad_calls, warnflag) = retval
assert_allclose(self.func(params), self.func(self.solution),
atol=1e-6)
assert_(self.funccalls == 9, self.funccalls)
assert_(self.gradcalls == 7, self.gradcalls)
# Ensure that the function behaves the same; this is from Scipy 0.7.0
assert_allclose(self.trace[2:4],
[[0, -0.5, 0.5],
[0, -5.05700028e-01, 4.95985862e-01]],
atol=1e-14, rtol=1e-7)
def test_bfgs(self, use_wrapper=False):
if use_wrapper:
opts = {'maxit': self.maxiter, 'disp': False}
params, info = optimize.minimize(self.func, self.startparams,
jac=self.grad, method='BFGS',
args=(), options=opts,
full_output=True,
retall=False)
fopt, gopt, Hopt, func_calls, grad_calls, warnflag = \
info['fun'], info['jac'], info['hess'], info['nfev'], \
info['njev'], info['status']
else:
retval = optimize.fmin_bfgs(self.func, self.startparams, self.grad,
args=(), maxiter=self.maxiter,
full_output=True, disp=False, retall=False)
(params, fopt, gopt, Hopt, func_calls, grad_calls, warnflag) = retval
assert_allclose(self.func(params), self.func(self.solution),
atol=1e-6)
# Ensure that function call counts are 'known good'; these are from
# Scipy 0.7.0. Don't allow them to increase.
assert_(self.funccalls == 10, self.funccalls)
assert_(self.gradcalls == 8, self.gradcalls)
assert_allclose(self.trace[6:8],
[[0, -5.25060743e-01, 4.87748473e-01],
[0, -5.24885582e-01, 4.87530347e-01]],
atol=1e-14, rtol=1e-7)
def test_bfgs_nan(self):
func = lambda x: x
fprime = lambda x: np.ones_like(x)
x0 = [np.nan]
olderr = np.seterr(over='ignore')
try:
x = optimize.fmin_bfgs(func, x0, fprime, disp=False)
assert_(np.isnan(func(x)))
finally:
np.seterr(**olderr)
def test_bfgs_numerical_jacobian(self):
epsilon = np.sqrt(np.finfo(float).eps) * np.random.rand(len(self.solution))
params = optimize.fmin_bfgs(self.func, self.startparams,
epsilon=epsilon, args=(),
maxiter=self.maxiter, disp=False)
assert_allclose(self.func(params), self.func(self.solution),
atol=1e-6)
def test_bfgs_infinite(self, use_wrapper=False):
func = lambda x: -np.e**-x
fprime = lambda x: -func(x)
x0 = [0]
olderr = np.seterr(over='ignore')
try:
if use_wrapper:
opts = {'disp': False}
x = optimize.minimize(func, x0, jac=fprime, method='BFGS',
args=(), options=opts)
else:
x = optimize.fmin_bfgs(func, x0, fprime, disp=False)
assert_(not np.isfinite(func(x)))
finally:
np.seterr(**olderr)
def test_powell(self, use_wrapper=False):
if use_wrapper:
opts = {'maxit': self.maxiter, 'disp': False}
params, info = optimize.minimize(self.func, self.startparams,
args=(), method='Powell',
options=opts,
full_output=True,
retall=False)
fopt, direc, numiter, func_calls, warnflag = \
info['fun'], info['direc'], info['nit'], info['nfev'], \
info['status']
else:
retval = optimize.fmin_powell(self.func, self.startparams,
args=(), maxiter=self.maxiter,
full_output=True, disp=False, retall=False)
(params, fopt, direc, numiter, func_calls, warnflag) = retval
assert_allclose(self.func(params), self.func(self.solution),
atol=1e-6)
#
# However, some leeway must be added: the exact evaluation
# count is sensitive to numerical error, and floating-point
# computations are not bit-for-bit reproducible across
# machines, and when using e.g. MKL, data alignment
# etc. affect the rounding error.
#
assert_(self.funccalls <= 116 + 20, self.funccalls)
assert_(self.gradcalls == 0, self.gradcalls)
# Ensure that the function behaves the same; this is from Scipy 0.7.0
assert_allclose(self.trace[34:39],
[[ 0.72949016, -0.44156936, 0.47100962],
[ 0.72949016, -0.44156936, 0.48052496],
[ 1.45898031, -0.88313872, 0.95153458],
[ 0.72949016, -0.44156936, 0.47576729],
[ 1.72949016, -0.44156936, 0.47576729]],
atol=1e-14, rtol=1e-7)
def test_neldermead(self, use_wrapper=False):
if use_wrapper:
opts = {'maxit': self.maxiter, 'disp': False}
params, info = optimize.minimize(self.func, self.startparams,
args=(), method='Nelder-mead',
options=opts,
full_output=True,
retall=False)
fopt, numiter, func_calls, warnflag = \
info['fun'], info['nit'], info['nfev'], info['status']
else:
retval = optimize.fmin(self.func, self.startparams,
args=(), maxiter=self.maxiter,
full_output=True, disp=False, retall=False)
(params, fopt, numiter, func_calls, warnflag) = retval
assert_allclose(self.func(params), self.func(self.solution),
atol=1e-6)
# Ensure that function call counts are 'known good'; these are from
# Scipy 0.7.0. Don't allow them to increase.
assert_(self.funccalls == 167, self.funccalls)
assert_(self.gradcalls == 0, self.gradcalls)
assert_allclose(self.trace[76:78],
[[0.1928968 , -0.62780447, 0.35166118],
[0.19572515, -0.63648426, 0.35838135]],
atol=1e-14, rtol=1e-7)
def test_ncg(self, use_wrapper=False):
if use_wrapper:
opts = {'maxit': self.maxiter, 'disp': False}
retval = optimize.minimize(self.func, self.startparams,
method='Newton-CG', jac=self.grad,
args=(), options=opts,
full_output=False, retall=False)
else:
retval = optimize.fmin_ncg(self.func, self.startparams, self.grad,
args=(), maxiter=self.maxiter,
full_output=False, disp=False,
retall=False)
params = retval
assert_allclose(self.func(params), self.func(self.solution),
atol=1e-6)
assert_(self.funccalls == 7, self.funccalls)
assert_(self.gradcalls <= 18, self.gradcalls) # 0.9.0
#assert_(self.gradcalls == 18, self.gradcalls) # 0.8.0
#assert_(self.gradcalls == 22, self.gradcalls) # 0.7.0
# Ensure that the function behaves the same; this is from Scipy 0.7.0
assert_allclose(self.trace[3:5],
[[-4.35700753e-07, -5.24869435e-01, 4.87527480e-01],
[-4.35700753e-07, -5.24869401e-01, 4.87527774e-01]],
atol=1e-6, rtol=1e-7)
def test_ncg_hess(self, use_wrapper=False):
if use_wrapper:
opts = {'maxit': self.maxiter, 'disp': False}
retval = optimize.minimize(self.func, self.startparams,
method='Newton-CG', jac=self.grad,
hess = self.hess,
args=(), options=opts,
full_output=False, retall=False)
else:
retval = optimize.fmin_ncg(self.func, self.startparams, self.grad,
fhess = self.hess,
args=(), maxiter=self.maxiter,
full_output=False, disp=False,
retall=False)
params = retval
assert_allclose(self.func(params), self.func(self.solution),
atol=1e-6)
# Ensure that function call counts are 'known good'; these are from
# Scipy 0.7.0. Don't allow them to increase.
assert_(self.funccalls == 7, self.funccalls)
assert_(self.gradcalls <= 18, self.gradcalls)
assert_allclose(self.trace[3:5],
[[-4.35700753e-07, -5.24869435e-01, 4.87527480e-01],
[-4.35700753e-07, -5.24869401e-01, 4.87527774e-01]],
atol=1e-6, rtol=1e-7)
def test_ncg_hessp(self, use_wrapper=False):
if use_wrapper:
opts = {'maxit': self.maxiter, 'disp': False}
retval = optimize.minimize(self.func, self.startparams,
method='Newton-CG', jac=self.grad,
hess = self.hessp,
args=(), options=opts,
full_output=False, retall=False)
else:
retval = optimize.fmin_ncg(self.func, self.startparams, self.grad,
fhess_p = self.hessp,
args=(), maxiter=self.maxiter,
full_output=False, disp=False,
retall=False)
params = retval
assert_allclose(self.func(params), self.func(self.solution),
atol=1e-6)
assert_(self.funccalls == 7, self.funccalls)
assert_(self.gradcalls <= 18, self.gradcalls) # 0.9.0
#assert_(self.gradcalls == 18, self.gradcalls) # 0.8.0
#assert_(self.gradcalls == 22, self.gradcalls) # 0.7.0
# Ensure that the function behaves the same; this is from Scipy 0.7.0
assert_allclose(self.trace[3:5],
[[-4.35700753e-07, -5.24869435e-01, 4.87527480e-01],
[-4.35700753e-07, -5.24869401e-01, 4.87527774e-01]],
atol=1e-6, rtol=1e-7)
def test_l_bfgs_b(self):
retval = optimize.fmin_l_bfgs_b(self.func, self.startparams,
self.grad, args=(),
maxfun=self.maxiter)
(params, fopt, d) = retval
assert_allclose(self.func(params), self.func(self.solution),
atol=1e-6)
# Ensure that function call counts are 'known good'; these are from
# Scipy 0.7.0. Don't allow them to increase.
assert_(self.funccalls == 7, self.funccalls)
assert_(self.gradcalls == 5, self.gradcalls)
assert_allclose(self.trace[3:5],
[[0. , -0.52489628, 0.48753042],
[0. , -0.52489628, 0.48753042]],
atol=1e-14, rtol=1e-7)
def test_l_bfgs_b_numjac(self):
retval = optimize.fmin_l_bfgs_b(self.func, self.startparams,
approx_grad=True,
maxfun=self.maxiter)
(params, fopt, d) = retval
assert_allclose(self.func(params), self.func(self.solution),
atol=1e-6)
def test_l_bfgs_b_funjac(self):
def fun(x):
return self.func(x), self.grad(x)
retval = optimize.fmin_l_bfgs_b(fun, self.startparams,
maxfun=self.maxiter)
(params, fopt, d) = retval
assert_allclose(self.func(params), self.func(self.solution),
atol=1e-6)
def test_minimize(self):
self.setUp()
self.test_bfgs(True)
self.setUp()
self.test_bfgs_infinite(True)
self.setUp()
self.test_cg(True)
self.setUp()
self.test_ncg(True)
self.setUp()
self.test_ncg_hess(True)
self.setUp()
self.test_ncg_hessp(True)
self.setUp()
self.test_neldermead(True)
self.setUp()
self.test_powell(True)
class TestOptimizeScalar(TestCase):
def setUp(self):
self.solution = 1.5
def fun(self, x):
return (x - 1.5)**2 - 0.8
def test_brent(self):
x = optimize.brent(self.fun)
assert_allclose(x, self.solution, atol=1e-6)
x = optimize.brent(self.fun, brack = (-3, -2))
assert_allclose(x, self.solution, atol=1e-6)
x = optimize.brent(self.fun, full_output=True)
assert_allclose(x[0], self.solution, atol=1e-6)
x = optimize.brent(self.fun, brack = (-15, -1, 15))
assert_allclose(x, self.solution, atol=1e-6)
def test_fminbound(self):
x = optimize.fminbound(self.fun, 0, 1)
assert_allclose(x, 1, atol=1e-4)
x = optimize.fminbound(self.fun, 1, 5)
assert_allclose(x, self.solution, atol=1e-6)
x = optimize.fminbound(self.fun, np.array([1]), np.array([5]))
assert_allclose(x, self.solution, atol=1e-6)
assert_raises(ValueError, optimize.fminbound, self.fun, 5, 1)
def test_fminbound_scalar(self):
assert_raises(ValueError, optimize.fminbound, self.fun,
np.zeros(2), 1)
x = optimize.fminbound(self.fun, 1, np.array(5))
assert_allclose(x, self.solution, atol=1e-6)
class TestTnc(TestCase):
def f1(self, x):
return 100.0 * pow((x[1] - pow(x[0], 2)), 2) + pow(1.0 - x[0], 2)
def g1(self, x):
dif = [0, 0]
dif[1] = 200.0*(x[1] - pow(x[0], 2))
dif[0] = -2.0 * (x[0] * (dif[1] - 1.0) + 1.0)
return dif
def fg1(self, x):
return self.f1(x), self.g1(x)
def f3(self, x):
return x[1] + pow(x[1] - x[0], 2) * 1.0e-5
def g3(self, x):
dif = [0,0]
dif[0] = -2.0 * (x[1] - x[0]) * 1.0e-5
dif[1] = 1.0 - dif[0]
return dif
def fg3(self, x):
return self.f3(x), self.g3(x)
def f4(self, x):
return pow(x[0] + 1.0, 3) / 3.0 + x[1]
def g4(self, x):
dif = [0,0]
dif[0] = pow(x[0] + 1.0, 2)
dif[1] = 1.0
return dif
def fg4(self, x):
return self.f4(x), self.g4(x)
def f5(self, x):
return np.sin(x[0] + x[1]) + pow(x[0] - x[1], 2) - \
1.5 * x[0] + 2.5 * x[1] + 1.0
def g5(self, x):
dif = [0,0]
v1 = np.cos(x[0] + x[1])
v2 = 2.0*(x[0] - x[1])
dif[0] = v1 + v2 - 1.5
dif[1] = v1 - v2 + 2.5
return dif
def fg5(self, x):
return self.f5(x), self.g5(x)
def f38(self, x):
return (100.0 * pow(x[1] - pow(x[0], 2), 2) +
pow(1.0 - x[0], 2) + 90.0 * pow(x[3] - pow(x[2], 2), 2) +
pow(1.0 - x[2], 2) + 10.1 * (pow(x[1] - 1.0, 2) +
pow(x[3] - 1.0, 2)) +
19.8 * (x[1] - 1.0) * (x[3] - 1.0)) * 1.0e-5
def g38(self, x):
dif = [0, 0, 0, 0]
dif[0] = (-400.0 * x[0] * (x[1] - pow(x[0], 2)) -
2.0 * (1.0 - x[0])) * 1.0e-5
dif[1] = (200.0 * (x[1] - pow(x[0], 2)) + 20.2 * (x[1] - 1.0) +
19.8 * (x[3] - 1.0)) * 1.0e-5
dif[2] = ( - 360.0 * x[2] * (x[3] - pow(x[2], 2)) -
2.0 * (1.0 - x[2])) * 1.0e-5
dif[3] = (180.0 * (x[3] - pow(x[2], 2)) + 20.2 * (x[3] - 1.0) +
19.8 * (x[1] - 1.0)) * 1.0e-5
return dif
def fg38(self, x):
return self.f38(x), self.g38(x)
def f45(self, x):
return 2.0 - x[0] * x[1] * x[2] * x[3] * x[4] / 120.0
def g45(self, x):
dif = [0] * 5
dif[0] = - x[1] * x[2] * x[3] * x[4] / 120.0
dif[1] = - x[0] * x[2] * x[3] * x[4] / 120.0
dif[2] = - x[0] * x[1] * x[3] * x[4] / 120.0
dif[3] = - x[0] * x[1] * x[2] * x[4] / 120.0
dif[4] = - x[0] * x[1] * x[2] * x[3] / 120.0
return dif
def fg45(self, x):
return self.f45(x), self.g45(x)
def test_tnc1(self):
fg, x, bounds = self.fg1, [-2, 1], ([-np.inf, None],[-1.5, None])
xopt = [1, 1]
x, nf, rc = optimize.fmin_tnc(fg, x, bounds=bounds,
messages=optimize.tnc.MSG_NONE,
maxfun=200)
assert_allclose(self.f1(x), self.f1(xopt), atol=1e-8,
err_msg="TNC failed with status: " +
optimize.tnc.RCSTRINGS[rc])
def test_tnc1b(self):
x, bounds = [-2, 1], ([-np.inf, None],[-1.5, None])
xopt = [1, 1]
x, nf, rc = optimize.fmin_tnc(self.f1, x, approx_grad=True,
bounds=bounds,
messages=optimize.tnc.MSG_NONE,
maxfun=200)
assert_allclose(self.f1(x), self.f1(xopt), atol=1e-4,
err_msg="TNC failed with status: " +
optimize.tnc.RCSTRINGS[rc])
def test_tnc1c(self):
x, bounds = [-2, 1], ([-np.inf, None],[-1.5, None])
xopt = [1, 1]
x, nf, rc = optimize.fmin_tnc(self.f1, x, fprime=self.g1,
bounds=bounds,
messages=optimize.tnc.MSG_NONE,
maxfun=200)
assert_allclose(self.f1(x), self.f1(xopt), atol=1e-8,
err_msg="TNC failed with status: " +
optimize.tnc.RCSTRINGS[rc])
def test_tnc2(self):
fg, x, bounds = self.fg1, [-2, 1], ([-np.inf, None], [1.5, None])
xopt = [-1.2210262419616387, 1.5]
x, nf, rc = optimize.fmin_tnc(fg, x, bounds=bounds,
messages=optimize.tnc.MSG_NONE,
maxfun=200)
assert_allclose(self.f1(x), self.f1(xopt), atol=1e-8,
err_msg="TNC failed with status: " +
optimize.tnc.RCSTRINGS[rc])
def test_tnc3(self):
fg, x, bounds = self.fg3, [10, 1], ([-np.inf, None], [0.0, None])
xopt = [0, 0]
x, nf, rc = optimize.fmin_tnc(fg, x, bounds=bounds,
messages=optimize.tnc.MSG_NONE,
maxfun=200)
assert_allclose(self.f3(x), self.f3(xopt), atol=1e-8,
err_msg="TNC failed with status: " +
optimize.tnc.RCSTRINGS[rc])
def test_tnc4(self):
fg, x, bounds = self.fg4, [1.125,0.125], [(1, None), (0, None)]
xopt = [1, 0]
x, nf, rc = optimize.fmin_tnc(fg, x, bounds=bounds,
messages=optimize.tnc.MSG_NONE,
maxfun=200)
assert_allclose(self.f4(x), self.f4(xopt), atol=1e-8,
err_msg="TNC failed with status: " +
optimize.tnc.RCSTRINGS[rc])
def test_tnc5(self):
fg, x, bounds = self.fg5, [0, 0], [(-1.5, 4),(-3, 3)]
xopt = [-0.54719755119659763, -1.5471975511965976]
x, nf, rc = optimize.fmin_tnc(fg, x, bounds=bounds,
messages=optimize.tnc.MSG_NONE,
maxfun=200)
assert_allclose(self.f5(x), self.f5(xopt), atol=1e-8,
err_msg="TNC failed with status: " +
optimize.tnc.RCSTRINGS[rc])
def test_tnc38(self):
fg, x, bounds = self.fg38, np.array([-3, -1, -3, -1]), [(-10, 10)]*4
xopt = [1]*4
x, nf, rc = optimize.fmin_tnc(fg, x, bounds=bounds,
messages=optimize.tnc.MSG_NONE,
maxfun=200)
assert_allclose(self.f38(x), self.f38(xopt), atol=1e-8,
err_msg="TNC failed with status: " +
optimize.tnc.RCSTRINGS[rc])
def test_tnc45(self):
fg, x, bounds = self.fg45, [2] * 5, [(0, 1), (0, 2), (0, 3),
(0, 4), (0, 5)]
xopt = [1, 2, 3, 4, 5]
x, nf, rc = optimize.fmin_tnc(fg, x, bounds=bounds,
messages=optimize.tnc.MSG_NONE,
maxfun=200)
assert_allclose(self.f45(x), self.f45(xopt), atol=1e-8,
err_msg="TNC failed with status: " +
optimize.tnc.RCSTRINGS[rc])
class TestRosen(TestCase):
def test_hess(self):
x = np.array([3, 4, 5])
p = np.array([2, 2, 2])
hp = optimize.rosen_hess_prod(x, p)
dothp = np.dot(optimize.rosen_hess(x), p)
assert_equal(hp, dothp)
if __name__ == "__main__":
run_module_suite()
| true | true |
1c3b252e7303fe12913e9ea6bf8c6fcbbb64ce8d | 53,029 | py | Python | modest/substates/correlationvector.py | jtrunnels91/ModularEstimator | 1088f91440abd5a82d094311f51d0250ecca52e1 | [
"MIT"
] | null | null | null | modest/substates/correlationvector.py | jtrunnels91/ModularEstimator | 1088f91440abd5a82d094311f51d0250ecca52e1 | [
"MIT"
] | null | null | null | modest/substates/correlationvector.py | jtrunnels91/ModularEstimator | 1088f91440abd5a82d094311f51d0250ecca52e1 | [
"MIT"
] | null | null | null | ## @file CorrelationVector
# This package contains the #CorrelationVector class
import numpy as np
#from numpy import sin, cos, arcsin, arccos, arctan2, square, sqrt, abs, power
import matplotlib.pyplot as plt
from . import substate
from .. modularfilter import ModularFilter
from . oneDimensionalPositionVelocity import oneDPositionVelocity
from .. signals.oneDimensionalObject import oneDObjectMeasurement
from .. utils import covarianceContainer
from scipy.linalg import block_diag
from scipy.special import factorial
from math import isnan
## @class CorrelationVector
# @brief CorrelationVector estimates the correlation vector and delay between
# a signal and a time-delayed measurement of that signal
#
# @details
# This class contains an estimator which estimates the correlation vector
# between a signal (the #__trueSignal__) and measurements of that signal. This
# correlation vector is then used to estimate the delay between the
# #__trueSignal__ and the measurements of that signal.
#
# The estimator in this class currently assumes that the signal source is
# "distant," or infinitely far away. This implies that the unit vector to the
# signal source is perfectly known, and not changing. A later implementation
# could include the option of having a non-distant signal source, in which the
# unit vector is changing as a function of position and therefore uncertain.
#
# @note This class is essentially an implementation of the estimator presented in
# <a href="https://doi.org/10.2514/1.G002650">
# Recursive Range Estimation Using Astrophysical Signals of Opportunity</a>,
# J. Runnels, D. Gebre, Journal of Guidance, Control and Dynamics, 2017. Some
# equations from the paper are included in the class documentation for
# reference. A more detailed discussion and derivation of the estimator can
# be found in the journal article..
class CorrelationVector(substate.SubState):
## @fun #__init__ is responsible for initializing a correlation vector
# estimator
#
# @details The primary function of the #__init__ method is to initialize
# the correlation vector estimator, and store the relevant user inputs. A
# few key user inputs are required in order to initialize the filter.
# Additionally, because the algorithm is relatively complicated, there are
# a number of optional tuning parameters which may be inputed at
# initialization.
#
# In general, the parameters which are required inputs are the ones that
# are critical for initialization of the filter, and should not be changed
# during the course of the filter's lifetime. These inputs are stored as
# "private" variables; indicating that the should not be changed during
# the object's lifetime.
#
# The optional inputs, on the other hand, are inputs which are used in the
# estimation functions (#timeUpdate, #getMeasurementMatrices, etc.).
# These parameters could conceivably be changed during the lifetime of the
# filter without causing problems, and the user may want to change them
# depending on external factors. These parameters are initalized with
# default values, and stored as public variables that the user can in
# theory change.
#
# There are also a set of class variables which are publicly accessable
# and which hold the most recent state estimate. These exist primarily
# for convenience, and are never actually used within the class.
# Modifying them will have no affect on the state estimates. The only way
# to modify a state estimate is through the #storeStateVector method.
#
# The #__init__ method also checks the user inputs to ensure that they are
# consistent with how they will be used in the class (where applicable).
#
# The trueSignal input is checked to see whether it has the following
# methods:
# - flux()
# - signalID()
# - unitVec()
#
# @param trueSignal An object that describes the signal for which
# correlation should be estimated.
# @param filterOrder The number of bins or "taps" in the correlation vector
# @param dT The sampling period, or time-step between bins in the
# correlation vector
#
# @param #t (optional) The initial starting time. If no value is passed,
# initialized to zero by default.
# @param #correlationVector (optional) The initial value of the
# correlation vector. If not supplied, the correlation vector will be
# initialized based on the filter #__dT__ and maximum flux of
# the #__trueSignal__.
# @param #correlationVectorCovariance (optional) The initial value of the
# correlation vector estimate covariance. If not supplied, the covariance
# matrix will be initialized based on the filter #__dT__ and maximum flux
# of #__trueSignal__.
# @param #signalDelay (optional) The initial estimate of delay between
# the #__trueSignal__ and the signal measurements. If not supplied,
# #signalDelay is initialized to zero.
# @param #delayVar (optional) The variance of the estimate of delay
# @param #aPriori (optional) Indicates whether initial estimates are a
# priori or a posteriori. Default=True
#
# @param #centerPeak (optional) Boolean indicating whether the correlation
# vector should be "shifted" after each update to keep the peak centered
# at the zero index. Default is True.
# @param #peakFitPoints (optional) Number of points used on either side of
# max for quadratic fit in #computeSignalDelay. Minimum is 1, default is 1.
# @param #processNoise (optional) Scalar term of additional process noise
# added to covariance matrix in time update. Default is 1e-12
# @param #measurementNoiseScaleFactor (optional) Scale factor to inflate
# the measurement noise. Default is 1.
def __init__(
self,
trueSignal,
filterOrder,
dT,
t=0,
correlationVector=None,
correlationVectorCovariance=None,
signalTDOA=0,
TDOAVar=0,
aPriori=True,
centerPeak=True,
peakFitPoints=1,
processNoise=1e-12,
measurementNoiseScaleFactor=1,
peakLockThreshold=1,
covarianceStorage='covariance',
internalNavFilter=None,
navProcessNoise=1,
tdoaStdDevThreshold=None,
velStdDevThreshold=None,
tdoaNoiseScaleFactor=None,
velocityNoiseScaleFactor=None,
storeLastStateVectors=0,
vInitial=None,
aInitial=None,
gradInitial=None,
peakEstimator='EK'
):
print('updated correlation filter')
self.peakLockThreshold = peakLockThreshold
self.peakCenteringDT = 0
self.peakOffsetFromCenter = 0
self.navProcessNoise = navProcessNoise
"""
This is the default process noise that is injected into the navigation states as noise in the derivative of the highest order state.
"""
## @brief #__unitVecToSignal__ is a unit vector which points to the signal
# source
self.__unitVecToSignal__ = trueSignal.unitVec()
## @brief #__trueSignal__ is a signal object that contains the "true"
# signal for which the correlation vector is being estimated
self.__trueSignal__ = trueSignal
## @brief #__filterOrder__ is the number of "taps" in the estimated
# correlation vector, #correlationVector.
self.__filterOrder__ = filterOrder
## @brief #__dT__ is the "sample period" or "bin size" of the estimated
# correlation vector
self.__dT__ = dT
## @brief #t The current time
self.t = t
## @brief #aPriori Indicates whether the current state vector is the
# result of a time update (#aPriori = True) or a measurement update
# (#aPriori = False)
self.aPriori = aPriori
if correlationVector is None:
correlationVector = (
np.ones(self.__filterOrder__) *
self.__trueSignal__.peakAmplitude * self.__dT__
)
## @brief #correlationVector is the current estimate of the
# correlation vector between the incoming signal measurements and the
# #__trueSignal__
self.correlationVector = correlationVector
if correlationVectorCovariance is None:
if covarianceStorage == 'covariance':
correlationVectorCovariance = (
np.eye(self.__filterOrder__) *
np.square(self.__trueSignal__.peakAmplitude * self.__dT__)
)
elif covarianceStorage == 'cholesky':
correlationVectorCovariance = (
np.eye(self.__filterOrder__) *
self.__trueSignal__.peakAmplitude * self.__dT__
)
# Store the correlation vector covariance in a container
## @brief #correlationVectorCovariance is the covariance matrix of the
# correlation vector estimate, #correlationVector
self.correlationVectorCovariance = correlationVectorCovariance
## @brief #signalDelay is the current estimate of the delay between
# the incoming signal measurements and the #__trueSignal__
self.signalTDOA = signalTDOA
## @brief #delayVar is the variance of the signal delay estimate
# #signalDelay
self.TDOAVar = TDOAVar
## @brief #centerPeak indicates whether the correlation vector is
# shifted to maintain the peak at the middle tap
self.centerPeak = centerPeak
## @brief #peakLock indicates whether the current estimate of
# correlation vector and peak location is accurate enough to "know"
# that we've locked on to the correct peak.
self.peakLock = False
## @brief #peakFitPoints is a variable which controls the number of
# points used for quadratically estimating the location of the
# correlation vector peak
self.peakFitPoints = peakFitPoints
## @brief #processNoise is the scalar value used to generate an
# additional process noise term in #timeUpdate.
self.processNoise = processNoise
## @brief #measurementNoiseScaleFactor is a factor used to scale the
# measurement noise matrix. The default value is 1 (no scaling).
self.measurementNoiseScaleFactor = measurementNoiseScaleFactor
self.peakEstimator = peakEstimator
"""
String that determines which algorithm is used to estimate peak. Use either EK (extended Kalman Filter) or UK (Unscented)
"""
self.__halfLength__ = int(np.ceil(self.__filterOrder__ / 2))
self.__halfLengthSeconds__ = self.__halfLength__ * self.__dT__
xAxis = np.linspace(0, self.__filterOrder__-1, self.__filterOrder__)
self.xAxis = xAxis * self.__dT__
self.__xVec__ = np.linspace(
0,
self.peakFitPoints * 2,
(self.peakFitPoints * 2) + 1
)
self.internalNavFilter = internalNavFilter
print(internalNavFilter)
if self.internalNavFilter == 'none':
self.internalNavFilter = None
self.INF_type = 'none'
elif self.internalNavFilter == 'deep':
self.INF_type = 'deep'
elif self.internalNavFilter:
self.INF_type = 'external'
else:
self.internalNavFilter = None
self.INF_type = 'none'
stateVector = correlationVector
svCovariance = correlationVectorCovariance
if self.INF_type == 'deep':
if not vInitial:
raise ValueError('In order to use the deep internal navigation filter, you must initialize. Filter expects to receive at least vInitial, but received None')
self.velocity = vInitial['value']
self.velocityStdDev = np.sqrt(vInitial['var'])
if not aInitial:
self.navVectorLength = 1
navVector = np.zeros(1)
navVector[0] = vInitial['value']
navVar = np.zeros([1,1])
navVar[0,0] = vInitial['var']
elif not gradInitial:
self.acceleration = aInitial['value']
self.accelerationStdDev = np.sqrt(aInitial['var'])
self.navVectorLength = 2
navVector = np.zeros(2)
navVector[0] = vInitial['value']
navVector[1] = aInitial['value']
navVar = np.zeros([2,2])
navVar[0,0] = vInitial['var']
navVar[1,1] = aInitial['var']
else:
self.acceleration = aInitial['value']
self.accelerationStdDev = np.sqrt(aInitial['var'])
self.gradient = gradInitial['value']
self.gradientStdDev = np.sqrt(gradInitial['var'])
self.navVectorLength = 3
navVector = np.zeros(3)
navVector[0] = vInitial['value']
navVector[1] = aInitial['value']
navVector[2] = gradInitial['value']
navVar = np.zeros([3,3])
navVar[0,0] = vInitial['var']
navVar[1,1] = aInitial['var']
navVar[2,2] = gradInitial['var']
stateVector = np.append(stateVector,navVector)
svCovariance = block_diag(svCovariance, navVar)
svCovariance = covarianceContainer(
svCovariance,
covarianceStorage
)
self.mostRecentF = np.eye(self.__filterOrder__)
self.stateVector = stateVector
super().__init__(
stateDimension=len(stateVector),
stateVectorHistory={
't': t,
'stateVector': stateVector,
'covariance': svCovariance,
'aPriori': aPriori,
'signalTDOA': signalTDOA,
'TDOAVar': TDOAVar,
'xAxis': self.xAxis,
'stateVectorID': -1
},
storeLastStateVectors=storeLastStateVectors
)
self.tdoaStdDevThreshold = tdoaStdDevThreshold
self.velStdDevThreshold = velStdDevThreshold
self.tdoaNoiseScaleFactor = tdoaNoiseScaleFactor
self.velocityNoiseScaleFactor = velocityNoiseScaleFactor
if self.INF_type == 'external':
self.navState = self.internalNavFilter.subStates['oneDPositionVelocity']['stateObject']
return
##
# @name Mandatory SubState Functions
# The following functions are required in order for this class to be used
# as a substate in ModularFilter. The inside of the functions may be
# changed or updated, but their "black box" behavior must remain the
# same; i.e. they must still perform the same essential functions and
# return the same things.
# @{
## @fun #storeStateVector stores an updated estimate of the state vector
    def storeStateVector(
            self,
            svDict
            ):
        # Mandatory SubState interface function: accept an updated state
        # vector from the host filter, refresh the local TDOA estimate, and
        # annotate svDict with TDOA/velocity information before handing it to
        # the parent class for storage.
        # Unpack updated state vector values
        self.t = svDict['t']
        self.aPriori = svDict['aPriori']
        # Compute new estimate of delay based on new state vector, store in
        # svDict and local attributes
        if not svDict['aPriori']:
            # A posteriori (measurement-updated) state: accept the incoming
            # correlation vector directly and re-estimate the TDOA from it.
            self.correlationVector = svDict['stateVector'][0:self.__filterOrder__]
            self.correlationVectorCovariance = svDict['covariance']
            self.stateVector = svDict['stateVector']
            # Peak-location estimator: 'UK' = unscented transform,
            # 'EK' = extended-Kalman (linearized) estimator
            if self.peakEstimator == 'UK':
                tdoaDict = self.estimateSignalTDOA_UT(
                    self.correlationVector,
                    self.correlationVectorCovariance
                )
            elif self.peakEstimator == 'EK':
                tdoaDict = self.estimateSignalTDOA_EK(
                    self.correlationVector,
                    self.correlationVectorCovariance
                )
            else:
                raise ValueError('Unrecougnized peak finding algorithm %s' %self.peakEstimator)
            # Convert peak location from filter-tap units to seconds and add
            # the accumulated peak-centering offset.
            newTDOA = (
                (
                    tdoaDict['meanTDOA']
                ) *
                self.__dT__
            ) + self.peakCenteringDT
            newTDOAVar = tdoaDict['varTDOA'] * np.square(self.__dT__)
            # Only accept finite estimates; otherwise retain the previous TDOA
            if not isnan(newTDOA) and not isnan(newTDOAVar):
                self.signalTDOA = newTDOA
                self.TDOAVar = newTDOAVar
            svDict['signalTDOA'] = self.signalTDOA
            svDict['TDOAVar'] = self.TDOAVar
            if self.INF_type == 'external':
                # Feed the TDOA estimate to the external nav filter as a
                # position measurement, gated on its standard deviation
                # (a threshold of 0 disables the gate).
                if (
                        (np.sqrt(self.TDOAVar) < (self.tdoaStdDevThreshold))
                        or (self.tdoaStdDevThreshold == 0)
                ):
                    self.internalNavFilter.measurementUpdateEKF(
                        {'position': {
                            'value': self.signalTDOA,
                            'var': self.TDOAVar * self.tdoaNoiseScaleFactor
                        }},
                        'oneDPositionVelocity'
                    )
                else:
                    # Empty measurement: advances the filter without updating
                    self.internalNavFilter.measurementUpdateEKF(
                        {}, ''
                    )
            if self.peakLock is True and self.centerPeak is True:
                # Record how far the estimated peak sits from the center tap
                # so the next time update can re-center the correlation vector
                self.peakOffsetFromCenter = tdoaDict['meanTDOA'] - self.__halfLength__ + 1
            else:
                self.peakOffsetFromCenter = 0
        else:
            # A priori (time-updated) state: propagate the locally stored
            # correlation vector with the most recent transition matrix
            # rather than taking svDict's stateVector as-is.
            self.correlationVector = self.mostRecentF.dot(self.correlationVector)
            svDict['stateVector'][0:self.__filterOrder__] = self.correlationVector
            self.stateVector = svDict['stateVector']
            self.correlationVectorCovariance = svDict['covariance']
            if self.peakEstimator == 'UK':
                tdoaDict = self.estimateSignalTDOA_UT(
                    self.correlationVector,
                    self.correlationVectorCovariance
                )
            elif self.peakEstimator == 'EK':
                tdoaDict = self.estimateSignalTDOA_EK(
                    self.correlationVector,
                    self.correlationVectorCovariance
                )
            else:
                raise ValueError('Unrecougnized peak finding algorithm %s' %self.peakEstimator)
            # A priori, only the TDOA variance is refreshed; the TDOA value
            # itself was already propagated in the time update.
            newTDOAVar = tdoaDict['varTDOA'] * np.square(self.__dT__)
            self.TDOAVar = newTDOAVar
            svDict['signalTDOA'] = self.signalTDOA
            svDict['TDOAVar'] = self.TDOAVar
            self.peakOffsetFromCenter = 0
        svDict['xAxis'] = self.xAxis + self.peakCenteringDT
        tdoaSTD = np.sqrt(self.TDOAVar)
        # Peak-lock hysteresis: lock when the TDOA standard deviation drops
        # below the threshold; unlock only once it exceeds it by 10%.
        if tdoaSTD < (self.peakLockThreshold * self.__dT__):
            if not self.peakLock:
                print(
                    'Substate %s reached peak lock at time %s'
                    %(self.__trueSignal__.name, self.t)
                )
            self.peakLock = True
        else:
            if self.peakLock and tdoaSTD > (self.peakLockThreshold * self.__dT__ * 1.1):
                print(
                    'Substate %s lost peak lock at time %s'
                    %(self.__trueSignal__.name, self.t)
                )
                self.peakLock = False
                self.peakOffsetFromCenter = 0
        if self.INF_type == 'deep':
            # Deep integration: navigation states (velocity, optionally
            # acceleration and acceleration gradient) trail the correlation
            # taps in the joint state vector.
            fO = self.__filterOrder__
            currentV = self.stateVector[fO]
            currentVStdDev = np.sqrt(self.correlationVectorCovariance[fO,fO].value)
            self.velocity = currentV
            self.velocityStdDev = currentVStdDev
            svDict['velocity'] = {'value':currentV, 'stddev': currentVStdDev}
            if self.navVectorLength == 2:
                currentA = self.stateVector[fO+1]
                currentAStdDev = np.sqrt(self.correlationVectorCovariance[fO+1,fO+1].value)
                svDict['acceleration'] = {'value':currentA, 'stddev': currentAStdDev}
                self.acceleration = currentA
                self.accelerationStdDev = currentAStdDev
            elif self.navVectorLength == 3:
                currentA = self.stateVector[fO+1]
                currentAStdDev = np.sqrt(self.correlationVectorCovariance[fO+1,fO+1].value)
                svDict['acceleration'] = {'value':currentA, 'stddev': currentAStdDev}
                self.acceleration = currentA
                self.accelerationStdDev = currentAStdDev
                currentGrad = self.stateVector[fO+2]
                currentGradStdDev = np.sqrt(self.correlationVectorCovariance[fO+2,fO+2].value)
                svDict['aGradient'] = {'value':currentGrad, 'stddev': currentGradStdDev}
                self.gradient = currentGrad
                self.gradientStdDev = currentGradStdDev
        elif self.INF_type == 'external':
            # Mirror the external nav filter's velocity estimate locally
            self.velocity = self.navState.currentVelocity
            self.velocityStdDev = np.sqrt(self.navState.velocityVar)
        super().storeStateVector(svDict)
        return
## @fun #timeUpdate returns the matrices for performing the correlation
# vector time update.
#
# @details This function calls the #buildTimeUpdateMatrices method to
# generate the time-update matrices.
#
# @param self The object pointer
# @param dT The amount of time ellapsed over which the time update is to
# be performed
# @param dynamics A dictionary containing the dynamics for the time update
# (e.g. velocity)
#
# @sa SubStates.SubState.timeUpdate
    def timeUpdate(
            self,
            dT,
            dynamics=None
            ):
        # Build the state-transition matrix F and process-noise matrix Q for
        # propagating the substate over dT seconds, optionally using supplied
        # dynamics (velocity/acceleration).  Returns {'F': ..., 'Q': ...}.
        if self.INF_type != 'deep':
            # Standard case: only the correlation vector is propagated here
            timeUpdateMatrices = self.buildTimeUpdateMatrices(
                dT, dynamics, self.correlationVector
            )
            L = timeUpdateMatrices['L']
            Q = timeUpdateMatrices['Q']
            # Process noise: velocity-uncertainty term mapped through L, plus
            # white noise applied to every correlation tap.
            Qmat = (
                np.outer(L, L) * Q +
                (
                    np.eye(self.__filterOrder__) *
                    self.processNoise * dT *
                    np.square(self.__trueSignal__.peakAmplitude * self.__dT__)
                )
            )
            # Reduce any supplied 3D acceleration to a scalar line-of-sight
            # delay acceleration for the (external) internal nav filter.
            if dynamics is not None and 'acceleration' in dynamics:
                oneDAcceleration = (
                    dynamics['acceleration']['value'].dot(self.__unitVecToSignal__) /
                    self.speedOfLight()
                )
                oneDAccelerationVar = (
                    self.__unitVecToSignal__.dot(
                        dynamics['acceleration']['value'].dot(
                            self.__unitVecToSignal__.transpose()
                        )
                    ) / np.square(self.speedOfLight())
                )
            else:
                oneDAcceleration = 0
                oneDAccelerationVar = self.navProcessNoise
            if self.INF_type == 'external':
                self.internalNavFilter.timeUpdateEKF(
                    dT,
                    dynamics = {
                        'oneDPositionVelocityacceleration': {
                            'value': oneDAcceleration,
                            'var': oneDAccelerationVar
                        }
                    }
                )
        else:
            # Deep integration: correlation taps and navigation states are
            # propagated jointly through one transition matrix.
            timeUpdateMatrices = self.buildDeepTimeUpdateMatrices(dT, dynamics, self.correlationVector)
            L = timeUpdateMatrices['L']
            # Nav process noise mapped through L, plus white noise applied to
            # the correlation taps only (nav states receive theirs via L).
            Qmat = (
                np.outer(L, L) * self.navProcessNoise + (
                    (
                        block_diag(np.eye(self.__filterOrder__),np.zeros([self.navVectorLength,self.navVectorLength])) *
                        self.processNoise * dT *
                        np.square(self.__trueSignal__.flux * self.__dT__)
                    )
                )
            )
        # Cache the correlation-tap block of F; the a priori branch of
        # storeStateVector re-applies it to the local correlation vector.
        self.mostRecentF = timeUpdateMatrices['F'][0:self.__filterOrder__, 0:self.__filterOrder__]
        return {'F': timeUpdateMatrices['F'], 'Q': Qmat}
    def buildDeepTimeUpdateMatrices(self,dT, dynamics, h):
        # Build the joint time-update matrices for "deep" integration, where
        # the correlation taps and navigation states (velocity, optionally
        # acceleration and acceleration gradient) propagate together.
        #
        # dT is the propagation interval in seconds; dynamics is unused here
        # (the estimated nav states supply the dynamics); h is the current
        # correlation vector.  Returns {'F': ..., 'L': ...}.
        FMatrixShift = -self.peakOffsetFromCenter
        filterOrder = self.__filterOrder__
        # Initialize empty matricies
        F = np.zeros([filterOrder + self.navVectorLength, filterOrder+self.navVectorLength])
        halfLength = self.__halfLength__
        # Propagation interval expressed in filter taps
        indexDiff = dT/self.__dT__
        peakShift = self.stateVector[self.__filterOrder__] * indexDiff
        # Velocity term
        self.peakCenteringDT = (
            self.peakCenteringDT + self.stateVector[self.__filterOrder__] * dT
        )
        if self.navVectorLength > 1:
            # Acceleration term (if acceleration is being estimated)
            self.peakCenteringDT = (
                self.peakCenteringDT +
                self.stateVector[self.__filterOrder__ + 1] * np.power(dT,2)/2
            )
            peakShift = (
                peakShift + self.stateVector[self.__filterOrder__ + 1]*np.power(indexDiff,2)/2
            )
            if self.navVectorLength > 2:
                # Acceleration gradient term
                self.peakCenteringDT = (
                    self.peakCenteringDT +
                    self.stateVector[self.__filterOrder__] *
                    self.stateVector[self.__filterOrder__ + 2] *
                    np.power(dT,3)/6
                )
                peakShift = (
                    peakShift +
                    self.stateVector[self.__filterOrder__] *
                    self.stateVector[self.__filterOrder__ + 2] *
                    np.power(indexDiff,3)/6
                )
        self.peakCenteringDT = self.peakCenteringDT + (self.peakOffsetFromCenter*self.__dT__)
        # Build arrays of indicies from which to form the sinc function
        if np.mod(filterOrder, 2) == 0:
            baseVec = (
                np.linspace(
                    1 - halfLength,
                    halfLength,
                    filterOrder
                )
            )
        else:
            baseVec = (
                np.linspace(
                    1 - halfLength,
                    halfLength - 1,
                    filterOrder
                )
            )
        # Compute the sinc function of the base vector
        sincBase = np.sinc(baseVec + FMatrixShift)
        diffBase = np.zeros_like(sincBase)
        for i in range(len(baseVec)):
            diffBase[i] = self.sincDiff(baseVec[i] + peakShift)
        sincBase = np.roll(sincBase, 1 - int(halfLength))
        diffBase = np.roll(diffBase, 1 - int(halfLength))
        # Correlation block of F: sinc-interpolation rows.  The trailing
        # columns couple each tap to the nav states via the sinc derivative
        # (chain rule through the peak shift).
        for i in range(len(sincBase)):
            currentDiff = np.roll(diffBase, i).dot(h)
            F[i, 0:filterOrder] = np.roll(sincBase, i)
            F[i, filterOrder] = currentDiff * indexDiff
            if self.navVectorLength > 1:
                F[i, filterOrder+1] = currentDiff * np.power(indexDiff, 2)/2
                if self.navVectorLength > 2:
                    F[i, filterOrder+2] = (
                        currentDiff *
                        self.stateVector[filterOrder] *
                        np.power(indexDiff, 3)/6
                    )
        # Navigation-state block of F and the process-noise mapping vector L,
        # depending on how many navigation states are estimated.
        L = np.zeros(filterOrder+self.navVectorLength)
        if self.navVectorLength == 1:
            F[filterOrder, filterOrder] = 1
            L[filterOrder] = dT
        elif self.navVectorLength == 2:
            F[filterOrder, filterOrder] = 1.0
            F[filterOrder, filterOrder+1] = dT
            F[filterOrder+1, filterOrder+1] = 1.0
            L[filterOrder] = np.power(dT, 2)/2
            L[filterOrder + 1] = dT
        elif self.navVectorLength == 3:
            vCurrent = self.stateVector[filterOrder]
            aCurrent = self.stateVector[filterOrder + 1]
            gradCurrent = self.stateVector[filterOrder + 2]
            F[filterOrder,filterOrder] = 1 + (gradCurrent * np.power(dT,2)/2)
            F[filterOrder,filterOrder+1] = dT
            F[filterOrder,filterOrder+2] = vCurrent * np.power(dT,2)/2
            F[filterOrder+1,filterOrder+1] = 1
            F[filterOrder+1,filterOrder+2] = vCurrent * dT
            F[filterOrder+2,filterOrder+2] = 1
            L[filterOrder] = vCurrent * np.power(dT,3)/6
            L[filterOrder + 1] = vCurrent * np.power(dT,2)/2
            L[filterOrder + 2] = dT
        # Map process noise into the correlation taps through the unshifted
        # sinc derivative.
        diffBase = np.zeros_like(sincBase)
        for i in range(len(baseVec)):
            diffBase[i] = self.sincDiff(baseVec[i])
        diffBase = np.roll(diffBase, 1 - int(halfLength))
        for i in range(len(baseVec)):
            L[i] = (
                np.roll(diffBase, i).dot(h) *
                np.power(indexDiff,self.navVectorLength+1)/factorial(self.navVectorLength+1)
            )
        return({'F':F, 'L':L})
def getMeasurementMatrices(
self,
measurement,
source=None
):
if (
(source.signalID() == self.__trueSignal__.signalID()) and
('t' in measurement)
):
measurementMatrices = self.getTOAMeasurementMatrices(
measurement,
self.correlationVector
)
HDict = {'correlationVector': measurementMatrices['H']}
RDict = {'correlationVector': measurementMatrices['R']}
dyDict = {'correlationVector': measurementMatrices['dY']}
else:
HDict = {'': None}
RDict = {'': None}
dyDict = {'': None}
measurementMatricesDict = {
'H': HDict,
'R': RDict,
'dY': dyDict
}
return measurementMatricesDict
## @}
## @fun #buildTimeUpdateMatrices constructs the correlation vector time
# update matrices
#
# @details The #buildTimeUpdateMatrices method constructs the matrices required to perform the time update of the correlation vector sub-state.
#
# The time update matrices are a function of the estimated spacecraft velocity (\f$\mathbf{v}\f$), velocity variance (\f$\mathbf{P}_{\mathbf{v}}\f$), and the elapsed time over which the time update occurs (\f$\Delta T\f$). The matrices are constructed as follows:
#
# \f[
# \mathbf{F}_{j \to k} = \begin{bmatrix}
# \textrm{sinc}(\hat{\delta}) & \hdots & \textrm{sinc}(\hat{\delta} + N - 1) \\
# \vdots & \ddots & \vdots \\
# \textrm{sinc}(\hat{\delta} - N + 1) & \hdots & \textrm{sinc}(\hat{\delta})
# \end{bmatrix}
# \f]
#
    # \f[
    # \mathbf{L}_{j} = \begin{bmatrix}
    # \frac{\textrm{cos}(\hat{\delta})}{\hat{\delta}} - \frac{\textrm{sin}(\hat{\delta})}{\hat{\delta}^2} & \hdots \\
    # \vdots & \ddots \\
    # \end{bmatrix} \sv[timeIndex = k]
    # \f]
#
# \f[
# Q_{\delta} = \left(\frac{(k-j)}{c}\right)^2
# {\LOSVec[S]}^T \mathbf{P}_{\mathbf{v}} \LOSVec[S]
# \f]
#
# where
#
# \f[
# \hat{\delta}_{j \to k} = \frac{\mathbf{v} \LOSVec[S] \Delta T}{c T}
# \f]
#
# @param self The object pointer
# @param deltaT The amount of time over which the time update is occuring
# @param dynamics A dictionary containing the relevant dynamics for the
# time update
# @param h The current correlation vector
#
    # @returns A dictionary containing the matrices \f$\mathbf{F}\f$,
    # \f$\mathbf{L}\f$, and the scalar \f$Q\f$
def buildTimeUpdateMatrices(
self,
deltaT,
dynamics,
h
):
indexDiff = deltaT/self.__dT__
if (
(dynamics is not None and 'velocity' in dynamics) or
(
self.INF_type == 'external' and
(
(np.sqrt(self.navState.velocityVar) < self.velStdDevThreshold) or
self.velStdDevThreshold == 0
)
)
):
if 'velocity' in dynamics:
velocity = dynamics['velocity']['value']
vVar = dynamics['velocity']['var'] * self.velocityNoiseScaleFactor
peakShift = (
(velocity.dot(self.__unitVecToSignal__) * indexDiff) /
self.speedOfLight()
)
# velocityTDOA = peakShift * self.__dT__
velocityTDOA = (
velocity.dot(self.__unitVecToSignal__) * deltaT /
self.speedOfLight()
)
Q = (
self.__unitVecToSignal__.dot(
vVar
).dot(self.__unitVecToSignal__) *
np.square(indexDiff / self.speedOfLight())
)
tdoaQ = (
self.__unitVecToSignal__.dot(vVar
).dot(self.__unitVecToSignal__) *
np.square(deltaT/self.speedOfLight()))
elif self.INF_type=='external':
peakShift = self.navState.currentVelocity * indexDiff
velocityTDOA = self.navState.currentVelocity * deltaT
Q = self.navState.velocityVar * np.square(indexDiff) * self.velocityNoiseScaleFactor
tdoaQ = self.navState.velocityVar * np.square(deltaT) * self.velocityNoiseScaleFactor
else:
velocityTDOA = 0
peakShift = 0
Q = self.navProcessNoise * np.power(indexDiff,4)/4
tdoaQ = self.navProcessNoise * np.power(deltaT,4)/4
FMatrixShift = -self.peakOffsetFromCenter # - peakShift
self.signalTDOA = (
self.signalTDOA +
velocityTDOA
)
self.TDOAVar = self.TDOAVar + tdoaQ
self.peakCenteringDT = (
self.peakCenteringDT + velocityTDOA +
(self.peakOffsetFromCenter*self.__dT__)
)
# Initialize empty matricies
F = np.zeros([self.__filterOrder__, self.__filterOrder__])
L = np.zeros([self.__filterOrder__, self.__filterOrder__])
# Build arrays of indicies from which to form the sinc function
if np.mod(self.__filterOrder__, 2) == 0:
baseVec = (
np.linspace(
1 - self.__halfLength__,
self.__halfLength__,
self.__filterOrder__
)
)
else:
baseVec = (
np.linspace(
1 - self.__halfLength__,
self.__halfLength__ - 1,
self.__filterOrder__
)
)
# Compute the sinc function of the base vector
sincBase = np.sinc(baseVec + FMatrixShift)
diffBase = np.zeros_like(sincBase)
for i in range(len(baseVec)):
diffBase[i] = self.sincDiff(baseVec[i] + peakShift)
sincBase = np.roll(sincBase, 1 - int(self.__halfLength__))
diffBase = np.roll(diffBase, 1 - int(self.__halfLength__))
for i in range(len(F)):
F[i] = np.roll(sincBase, i)
L[i] = np.roll(diffBase, i)
L = L.dot(h)
# else:
# # If no velocity was included in dynamics, then do nothing during
# # time update
# F = np.eye(self.__filterOrder__)
# L = np.zeros(self.__filterOrder__)
# Q = 0
timeUpdateDict = {
'F': F,
'L': L,
'Q': Q
}
return(timeUpdateDict)
def buildFLMatrices(self, peakShift, h):
# Initialize empty matricies
F = np.zeros([self.__filterOrder__, self.__filterOrder__])
L = np.zeros([self.__filterOrder__, self.__filterOrder__])
# Build arrays of indicies from which to form the sinc function
if np.mod(self.__filterOrder__, 2) == 0:
baseVec = (
np.linspace(
1 - self.__halfLength__,
self.__halfLength__,
self.__filterOrder__
)
)
else:
baseVec = (
np.linspace(
1 - self.__halfLength__,
self.__halfLength__ - 1,
self.__filterOrder__
)
)
# Compute the sinc function of the base vector
sincBase = np.sinc(baseVec + peakShift)
diffBase = np.zeros_like(sincBase)
for i in range(len(baseVec)):
diffBase[i] = self.sincDiff(baseVec[i] + peakShift)
sincBase = np.roll(sincBase, 1 - int(self.__halfLength__))
diffBase = np.roll(diffBase, 1 - int(self.__halfLength__))
for i in range(len(F)):
F[i] = np.roll(sincBase, i)
L[i] = np.roll(diffBase, i)
L = L.dot(h)
return {'F':F, 'L':L}
## @}
## @{
# @name Functions Specific to #CorrelationVector
#
# The following remaining functions are not required in order for this
# class to be used as a SubState, and may be changed as needed,
# including inputs and outputs.
def getTOAMeasurementMatrices(
self,
measurement,
corrVec
):
photonTOA = measurement['t']['value']
adjustedTOA = photonTOA + self.peakCenteringDT
H = np.eye(self.__filterOrder__)
if self.INF_type == 'deep':
H = np.append(H, np.zeros([self.__filterOrder__, self.navVectorLength]), axis=1)
timeVector = np.linspace(
0,
(self.__filterOrder__ - 1),
self.__filterOrder__
)
timeVector = timeVector * self.__dT__
timeVector = (
timeVector + adjustedTOA
)
# if self.peakLock is True:
# timeVector = timeVector - self.signalDelay
signalTimeHistory = np.zeros(self.__filterOrder__)
halfDT = self.__dT__/2.0
# for timeIndex in range(len(timeVector)):
# signalTimeHistory[timeIndex] = (
# self.__trueSignal__.getSignal(timeVector[timeIndex]) *
# self.__dT__
# )
for timeIndex in range(len(timeVector)):
signalTimeHistory[timeIndex] = (
self.__trueSignal__.signalIntegral(
timeVector[timeIndex]-halfDT,
timeVector[timeIndex] + halfDT
)
)
# plt.plot(signalTimeHistory)
# plt.show(block=False)
# 1/0
# print(corrVec)
# print(signalTimeHistory)
dY = signalTimeHistory - corrVec
R = (
np.eye(self.__filterOrder__) *
#self.__trueSignal__.flux *
self.__trueSignal__.peakAmplitude *
self.__dT__ *
np.dot(corrVec, corrVec) *
self.measurementNoiseScaleFactor
)
measMatDict = {
'H': H,
'dY': dY,
'R': R
}
return measMatDict
## @fun #computeSignalTDOA computes the delay between the #__trueSignal__ and
# measurements based on a correlation vector
#
# @details The #computeSignalDelay function is a rudimentary function
# which takes an estimate of the correlation vector and uses it to
# estimate the location of the peak. It functions by finding the tap with
# the maximum value, and then fitting a quadratic to the points
# surrounding the maximum value tap. The number of points to which the
# quadratic is fitted is determined by the value of #peakFitPoints; the
# number of points is equal to \f$2 * n + 1\f$ where \f$n = \f$
# #peakFitPoints.
#
# The delay estimate that is returned is in units of #__dT__. So, a returned
# value of 2 would imply that the peak is located at 2, and therefore the
# delay corresponding to the correlation vector is 2 #__dT__.
#
# The returned delay may not include previously accumulated #signalDelay
# between the signal and the measurements. See the #storeStateVector
# function for more information on how the #signalDelay is stored and
# accumulated delay is accounted for.
#
# @param self The object pointer
# @param c The correlation vector
# @param P the correlation vector covariance matrix
#
# @return The estimate of the delay
def computeSignalTDOA(
self,
c,
P
):
# First estimate of peak location is the location of the max value
peakLocation = np.argmax(c)
# Next, we "roll" the correlation vector so that the values being
# fitted quadratically are the first 2 * peakFitPoints + 1 values
lowerBound = peakLocation - self.peakFitPoints
upperBound = lowerBound + (self.peakFitPoints * 2) + 1
if (lowerBound < 0) or (upperBound > self.__filterOrder__):
mySlice = range(lowerBound, upperBound)
slicedC = c.take(mySlice, mode='wrap')
slicedP = P.take(mySlice, axis=0, mode='wrap').take(mySlice, axis=1, mode='wrap')
else:
mySlice = slice(lowerBound, upperBound)
slicedC = c[mySlice]
slicedP = P[mySlice, mySlice]
# xVec is the vector of "x" values corresponding the "y" values to
# which the quadratic is being fit.
xVec = self.__xVec__
# xVec = xVec - rollFactor
xVec = xVec + lowerBound
# Get the quadratic function that fits the peak and surrounding values,
# and use it to estimate the location of the max
# print(slicedC)
if len(xVec) == 3:
TDOA = self.peakFinder(xVec, slicedC)
else:
quadraticVec = self.quadraticFit(xVec, slicedC)
try:
TDOA = (-quadraticVec[1] / (2 * quadraticVec[0]))
except:
TDOA = xVec[peakLocation]
return TDOA
## @fun #estimateSignalTDOA_UT uses a unscented tranform to estimate the
# delay corresponding to a correlation vector
#
# @details The #estimateSignalDelayUT method is responsible for computing
# the estimated value of delay corresponding to a correlation vector, as
# well as the variance of that estimate. These values are computed using
# a unscented transform (i.e. sigma-point) approach.
#
# The method receives the an estimate of the correlation vector, as well
# as the covariance matrix corresponding to that vector. From there it
# computes a set of n sigma points (where n is the length of the
# correlation vector), and for each of the generated sigma point vectors,
# it computes the peak location using the #computeSignalDelay method.
#
# @param self The object pointer
# @param h The correlation vector
# @param P The correlation vector covariance matrix
#
# @returns A dict containing the estimate of the peak location
# ("meanDelay") and the estimate variance ("varDelay")
    def estimateSignalTDOA_UT(
            self,
            h,
            P,
            useMean=True
            ):
        # Estimate the TDOA (in filter-tap units) and its variance from the
        # correlation vector h and covariance container P via an unscented
        # (sigma-point) transform: perturb h along the covariance square
        # root, locate the peak of each perturbed vector, and take the
        # spread of the results as the variance.
        # Compute sigma points
        hDimension = len(h)
        maxHIndex = np.argmax(h)
        # NOTE(review): rollAmount is computed but currently unused
        rollAmount = -maxHIndex + self.__halfLength__
        # Compute the square root of P.
        if P.form == 'covariance':
            sqrtP = np.linalg.cholesky(
                hDimension * P.value[0:self.__filterOrder__, 0:self.__filterOrder__]
            )
        elif P.form == 'cholesky':
            # Already factored; just scale by sqrt(n)
            sqrtP = P.value[0:self.__filterOrder__, 0:self.__filterOrder__] * np.sqrt(hDimension)
        sigmaPoints = h + np.append(sqrtP, -sqrtP, axis=0)
        # Append one more row of sigma points containing the unmodified estimate
        sigmaPoints = np.append(np.array([h]), sigmaPoints, axis=0)
        # Initiate vector to store the resulting peaks from each sigma point
        sigmaPointResults = np.zeros(len(sigmaPoints))
        # Compute the peak corresponding to each sigma point vector
        for i in range(len(sigmaPoints)):
            sigmaPointResults[i] = (
                self.computeSignalTDOA(sigmaPoints[i], P.convertCovariance('covariance').value)
            )
        # Use the unperturbed sigma point (index 0) as the mean rather than
        # averaging, avoiding wrap-around bias in the mean itself.
        meanTDOA = sigmaPointResults[0]
        # Unwrap sigma-point results that landed on the far side of the
        # (circular) correlation vector before computing the variance.
        for i in range(len(sigmaPoints)):
            if (meanTDOA - sigmaPointResults[i]) > self.__halfLength__:
                sigmaPointResults[i] += self.__dimension__
            elif (sigmaPointResults[i] - meanTDOA) > self.__halfLength__:
                sigmaPointResults[i] -= self.__dimension__
        varTDOA = np.var(sigmaPointResults)
        return {'meanTDOA': meanTDOA, 'varTDOA': varTDOA, 'sigmaPoints': sigmaPointResults}
    def estimateSignalTDOA_EK(self, h, P):
        # Estimate the TDOA (in filter-tap units) and its variance from the
        # correlation vector h and covariance container P by fitting a
        # parabola to the three taps around the maximum and propagating the
        # covariance through the linearized vertex formula (EKF-style).
        if P.form == 'covariance':
            P = P.value[0:self.__filterOrder__, 0:self.__filterOrder__]
        elif P.form == 'cholesky':
            P = P.convertCovariance('covariance').value[0:self.__filterOrder__, 0:self.__filterOrder__]
        # First estimate of peak location is the location of the max value
        peakLocation = np.argmax(h)
        # Next, we "roll" the correlation vector so that the values being
        # fitted quadratically are the first 3 values
        lowerBound = peakLocation - 1
        upperBound = lowerBound + (1 * 2) + 1
        if (lowerBound < 0) or (upperBound > self.__filterOrder__):
            # Window extends past an end of the vector; wrap around
            mySlice = range(lowerBound, upperBound)
            slicedC = h.take(mySlice, mode='wrap')
            slicedP = P.take(mySlice, axis=0, mode='wrap').take(mySlice, axis=1, mode='wrap')
        else:
            mySlice = slice(lowerBound, upperBound)
            slicedC = h[mySlice]
            slicedP = P[mySlice, mySlice]
        # xVec is the vector of "x" values corresponding the "y" values to
        # which the quadratic is being fit.
        xVec = self.__xVec__
        xVec = xVec + lowerBound
        # Get the quadratic function that fits the peak and surrounding values,
        # and use it to estimate the location of the max
        TDOA = self.peakFinder(xVec, slicedC)
        # First-order variance propagation through the vertex function
        jacobian = self.peakFinderJacobian(xVec, slicedC)
        variance = jacobian.dot(slicedP).dot(jacobian.transpose())
        return {'meanTDOA': TDOA, 'varTDOA': variance}
def speedOfLight(
self
):
return (299792)
@staticmethod
def sincDiff(x):
if np.abs(x) < 1e-100:
myDiff = 0.0
else:
piX = np.pi*x
# myDiff = np.pi * (
# (((np.pi * x) * np.cos(x * np.pi)) - np.sin(x * np.pi))
# /
# np.square(x * np.pi)
# )
myDiff = (piX*np.cos(piX) - np.sin(piX))/(np.pi * np.power(x,2))
# myDiff
return myDiff
@staticmethod
def quadraticFit(x, y):
X_T = np.array([np.power(x, 2), x, np.ones(len(x))])
X = X_T.transpose()
if len(x) < 3:
raise ValueError(
"Cannot fit a quadratic to less than three data points."
)
elif len(x) == 3:
# Note: Suprisingly, it is faster to directly invert the X matrix
# than it is to do a linear solve. Strange.
#coef = np.linalg.solve(X, y)
coef = np.linalg.inv(X).dot(y)
else:
#coef = np.linalg.solve(X_T.dot(X).dot(X_T), y)
coef = np.linalg.inv(X_T.dot(X)).dot(X_T).dot(y)
return coef
    def initializeRealTimePlot(
            self,
            plotHandle=None,
            axisHandle=None
            ):
        # Set up the parent class's real-time plot, then add two artists of
        # our own: a scatter marker at the current TDOA estimate and a
        # horizontal line spanning +/- one standard deviation around it.
        super().initializeRealTimePlot(plotHandle, axisHandle)
        self.RTPlotTDOA = self.RTPaxisHandle.scatter(
            self.signalTDOA,
            1
        )
        # Note the trailing comma: axes.plot returns a list of Line2D objects
        self.RTPlotTDOA_error, = self.RTPaxisHandle.plot(
            [
                self.signalTDOA - np.sqrt(self.TDOAVar),
                self.signalTDOA + np.sqrt(self.TDOAVar)
            ],
            [1,1]
        )
        return
    def realTimePlot(
            self,
            normalized=True
            ):
        # Refresh the real-time plot: move the TDOA marker and its error bar
        # to the latest estimate, then let the parent class redraw the
        # correlation-tap portion of the state.
        if self.RTPlotHandle is None:
            # Lazily create the plot on first use
            self.initializeRealTimePlot()
        self.RTPlotTDOA.set_offsets([self.signalTDOA, 1])
        self.RTPlotTDOA_error.set_data(
            [
                self.signalTDOA - np.sqrt(self.TDOAVar),
                self.signalTDOA + np.sqrt(self.TDOAVar)
            ],
            [1,1]
        )
        # Only plot the correlation taps, not any appended nav states
        super().realTimePlot(normalized, substateRange = slice(0,self.__filterOrder__))
        return
@staticmethod
def peakFinder(x,y):
x1 = x[0]
x2 = x[1]
x3 = x[2]
y1 = y[0]
y2 = y[1]
y3 = y[2]
x0 = (
-(y1*(np.square(x3) - np.square(x2)) + y2*(np.square(x1) - np.square(x3)) + y3*(np.square(x2) - np.square(x1)))
/
(2*(y1*(x2-x3) + y2*(x3-x1) + y3*(x1-x2)))
)
x0 = (
(
y1*(np.square(x2)-np.square(x3)) +
y2*(np.square(x3)-np.square(x1)) +
y3*(np.square(x1)-np.square(x2))
)
/
(2*(y1*(x2-x3) + y2*(x3-x1) + y3*(x1-x2)))
)
return(x0)
@staticmethod
def peakFinderJacobian(x,y):
x1 = x[0]
x2 = x[1]
x3 = x[2]
y1 = y[0]
y2 = y[1]
y3 = y[2]
A = np.square(x2) - np.square(x3)
B = np.square(x3) - np.square(x1)
C = np.square(x1) - np.square(x2)
D = x2-x3
E = x3-x1
# E = x1-x2
F = x1-x2
AE = A*E
AF = A*F
BD = B*D
BF = B*F
CD = C*D
CE = C*E
denom = 2*np.power(((D*y1) + (E*y2) + (F*y3)),2)
dT_dy1 = (
((AE - BD)*y2 + (AF - CD)*y3)
/
denom
)
dT_dy2 = (
((BD - AE)*y1 + (BF - CE)*y3)
/
denom
)
dT_dy3 = (
((CD - AF)*y1 + (CE - BF)*y2)
/
denom
)
return np.array([dT_dy1, dT_dy2, dT_dy3])
| 37.370684 | 268 | 0.561316 | s plt
from . import substate
from .. modularfilter import ModularFilter
from . oneDimensionalPositionVelocity import oneDPositionVelocity
from .. signals.oneDimensionalObject import oneDObjectMeasurement
from .. utils import covarianceContainer
from scipy.linalg import block_diag
from scipy.special import factorial
from math import isnan
ial=None,
gradInitial=None,
peakEstimator='EK'
):
print('updated correlation filter')
self.peakLockThreshold = peakLockThreshold
self.peakCenteringDT = 0
self.peakOffsetFromCenter = 0
self.navProcessNoise = navProcessNoise
)
elif covarianceStorage == 'cholesky':
correlationVectorCovariance = (
np.eye(self.__filterOrder__) *
self.__trueSignal__.peakAmplitude * self.__dT__
)
noise term in #timeUpdate.
self.processNoise = processNoise
## @brief #measurementNoiseScaleFactor is a factor used to scale the
# measurement noise matrix. The default value is 1 (no scaling).
self.measurementNoiseScaleFactor = measurementNoiseScaleFactor
self.peakEstimator = peakEstimator
self.__halfLength__ = int(np.ceil(self.__filterOrder__ / 2))
self.__halfLengthSeconds__ = self.__halfLength__ * self.__dT__
xAxis = np.linspace(0, self.__filterOrder__-1, self.__filterOrder__)
self.xAxis = xAxis * self.__dT__
self.__xVec__ = np.linspace(
0,
self.peakFitPoints * 2,
(self.peakFitPoints * 2) + 1
)
self.internalNavFilter = internalNavFilter
print(internalNavFilter)
if self.internalNavFilter == 'none':
self.internalNavFilter = None
self.INF_type = 'none'
elif self.internalNavFilter == 'deep':
self.INF_type = 'deep'
elif self.internalNavFilter:
self.INF_type = 'external'
else:
self.internalNavFilter = None
self.INF_type = 'none'
stateVector = correlationVector
svCovariance = correlationVectorCovariance
if self.INF_type == 'deep':
if not vInitial:
raise ValueError('In order to use the deep internal navigation filter, you must initialize. Filter expects to receive at least vInitial, but received None')
self.velocity = vInitial['value']
self.velocityStdDev = np.sqrt(vInitial['var'])
if not aInitial:
self.navVectorLength = 1
navVector = np.zeros(1)
navVector[0] = vInitial['value']
navVar = np.zeros([1,1])
navVar[0,0] = vInitial['var']
elif not gradInitial:
self.acceleration = aInitial['value']
self.accelerationStdDev = np.sqrt(aInitial['var'])
self.navVectorLength = 2
navVector = np.zeros(2)
navVector[0] = vInitial['value']
navVector[1] = aInitial['value']
navVar = np.zeros([2,2])
navVar[0,0] = vInitial['var']
navVar[1,1] = aInitial['var']
else:
self.acceleration = aInitial['value']
self.accelerationStdDev = np.sqrt(aInitial['var'])
self.gradient = gradInitial['value']
self.gradientStdDev = np.sqrt(gradInitial['var'])
self.navVectorLength = 3
navVector = np.zeros(3)
navVector[0] = vInitial['value']
navVector[1] = aInitial['value']
navVector[2] = gradInitial['value']
navVar = np.zeros([3,3])
navVar[0,0] = vInitial['var']
navVar[1,1] = aInitial['var']
navVar[2,2] = gradInitial['var']
stateVector = np.append(stateVector,navVector)
svCovariance = block_diag(svCovariance, navVar)
svCovariance = covarianceContainer(
svCovariance,
covarianceStorage
)
self.mostRecentF = np.eye(self.__filterOrder__)
self.stateVector = stateVector
super().__init__(
stateDimension=len(stateVector),
stateVectorHistory={
't': t,
'stateVector': stateVector,
'covariance': svCovariance,
'aPriori': aPriori,
'signalTDOA': signalTDOA,
'TDOAVar': TDOAVar,
'xAxis': self.xAxis,
'stateVectorID': -1
},
storeLastStateVectors=storeLastStateVectors
)
self.tdoaStdDevThreshold = tdoaStdDevThreshold
self.velStdDevThreshold = velStdDevThreshold
self.tdoaNoiseScaleFactor = tdoaNoiseScaleFactor
self.velocityNoiseScaleFactor = velocityNoiseScaleFactor
if self.INF_type == 'external':
self.navState = self.internalNavFilter.subStates['oneDPositionVelocity']['stateObject']
return
##
# @name Mandatory SubState Functions
# The following functions are required in order for this class to be used
# as a substate in ModularFilter. The inside of the functions may be
# changed or updated, but their "black box" behavior must remain the
# same; i.e. they must still perform the same essential functions and
# return the same things.
# @{
## @fun #storeStateVector stores an updated estimate of the state vector
    def storeStateVector(
            self,
            svDict
    ):
        """Receive an updated state vector estimate from the parent filter
        and refresh all derived quantities: the TDOA estimate and variance,
        peak-lock status, peak-centering bookkeeping, and (depending on the
        internal-navigation coupling mode) the velocity/acceleration states.

        Parameters
        ----------
        svDict : dict
            Produced by the parent filter; must contain 't', 'aPriori',
            'stateVector' and 'covariance'.  Derived keys ('signalTDOA',
            'TDOAVar', 'xAxis', and optionally 'velocity'/'acceleration'/
            'aGradient') are written back into it before it is passed to the
            base-class storeStateVector.
        """
        # Unpack updated state vector values
        self.t = svDict['t']
        self.aPriori = svDict['aPriori']
        # Compute new estimate of delay based on new state vector, store in
        # svDict and local attributes
        if not svDict['aPriori']:
            # A posteriori (measurement) update: accept the new correlation
            # vector and re-estimate the signal TDOA from it.
            self.correlationVector = svDict['stateVector'][0:self.__filterOrder__]
            self.correlationVectorCovariance = svDict['covariance']
            self.stateVector = svDict['stateVector']
            if self.peakEstimator == 'UK':
                # Unscented-transform (sigma point) peak estimator
                tdoaDict = self.estimateSignalTDOA_UT(
                    self.correlationVector,
                    self.correlationVectorCovariance
                )
            elif self.peakEstimator == 'EK':
                # Linearized (extended-Kalman style) peak estimator
                tdoaDict = self.estimateSignalTDOA_EK(
                    self.correlationVector,
                    self.correlationVectorCovariance
                )
            else:
                raise ValueError('Unrecougnized peak finding algorithm %s' %self.peakEstimator)
            # Convert the peak location from tap units to seconds and add
            # the accumulated peak-centering offset.
            newTDOA = (
                (
                    tdoaDict['meanTDOA']
                ) *
                self.__dT__
            ) + self.peakCenteringDT
            newTDOAVar = tdoaDict['varTDOA'] * np.square(self.__dT__)
            # Only accept finite estimates; otherwise keep the previous TDOA.
            if not isnan(newTDOA) and not isnan(newTDOAVar):
                self.signalTDOA = newTDOA
                self.TDOAVar = newTDOAVar
            svDict['signalTDOA'] = self.signalTDOA
            svDict['TDOAVar'] = self.TDOAVar
            if self.INF_type == 'external':
                # Feed the TDOA to the external nav filter as a position
                # measurement, unless its standard deviation exceeds the
                # configured gate (a threshold of 0 disables the gate).
                if (
                        (np.sqrt(self.TDOAVar) < (self.tdoaStdDevThreshold))
                        or (self.tdoaStdDevThreshold == 0)
                ):
                    self.internalNavFilter.measurementUpdateEKF(
                        {'position': {
                            'value': self.signalTDOA,
                            'var': self.TDOAVar * self.tdoaNoiseScaleFactor
                        }},
                        'oneDPositionVelocity'
                    )
                else:
                    # Empty measurement: advances the nav filter bookkeeping
                    # without adding information.
                    self.internalNavFilter.measurementUpdateEKF(
                        {}, ''
                    )
            if self.peakLock is True and self.centerPeak is True:
                # Record how far the estimated peak sits from the center tap
                # so the next time update can re-center it.
                self.peakOffsetFromCenter = tdoaDict['meanTDOA'] - self.__halfLength__ + 1
                # self.peakOffsetFromCenter = np.mod(tdoaDict['meanTDOA'], self.__dT__)
                # print(self.peakOffsetFromCenter)
            else:
                self.peakOffsetFromCenter = 0
        else:
            # A priori (time-update only) store: propagate the locally held
            # correlation vector through the most recent shift matrix F
            # rather than taking svDict's copy unchanged.
            # if self.peakLock is True and self.centerPeak is True:
            #     svDict['stateVector'] = self.correlationVector
            # else:
            #     self.correlationVector = svDict['stateVector']
            # if self.peakOffsetFromCenter != 0:
            #     FLDict = self.buildFLMatrices(
            #         -self.peakOffsetFromCenter*self.__dT__,
            #         self.correlationVector
            #     )
            #     self.correlationVector = FLDict['F'].dot(self.correlationVector)
            #     self.peakOffsetFromCenter = 0
            # self.correlationVector = svDict['stateVector'][0:self.__filterOrder__]
            self.correlationVector = self.mostRecentF.dot(self.correlationVector)
            svDict['stateVector'][0:self.__filterOrder__] = self.correlationVector
            self.stateVector = svDict['stateVector']
            self.correlationVectorCovariance = svDict['covariance']
            if self.peakEstimator == 'UK':
                tdoaDict = self.estimateSignalTDOA_UT(
                    self.correlationVector,
                    self.correlationVectorCovariance
                )
            elif self.peakEstimator == 'EK':
                tdoaDict = self.estimateSignalTDOA_EK(
                    self.correlationVector,
                    self.correlationVectorCovariance
                )
            else:
                raise ValueError('Unrecougnized peak finding algorithm %s' %self.peakEstimator)
            # newTDOA = (
            #     (
            #         tdoaDict['meanTDOA']
            #     ) *
            #     self.__dT__
            # ) + self.peakCenteringDT
            # Only the variance is refreshed on an a priori store; the TDOA
            # value itself was already propagated during the time update.
            newTDOAVar = tdoaDict['varTDOA'] * np.square(self.__dT__)
            # self.signalTDOA = newTDOA
            self.TDOAVar = newTDOAVar
            svDict['signalTDOA'] = self.signalTDOA
            svDict['TDOAVar'] = self.TDOAVar
            self.peakOffsetFromCenter = 0
        svDict['xAxis'] = self.xAxis + self.peakCenteringDT
        # svDict['xAxis'] = self.xAxis - self.signalTDOA
        tdoaSTD = np.sqrt(self.TDOAVar)
        # Peak-lock hysteresis: lock when the TDOA standard deviation drops
        # below the threshold; unlock only once it rises 10% above it.
        if tdoaSTD < (self.peakLockThreshold * self.__dT__):
            if not self.peakLock:
                print(
                    'Substate %s reached peak lock at time %s'
                    %(self.__trueSignal__.name, self.t)
                )
            self.peakLock = True
        else:
            if self.peakLock and tdoaSTD > (self.peakLockThreshold * self.__dT__ * 1.1):
                print(
                    'Substate %s lost peak lock at time %s'
                    %(self.__trueSignal__.name, self.t)
                )
                self.peakLock = False
                self.peakOffsetFromCenter = 0
        if self.INF_type == 'deep':
            # Deep coupling: the navigation states (velocity, and optionally
            # acceleration and its gradient) live at the tail of this
            # substate's own state vector, after the correlation taps.
            fO = self.__filterOrder__
            currentV = self.stateVector[fO]
            currentVStdDev = np.sqrt(self.correlationVectorCovariance[fO,fO].value)
            self.velocity = currentV
            self.velocityStdDev = currentVStdDev
            svDict['velocity'] = {'value':currentV, 'stddev': currentVStdDev}
            if self.navVectorLength == 2:
                currentA = self.stateVector[fO+1]
                currentAStdDev = np.sqrt(self.correlationVectorCovariance[fO+1,fO+1].value)
                svDict['acceleration'] = {'value':currentA, 'stddev': currentAStdDev}
                self.acceleration = currentA
                self.accelerationStdDev = currentAStdDev
            elif self.navVectorLength == 3:
                currentA = self.stateVector[fO+1]
                currentAStdDev = np.sqrt(self.correlationVectorCovariance[fO+1,fO+1].value)
                svDict['acceleration'] = {'value':currentA, 'stddev': currentAStdDev}
                self.acceleration = currentA
                self.accelerationStdDev = currentAStdDev
                currentGrad = self.stateVector[fO+2]
                currentGradStdDev = np.sqrt(self.correlationVectorCovariance[fO+2,fO+2].value)
                svDict['aGradient'] = {'value':currentGrad, 'stddev': currentGradStdDev}
                self.gradient = currentGrad
                self.gradientStdDev = currentGradStdDev
        elif self.INF_type == 'external':
            # External coupling: pull the nav solution from the separate
            # one-dimensional position/velocity filter.
            self.velocity = self.navState.currentVelocity
            self.velocityStdDev = np.sqrt(self.navState.velocityVar)
        super().storeStateVector(svDict)
        return
## @fun #timeUpdate returns the matrices for performing the correlation
# vector time update.
#
# @details This function calls the #buildTimeUpdateMatrices method to
# generate the time-update matrices.
#
# @param self The object pointer
# @param dT The amount of time ellapsed over which the time update is to
# be performed
# @param dynamics A dictionary containing the dynamics for the time update
# (e.g. velocity)
#
# @sa SubStates.SubState.timeUpdate
    def timeUpdate(
            self,
            dT,
            dynamics=None
    ):
        """Assemble the state transition matrix F and process noise matrix Q
        for this substate over an interval dT.

        With 'deep' internal-navigation coupling the correlation taps and
        the appended navigation states are propagated jointly; otherwise
        only the correlation vector is propagated, and an 'external' nav
        filter (if present) receives its own time update here.

        Parameters
        ----------
        dT : elapsed time over which the time update is performed
        dynamics : optional dict of external dynamics (e.g. 'velocity',
            'acceleration' entries with 'value'/'var')

        Returns
        -------
        dict with keys 'F' and 'Q'.
        """
        if self.INF_type != 'deep':
            timeUpdateMatrices = self.buildTimeUpdateMatrices(
                dT, dynamics, self.correlationVector
            )
            L = timeUpdateMatrices['L']
            Q = timeUpdateMatrices['Q']
            # Process noise: delay-uncertainty term mapped through L, plus a
            # diagonal floor scaled by signal amplitude and tap spacing.
            Qmat = (
                np.outer(L, L) * Q +
                (
                    np.eye(self.__filterOrder__) *
                    self.processNoise * dT *
                    np.square(self.__trueSignal__.peakAmplitude * self.__dT__)
                )
            )
            if dynamics is not None and 'acceleration' in dynamics:
                # Project the 3D acceleration (and its covariance) onto the
                # line of sight and convert to delay units (divide by c).
                oneDAcceleration = (
                    dynamics['acceleration']['value'].dot(self.__unitVecToSignal__) /
                    self.speedOfLight()
                )
                oneDAccelerationVar = (
                    self.__unitVecToSignal__.dot(
                        dynamics['acceleration']['value'].dot(
                            self.__unitVecToSignal__.transpose()
                        )
                    ) / np.square(self.speedOfLight())
                )
            else:
                oneDAcceleration = 0
                oneDAccelerationVar = self.navProcessNoise
            if self.INF_type == 'external':
                # NOTE(review): the dynamics key appears to follow a
                # "<substateName><quantity>" convention expected by the
                # internal nav filter -- confirm against ModularFilter.
                self.internalNavFilter.timeUpdateEKF(
                    dT,
                    dynamics = {
                        'oneDPositionVelocityacceleration': {
                            'value': oneDAcceleration,
                            'var': oneDAccelerationVar
                        }
                    }
                )
        else:
            timeUpdateMatrices = self.buildDeepTimeUpdateMatrices(dT, dynamics, self.correlationVector)
            # if dynamics is not None and 'accelerationGrad' in dynamics:
            #     navProcessNoise = (
            #         dynamics['accelerationGrad']['value'].dot(self.__unitVecToSignal__) /
            #         self.speedOfLight()
            #     )
            #     oneDAccelerationGradVar = (
            #         self.__unitVecToSignal__.dot(
            #             dynamics['accelerationGrad']['value'].dot(
            #                 self.__unitVecToSignal__.transpose()
            #             )
            #         ) / np.square(self.speedOfLight())
            #     )
            # else:
            L = timeUpdateMatrices['L']
            # Navigation process noise enters through L; the correlation taps
            # additionally receive a diagonal noise floor (zero rows/columns
            # on the appended navigation states).
            Qmat = (
                np.outer(L, L) * self.navProcessNoise + (
                    (
                        block_diag(np.eye(self.__filterOrder__),np.zeros([self.navVectorLength,self.navVectorLength])) *
                        self.processNoise * dT *
                        np.square(self.__trueSignal__.flux * self.__dT__)
                    )
                )
            )
        # Remember the correlation-tap block of F so an a priori
        # storeStateVector can re-propagate the locally held vector.
        self.mostRecentF = timeUpdateMatrices['F'][0:self.__filterOrder__, 0:self.__filterOrder__]
        return {'F': timeUpdateMatrices['F'], 'Q': Qmat}
    def buildDeepTimeUpdateMatrices(self,dT, dynamics, h):
        """Build the joint time-update matrices (F, L) for the correlation
        taps plus the appended navigation states ('deep' coupling).

        Parameters
        ----------
        dT : elapsed time of the update
        dynamics : external dynamics dictionary (currently unused here; the
            appended navigation states themselves supply the motion model)
        h : current correlation vector, used to linearize the fractional
            tap shift with respect to the navigation states

        Returns
        -------
        dict with 'F' (state transition matrix) and 'L' (process-noise
        influence vector).
        """
        FMatrixShift = -self.peakOffsetFromCenter
        filterOrder = self.__filterOrder__
        # Initialize empty matricies
        F = np.zeros([filterOrder + self.navVectorLength, filterOrder+self.navVectorLength])
        halfLength = self.__halfLength__
        # Elapsed time measured in taps
        indexDiff = dT/self.__dT__
        peakShift = self.stateVector[self.__filterOrder__] * indexDiff
        # Velocity term
        self.peakCenteringDT = (
            self.peakCenteringDT + self.stateVector[self.__filterOrder__] * dT
        )
        if self.navVectorLength > 1:
            # Acceleration term (if acceleration is being estimated)
            self.peakCenteringDT = (
                self.peakCenteringDT +
                self.stateVector[self.__filterOrder__ + 1] * np.power(dT,2)/2
            )
            peakShift = (
                peakShift + self.stateVector[self.__filterOrder__ + 1]*np.power(indexDiff,2)/2
            )
        if self.navVectorLength > 2:
            # Acceleration gradient term
            self.peakCenteringDT = (
                self.peakCenteringDT +
                self.stateVector[self.__filterOrder__] *
                self.stateVector[self.__filterOrder__ + 2] *
                np.power(dT,3)/6
            )
            peakShift = (
                peakShift +
                self.stateVector[self.__filterOrder__] *
                self.stateVector[self.__filterOrder__ + 2] *
                np.power(indexDiff,3)/6
            )
        self.peakCenteringDT = self.peakCenteringDT + (self.peakOffsetFromCenter*self.__dT__)
        # Build arrays of indicies from which to form the sinc function
        if np.mod(filterOrder, 2) == 0:
            baseVec = (
                np.linspace(
                    1 - halfLength,
                    halfLength,
                    filterOrder
                )
            )
        else:
            baseVec = (
                np.linspace(
                    1 - halfLength,
                    halfLength - 1,
                    filterOrder
                )
            )
        # Compute the sinc function of the base vector
        sincBase = np.sinc(baseVec + FMatrixShift)
        diffBase = np.zeros_like(sincBase)
        for i in range(len(baseVec)):
            diffBase[i] = self.sincDiff(baseVec[i] + peakShift)
        sincBase = np.roll(sincBase, 1 - int(halfLength))
        diffBase = np.roll(diffBase, 1 - int(halfLength))
        # Each correlation-tap row of F is a circularly shifted sinc
        # interpolation kernel; the trailing columns couple the taps to the
        # navigation states through the sinc derivative (chain rule on the
        # fractional peak shift).
        for i in range(len(sincBase)):
            currentDiff = np.roll(diffBase, i).dot(h)
            F[i, 0:filterOrder] = np.roll(sincBase, i)
            F[i, filterOrder] = currentDiff * indexDiff
            if self.navVectorLength > 1:
                F[i, filterOrder+1] = currentDiff * np.power(indexDiff, 2)/2
            if self.navVectorLength > 2:
                F[i, filterOrder+2] = (
                    currentDiff *
                    self.stateVector[filterOrder] *
                    np.power(indexDiff, 3)/6
                )
        L = np.zeros(filterOrder+self.navVectorLength)
        # Navigation sub-block of F plus the matching process-noise
        # influence terms (kinematic models of increasing order).
        if self.navVectorLength == 1:
            F[filterOrder, filterOrder] = 1
            L[filterOrder] = dT
        elif self.navVectorLength == 2:
            F[filterOrder, filterOrder] = 1.0
            F[filterOrder, filterOrder+1] = dT
            F[filterOrder+1, filterOrder+1] = 1.0
            L[filterOrder] = np.power(dT, 2)/2
            L[filterOrder + 1] = dT
        elif self.navVectorLength == 3:
            vCurrent = self.stateVector[filterOrder]
            # NOTE(review): aCurrent is extracted but currently unused.
            aCurrent = self.stateVector[filterOrder + 1]
            gradCurrent = self.stateVector[filterOrder + 2]
            F[filterOrder,filterOrder] = 1 + (gradCurrent * np.power(dT,2)/2)
            F[filterOrder,filterOrder+1] = dT
            F[filterOrder,filterOrder+2] = vCurrent * np.power(dT,2)/2
            F[filterOrder+1,filterOrder+1] = 1
            F[filterOrder+1,filterOrder+2] = vCurrent * dT
            F[filterOrder+2,filterOrder+2] = 1
            L[filterOrder] = vCurrent * np.power(dT,3)/6
            L[filterOrder + 1] = vCurrent * np.power(dT,2)/2
            L[filterOrder + 2] = dT
        # Process-noise influence on the correlation taps: highest-order
        # kinematic term mapped through the (unshifted) sinc derivative.
        diffBase = np.zeros_like(sincBase)
        for i in range(len(baseVec)):
            diffBase[i] = self.sincDiff(baseVec[i])
        diffBase = np.roll(diffBase, 1 - int(halfLength))
        for i in range(len(baseVec)):
            L[i] = (
                np.roll(diffBase, i).dot(h) *
                np.power(indexDiff,self.navVectorLength+1)/factorial(self.navVectorLength+1)
            )
        # # Setting L to zero for test purposes only
        # L = np.zeros(filterOrder+self.navVectorLength)
        return({'F':F, 'L':L})
def getMeasurementMatrices(
self,
measurement,
source=None
):
if (
(source.signalID() == self.__trueSignal__.signalID()) and
('t' in measurement)
):
measurementMatrices = self.getTOAMeasurementMatrices(
measurement,
self.correlationVector
)
HDict = {'correlationVector': measurementMatrices['H']}
RDict = {'correlationVector': measurementMatrices['R']}
dyDict = {'correlationVector': measurementMatrices['dY']}
else:
HDict = {'': None}
RDict = {'': None}
dyDict = {'': None}
measurementMatricesDict = {
'H': HDict,
'R': RDict,
'dY': dyDict
}
return measurementMatricesDict
## @}
## @fun #buildTimeUpdateMatrices constructs the correlation vector time
# update matrices
#
# @details The #buildTimeUpdateMatrices method constructs the matrices required to perform the time update of the correlation vector sub-state.
#
# The time update matrices are a function of the estimated spacecraft velocity (\f$\mathbf{v}\f$), velocity variance (\f$\mathbf{P}_{\mathbf{v}}\f$), and the elapsed time over which the time update occurs (\f$\Delta T\f$). The matrices are constructed as follows:
#
# \f[
# \mathbf{F}_{j \to k} = \begin{bmatrix}
# \textrm{sinc}(\hat{\delta}) & \hdots & \textrm{sinc}(\hat{\delta} + N - 1) \\
# \vdots & \ddots & \vdots \\
# \textrm{sinc}(\hat{\delta} - N + 1) & \hdots & \textrm{sinc}(\hat{\delta})
# \end{bmatrix}
# \f]
#
# \f[
# \mathbf{L}_{j} = \begin{bmatrix}
# \frac{\textrm{cos}}{(\hat{\delta})} - \frac{\textrm{sin}}{(\hat{\delta}^2)} & \hdots \\
# \vdots & \ddots \\
# \end{bmatrix} \sv[timeIndex = k]
# \f]
#
# \f[
# Q_{\delta} = \left(\frac{(k-j)}{c}\right)^2
# {\LOSVec[S]}^T \mathbf{P}_{\mathbf{v}} \LOSVec[S]
# \f]
#
# where
#
# \f[
# \hat{\delta}_{j \to k} = \frac{\mathbf{v} \LOSVec[S] \Delta T}{c T}
# \f]
#
# @param self The object pointer
# @param deltaT The amount of time over which the time update is occuring
# @param dynamics A dictionary containing the relevant dynamics for the
# time update
# @param h The current correlation vector
#
# @returns A dictionary containing the matrices \f$\mathbf{F}\f$,
    # \f$\mathbf{L}\f$, and the scalar \f$Q\f$
def buildTimeUpdateMatrices(
self,
deltaT,
dynamics,
h
):
indexDiff = deltaT/self.__dT__
if (
(dynamics is not None and 'velocity' in dynamics) or
(
self.INF_type == 'external' and
(
(np.sqrt(self.navState.velocityVar) < self.velStdDevThreshold) or
self.velStdDevThreshold == 0
)
)
):
if 'velocity' in dynamics:
velocity = dynamics['velocity']['value']
vVar = dynamics['velocity']['var'] * self.velocityNoiseScaleFactor
peakShift = (
(velocity.dot(self.__unitVecToSignal__) * indexDiff) /
self.speedOfLight()
)
# velocityTDOA = peakShift * self.__dT__
velocityTDOA = (
velocity.dot(self.__unitVecToSignal__) * deltaT /
self.speedOfLight()
)
Q = (
self.__unitVecToSignal__.dot(
vVar
).dot(self.__unitVecToSignal__) *
np.square(indexDiff / self.speedOfLight())
)
tdoaQ = (
self.__unitVecToSignal__.dot(vVar
).dot(self.__unitVecToSignal__) *
np.square(deltaT/self.speedOfLight()))
elif self.INF_type=='external':
peakShift = self.navState.currentVelocity * indexDiff
velocityTDOA = self.navState.currentVelocity * deltaT
Q = self.navState.velocityVar * np.square(indexDiff) * self.velocityNoiseScaleFactor
tdoaQ = self.navState.velocityVar * np.square(deltaT) * self.velocityNoiseScaleFactor
else:
velocityTDOA = 0
peakShift = 0
Q = self.navProcessNoise * np.power(indexDiff,4)/4
tdoaQ = self.navProcessNoise * np.power(deltaT,4)/4
FMatrixShift = -self.peakOffsetFromCenter # - peakShift
self.signalTDOA = (
self.signalTDOA +
velocityTDOA
)
self.TDOAVar = self.TDOAVar + tdoaQ
self.peakCenteringDT = (
self.peakCenteringDT + velocityTDOA +
(self.peakOffsetFromCenter*self.__dT__)
)
# Initialize empty matricies
F = np.zeros([self.__filterOrder__, self.__filterOrder__])
L = np.zeros([self.__filterOrder__, self.__filterOrder__])
# Build arrays of indicies from which to form the sinc function
if np.mod(self.__filterOrder__, 2) == 0:
baseVec = (
np.linspace(
1 - self.__halfLength__,
self.__halfLength__,
self.__filterOrder__
)
)
else:
baseVec = (
np.linspace(
1 - self.__halfLength__,
self.__halfLength__ - 1,
self.__filterOrder__
)
)
# Compute the sinc function of the base vector
sincBase = np.sinc(baseVec + FMatrixShift)
diffBase = np.zeros_like(sincBase)
for i in range(len(baseVec)):
diffBase[i] = self.sincDiff(baseVec[i] + peakShift)
sincBase = np.roll(sincBase, 1 - int(self.__halfLength__))
diffBase = np.roll(diffBase, 1 - int(self.__halfLength__))
for i in range(len(F)):
F[i] = np.roll(sincBase, i)
L[i] = np.roll(diffBase, i)
L = L.dot(h)
# else:
# # If no velocity was included in dynamics, then do nothing during
# # time update
# F = np.eye(self.__filterOrder__)
# L = np.zeros(self.__filterOrder__)
# Q = 0
timeUpdateDict = {
'F': F,
'L': L,
'Q': Q
}
return(timeUpdateDict)
def buildFLMatrices(self, peakShift, h):
# Initialize empty matricies
F = np.zeros([self.__filterOrder__, self.__filterOrder__])
L = np.zeros([self.__filterOrder__, self.__filterOrder__])
# Build arrays of indicies from which to form the sinc function
if np.mod(self.__filterOrder__, 2) == 0:
baseVec = (
np.linspace(
1 - self.__halfLength__,
self.__halfLength__,
self.__filterOrder__
)
)
else:
baseVec = (
np.linspace(
1 - self.__halfLength__,
self.__halfLength__ - 1,
self.__filterOrder__
)
)
# Compute the sinc function of the base vector
sincBase = np.sinc(baseVec + peakShift)
diffBase = np.zeros_like(sincBase)
for i in range(len(baseVec)):
diffBase[i] = self.sincDiff(baseVec[i] + peakShift)
sincBase = np.roll(sincBase, 1 - int(self.__halfLength__))
diffBase = np.roll(diffBase, 1 - int(self.__halfLength__))
for i in range(len(F)):
F[i] = np.roll(sincBase, i)
L[i] = np.roll(diffBase, i)
L = L.dot(h)
return {'F':F, 'L':L}
## @}
## @{
# @name Functions Specific to #CorrelationVector
#
# The following remaining functions are not required in order for this
# class to be used as a SubState, and may be changed as needed,
# including inputs and outputs.
def getTOAMeasurementMatrices(
self,
measurement,
corrVec
):
photonTOA = measurement['t']['value']
adjustedTOA = photonTOA + self.peakCenteringDT
H = np.eye(self.__filterOrder__)
if self.INF_type == 'deep':
H = np.append(H, np.zeros([self.__filterOrder__, self.navVectorLength]), axis=1)
timeVector = np.linspace(
0,
(self.__filterOrder__ - 1),
self.__filterOrder__
)
timeVector = timeVector * self.__dT__
timeVector = (
timeVector + adjustedTOA
)
# if self.peakLock is True:
# timeVector = timeVector - self.signalDelay
signalTimeHistory = np.zeros(self.__filterOrder__)
halfDT = self.__dT__/2.0
# for timeIndex in range(len(timeVector)):
# signalTimeHistory[timeIndex] = (
# self.__trueSignal__.getSignal(timeVector[timeIndex]) *
# self.__dT__
# )
for timeIndex in range(len(timeVector)):
signalTimeHistory[timeIndex] = (
self.__trueSignal__.signalIntegral(
timeVector[timeIndex]-halfDT,
timeVector[timeIndex] + halfDT
)
)
# plt.plot(signalTimeHistory)
# plt.show(block=False)
# 1/0
# print(corrVec)
# print(signalTimeHistory)
dY = signalTimeHistory - corrVec
R = (
np.eye(self.__filterOrder__) *
#self.__trueSignal__.flux *
self.__trueSignal__.peakAmplitude *
self.__dT__ *
np.dot(corrVec, corrVec) *
self.measurementNoiseScaleFactor
)
measMatDict = {
'H': H,
'dY': dY,
'R': R
}
return measMatDict
## @fun #computeSignalTDOA computes the delay between the #__trueSignal__ and
# measurements based on a correlation vector
#
    # @details The #computeSignalTDOA function is a rudimentary function
# which takes an estimate of the correlation vector and uses it to
# estimate the location of the peak. It functions by finding the tap with
# the maximum value, and then fitting a quadratic to the points
# surrounding the maximum value tap. The number of points to which the
# quadratic is fitted is determined by the value of #peakFitPoints; the
# number of points is equal to \f$2 * n + 1\f$ where \f$n = \f$
# #peakFitPoints.
#
# The delay estimate that is returned is in units of #__dT__. So, a returned
# value of 2 would imply that the peak is located at 2, and therefore the
# delay corresponding to the correlation vector is 2 #__dT__.
#
# The returned delay may not include previously accumulated #signalDelay
# between the signal and the measurements. See the #storeStateVector
# function for more information on how the #signalDelay is stored and
# accumulated delay is accounted for.
#
# @param self The object pointer
# @param c The correlation vector
# @param P the correlation vector covariance matrix
#
# @return The estimate of the delay
def computeSignalTDOA(
self,
c,
P
):
# First estimate of peak location is the location of the max value
peakLocation = np.argmax(c)
# Next, we "roll" the correlation vector so that the values being
# fitted quadratically are the first 2 * peakFitPoints + 1 values
lowerBound = peakLocation - self.peakFitPoints
upperBound = lowerBound + (self.peakFitPoints * 2) + 1
if (lowerBound < 0) or (upperBound > self.__filterOrder__):
mySlice = range(lowerBound, upperBound)
slicedC = c.take(mySlice, mode='wrap')
slicedP = P.take(mySlice, axis=0, mode='wrap').take(mySlice, axis=1, mode='wrap')
else:
mySlice = slice(lowerBound, upperBound)
slicedC = c[mySlice]
slicedP = P[mySlice, mySlice]
# xVec is the vector of "x" values corresponding the "y" values to
# which the quadratic is being fit.
xVec = self.__xVec__
# xVec = xVec - rollFactor
xVec = xVec + lowerBound
# Get the quadratic function that fits the peak and surrounding values,
# and use it to estimate the location of the max
# print(slicedC)
if len(xVec) == 3:
TDOA = self.peakFinder(xVec, slicedC)
else:
quadraticVec = self.quadraticFit(xVec, slicedC)
try:
TDOA = (-quadraticVec[1] / (2 * quadraticVec[0]))
except:
TDOA = xVec[peakLocation]
return TDOA
## @fun #estimateSignalTDOA_UT uses a unscented tranform to estimate the
# delay corresponding to a correlation vector
#
    # @details The #estimateSignalTDOA_UT method is responsible for computing
# the estimated value of delay corresponding to a correlation vector, as
# well as the variance of that estimate. These values are computed using
# a unscented transform (i.e. sigma-point) approach.
#
# The method receives the an estimate of the correlation vector, as well
# as the covariance matrix corresponding to that vector. From there it
# computes a set of n sigma points (where n is the length of the
# correlation vector), and for each of the generated sigma point vectors,
    # it computes the peak location using the #computeSignalTDOA method.
#
# @param self The object pointer
# @param h The correlation vector
# @param P The correlation vector covariance matrix
#
# @returns A dict containing the estimate of the peak location
# ("meanDelay") and the estimate variance ("varDelay")
    def estimateSignalTDOA_UT(
            self,
            h,
            P,
            useMean=True
    ):
        """Estimate the signal TDOA and its variance from a correlation
        vector using an unscented transform (sigma-point) approach.

        Parameters
        ----------
        h : correlation vector estimate
        P : correlation vector covariance container; may be stored in
            'covariance' or 'cholesky' form
        useMean : currently unused; retained for interface compatibility

        Returns
        -------
        dict with 'meanTDOA', 'varTDOA', and the raw 'sigmaPoints' results.
        """
        # Compute sigma points
        hDimension = len(h)
        maxHIndex = np.argmax(h)
        # rollAmount is currently unused (peak re-centering experiment left
        # disabled below).
        rollAmount = -maxHIndex + self.__halfLength__
        # rollAmount = 1
        # hRolled = np.roll(h, rollAmount)
        # PRolled = np.roll(np.roll(P.value, rollAmount, axis=0), rollAmount, axis=1)
        # Compute the square root of P.
        if P.form == 'covariance':
            sqrtP = np.linalg.cholesky(
                hDimension * P.value[0:self.__filterOrder__, 0:self.__filterOrder__]
            )
        elif P.form == 'cholesky':
            # Already a Cholesky factor; just scale it.
            # PVal = P.convertCovariance('covariance').value
            # sqrtP = np.linalg.cholesky(hDimension * PVal)
            sqrtP = P.value[0:self.__filterOrder__, 0:self.__filterOrder__] * np.sqrt(hDimension)
        sigmaPoints = h + np.append(sqrtP, -sqrtP, axis=0)
        # Append one more row of sigma points containing the unmodified estimate
        sigmaPoints = np.append(np.array([h]), sigmaPoints, axis=0)
        # Initiate vector to store the resulting peaks from each sigma point
        sigmaPointResults = np.zeros(len(sigmaPoints))
        # Compute the peak corresponding to each sigma point vector
        for i in range(len(sigmaPoints)):
            sigmaPointResults[i] = (
                self.computeSignalTDOA(sigmaPoints[i], P.convertCovariance('covariance').value)
            )
        #meanTDOA = np.mean(sigmaPointResults)
        # The unmodified estimate (first sigma point) is used as the mean.
        meanTDOA = sigmaPointResults[0]
        # Unwrap sigma-point results that crossed the circular boundary of
        # the correlation vector before computing the variance.
        # NOTE(review): the wrap width uses self.__dimension__ (presumably
        # set by the base class) rather than __filterOrder__ -- confirm
        # this is intentional.
        for i in range(len(sigmaPoints)):
            if (meanTDOA - sigmaPointResults[i]) > self.__halfLength__:
                sigmaPointResults[i] += self.__dimension__
            elif (sigmaPointResults[i] - meanTDOA) > self.__halfLength__:
                sigmaPointResults[i] -= self.__dimension__
        # meanTDOA = self.computeSignalTDOA(h, P)
        varTDOA = np.var(sigmaPointResults)
        return {'meanTDOA': meanTDOA, 'varTDOA': varTDOA, 'sigmaPoints': sigmaPointResults}
def estimateSignalTDOA_EK(self, h, P):
if P.form == 'covariance':
P = P.value[0:self.__filterOrder__, 0:self.__filterOrder__]
elif P.form == 'cholesky':
P = P.convertCovariance('covariance').value[0:self.__filterOrder__, 0:self.__filterOrder__]
# First estimate of peak location is the location of the max value
peakLocation = np.argmax(h)
# Next, we "roll" the correlation vector so that the values being
# fitted quadratically are the first 3 values
lowerBound = peakLocation - 1
upperBound = lowerBound + (1 * 2) + 1
if (lowerBound < 0) or (upperBound > self.__filterOrder__):
mySlice = range(lowerBound, upperBound)
slicedC = h.take(mySlice, mode='wrap')
slicedP = P.take(mySlice, axis=0, mode='wrap').take(mySlice, axis=1, mode='wrap')
else:
mySlice = slice(lowerBound, upperBound)
slicedC = h[mySlice]
slicedP = P[mySlice, mySlice]
# xVec is the vector of "x" values corresponding the "y" values to
# which the quadratic is being fit.
xVec = self.__xVec__
xVec = xVec + lowerBound
# Get the quadratic function that fits the peak and surrounding values,
# and use it to estimate the location of the max
TDOA = self.peakFinder(xVec, slicedC)
jacobian = self.peakFinderJacobian(xVec, slicedC)
variance = jacobian.dot(slicedP).dot(jacobian.transpose())
return {'meanTDOA': TDOA, 'varTDOA': variance}
def speedOfLight(
self
):
return (299792)
@staticmethod
def sincDiff(x):
if np.abs(x) < 1e-100:
myDiff = 0.0
else:
piX = np.pi*x
# myDiff = np.pi * (
# (((np.pi * x) * np.cos(x * np.pi)) - np.sin(x * np.pi))
# /
# np.square(x * np.pi)
# )
myDiff = (piX*np.cos(piX) - np.sin(piX))/(np.pi * np.power(x,2))
# myDiff
return myDiff
@staticmethod
def quadraticFit(x, y):
X_T = np.array([np.power(x, 2), x, np.ones(len(x))])
X = X_T.transpose()
if len(x) < 3:
raise ValueError(
"Cannot fit a quadratic to less than three data points."
)
elif len(x) == 3:
# Note: Suprisingly, it is faster to directly invert the X matrix
# than it is to do a linear solve. Strange.
#coef = np.linalg.solve(X, y)
coef = np.linalg.inv(X).dot(y)
else:
#coef = np.linalg.solve(X_T.dot(X).dot(X_T), y)
coef = np.linalg.inv(X_T.dot(X)).dot(X_T).dot(y)
return coef
def initializeRealTimePlot(
self,
plotHandle=None,
axisHandle=None
):
super().initializeRealTimePlot(plotHandle, axisHandle)
self.RTPlotTDOA = self.RTPaxisHandle.scatter(
self.signalTDOA,
1
)
self.RTPlotTDOA_error, = self.RTPaxisHandle.plot(
[
self.signalTDOA - np.sqrt(self.TDOAVar),
self.signalTDOA + np.sqrt(self.TDOAVar)
],
[1,1]
)
return
def realTimePlot(
self,
normalized=True
):
if self.RTPlotHandle is None:
self.initializeRealTimePlot()
self.RTPlotTDOA.set_offsets([self.signalTDOA, 1])
self.RTPlotTDOA_error.set_data(
[
self.signalTDOA - np.sqrt(self.TDOAVar),
self.signalTDOA + np.sqrt(self.TDOAVar)
],
[1,1]
)
super().realTimePlot(normalized, substateRange = slice(0,self.__filterOrder__))
return
@staticmethod
def peakFinder(x,y):
x1 = x[0]
x2 = x[1]
x3 = x[2]
y1 = y[0]
y2 = y[1]
y3 = y[2]
x0 = (
-(y1*(np.square(x3) - np.square(x2)) + y2*(np.square(x1) - np.square(x3)) + y3*(np.square(x2) - np.square(x1)))
/
(2*(y1*(x2-x3) + y2*(x3-x1) + y3*(x1-x2)))
)
x0 = (
(
y1*(np.square(x2)-np.square(x3)) +
y2*(np.square(x3)-np.square(x1)) +
y3*(np.square(x1)-np.square(x2))
)
/
(2*(y1*(x2-x3) + y2*(x3-x1) + y3*(x1-x2)))
)
return(x0)
@staticmethod
def peakFinderJacobian(x,y):
x1 = x[0]
x2 = x[1]
x3 = x[2]
y1 = y[0]
y2 = y[1]
y3 = y[2]
A = np.square(x2) - np.square(x3)
B = np.square(x3) - np.square(x1)
C = np.square(x1) - np.square(x2)
D = x2-x3
E = x3-x1
# E = x1-x2
F = x1-x2
AE = A*E
AF = A*F
BD = B*D
BF = B*F
CD = C*D
CE = C*E
denom = 2*np.power(((D*y1) + (E*y2) + (F*y3)),2)
dT_dy1 = (
((AE - BD)*y2 + (AF - CD)*y3)
/
denom
)
dT_dy2 = (
((BD - AE)*y1 + (BF - CE)*y3)
/
denom
)
dT_dy3 = (
((CD - AF)*y1 + (CE - BF)*y2)
/
denom
)
return np.array([dT_dy1, dT_dy2, dT_dy3])
| true | true |
1c3b2538f2746e7121c92de4686cb7c95ff849c8 | 221 | py | Python | shop_stripe/context_processors.py | execut/djangoshop-stripe | 09252e63cd92b92841b7fdb93517c6e6e4f29f23 | [
"MIT"
] | 11 | 2016-02-12T21:57:52.000Z | 2021-11-26T16:59:19.000Z | shop_stripe/context_processors.py | execut/djangoshop-stripe | 09252e63cd92b92841b7fdb93517c6e6e4f29f23 | [
"MIT"
] | 10 | 2016-05-27T09:14:57.000Z | 2020-08-29T18:37:51.000Z | shop_stripe/context_processors.py | execut/djangoshop-stripe | 09252e63cd92b92841b7fdb93517c6e6e4f29f23 | [
"MIT"
] | 6 | 2017-03-15T20:11:21.000Z | 2020-12-09T21:09:25.000Z | from django.conf import settings
def public_keys(request):
    """Context processor exposing the Stripe publishable key and the
    optional card-prefill flag to templates."""
    prefill_enabled = getattr(settings, 'SHOP_STRIPE_PREFILL', False)
    return {
        'SHOP_STRIPE_PUBKEY': settings.SHOP_STRIPE['PUBKEY'],
        'SHOP_STRIPE_PREFILL': prefill_enabled,
    }
| 24.555556 | 78 | 0.705882 | from django.conf import settings
def public_keys(request):
return {
'SHOP_STRIPE_PUBKEY': settings.SHOP_STRIPE['PUBKEY'],
'SHOP_STRIPE_PREFILL': getattr(settings, 'SHOP_STRIPE_PREFILL', False)
}
| true | true |
1c3b275d469f25254cb1749dcc0fbb602fbe2454 | 2,488 | py | Python | src/folio_migration_tools/helper.py | chadmcinnis/folio_migration_tools | 39ee044a713a34c323324a956e3e8b54ee05c194 | [
"MIT"
] | null | null | null | src/folio_migration_tools/helper.py | chadmcinnis/folio_migration_tools | 39ee044a713a34c323324a956e3e8b54ee05c194 | [
"MIT"
] | null | null | null | src/folio_migration_tools/helper.py | chadmcinnis/folio_migration_tools | 39ee044a713a34c323324a956e3e8b54ee05c194 | [
"MIT"
] | null | null | null | import json
import logging
class Helper:
    """Static utility helpers for writing migration reports and records."""

    @staticmethod
    def print_mapping_report(
        report_file, total_records: int, mapped_folio_fields, mapped_legacy_fields
    ):
        """Write markdown tables summarizing FOLIO and legacy field mapping
        coverage to report_file."""
        details_start = "<details><summary>Click to expand field report</summary> \n\n"
        details_end = "</details> \n"
        report_file.write("\n## Mapped FOLIO fields\n")
        report_file.write(details_start)
        report_file.write("FOLIO Field | Mapped | Unmapped \n")
        report_file.write("--- | --- | ---: \n")
        for field_name, counts in sorted(mapped_folio_fields.items()):
            mapped = counts[0]
            unmapped = max(total_records - mapped, 0)
            share = mapped / total_records if total_records else 0
            mapped_share = "{:.0%}".format(max(share, 0))
            report_file.write(
                f"{field_name} | {max(mapped, 0):,} ({mapped_share}) | {unmapped:,} \n"
            )
        report_file.write(details_end)
        report_file.write("\n## Mapped Legacy fields\n")
        report_file.write(details_start)
        report_file.write("Legacy Field | Present | Mapped | Unmapped \n")
        report_file.write("--- | --- | --- | ---: \n")
        for field_name, counts in sorted(mapped_legacy_fields.items()):
            present = counts[0]
            mapped = counts[1]
            present_share = "{:.1%}".format(present / total_records if total_records else 0)
            unmapped = present - mapped
            share = mapped / total_records if total_records else 0
            mapped_share = "{:.0%}".format(max(share, 0))
            report_file.write(
                f"{field_name} | {max(present, 0):,} ({present_share}) | {max(mapped, 0):,} "
                f"({mapped_share}) | {unmapped:,} \n"
            )
        report_file.write(details_end)

    @staticmethod
    def log_data_issue(index_or_id, message, legacy_value):
        """Log a data issue in tab-separated form at custom level 26."""
        logging.log(26, "DATA ISSUE\t%s\t%s\t%s", index_or_id, message, legacy_value)

    @staticmethod
    def write_to_file(file, folio_record, pg_dump=False):
        """Write one record as a JSON line; with pg_dump=True, prefix the
        record id and a tab so the output suits the psql COPY command."""
        serialized = json.dumps(folio_record)
        if pg_dump:
            file.write("{}\t{}\n".format(folio_record["id"], serialized))
        else:
            file.write("{}\n".format(serialized))
| 41.466667 | 92 | 0.586415 | import json
import logging
class Helper:
@staticmethod
def print_mapping_report(
report_file, total_records: int, mapped_folio_fields, mapped_legacy_fields
):
details_start = "<details><summary>Click to expand field report</summary> \n\n"
details_end = "</details> \n"
report_file.write("\n## Mapped FOLIO fields\n")
d_sorted = {k: mapped_folio_fields[k] for k in sorted(mapped_folio_fields)}
report_file.write(details_start)
report_file.write("FOLIO Field | Mapped | Unmapped \n")
report_file.write("--- | --- | ---: \n")
for k, v in d_sorted.items():
unmapped = max(total_records - v[0], 0)
mapped = v[0]
mp = mapped / total_records if total_records else 0
mapped_per = "{:.0%}".format(max(mp, 0))
report_file.write(f"{k} | {max(mapped, 0):,} ({mapped_per}) | {unmapped:,} \n")
report_file.write(details_end)
report_file.write("\n## Mapped Legacy fields\n")
d_sorted = {k: mapped_legacy_fields[k] for k in sorted(mapped_legacy_fields)}
report_file.write(details_start)
report_file.write("Legacy Field | Present | Mapped | Unmapped \n")
report_file.write("--- | --- | --- | ---: \n")
for k, v in d_sorted.items():
present = v[0]
present_per = "{:.1%}".format(present / total_records if total_records else 0)
unmapped = present - v[1]
mapped = v[1]
mp = mapped / total_records if total_records else 0
mapped_per = "{:.0%}".format(max(mp, 0))
report_file.write(
f"{k} | {max(present, 0):,} ({present_per}) | {max(mapped, 0):,} "
f"({mapped_per}) | {unmapped:,} \n"
)
report_file.write(details_end)
@staticmethod
def log_data_issue(index_or_id, message, legacy_value):
logging.log(26, "DATA ISSUE\t%s\t%s\t%s", index_or_id, message, legacy_value)
@staticmethod
def write_to_file(file, folio_record, pg_dump=False):
if pg_dump:
file.write("{}\t{}\n".format(folio_record["id"], json.dumps(folio_record)))
else:
file.write("{}\n".format(json.dumps(folio_record)))
| true | true |
1c3b27937721cf673238b260e0824e37543372f2 | 606 | py | Python | venv/lib/python3.8/site-packages/_pyinstaller_hooks_contrib/hooks/stdhooks/hook-HtmlTestRunner.py | avrumnoor/NewsSummarizer | a963497ef9bc62d2148aa28e624ea32955992f57 | [
"MIT"
] | 6 | 2021-07-14T03:23:17.000Z | 2021-08-07T05:07:21.000Z | venv/lib/python3.8/site-packages/_pyinstaller_hooks_contrib/hooks/stdhooks/hook-HtmlTestRunner.py | avrumnoor/NewsSummarizer | a963497ef9bc62d2148aa28e624ea32955992f57 | [
"MIT"
] | 20 | 2021-05-03T18:02:23.000Z | 2022-03-12T12:01:04.000Z | venv/lib/python3.8/site-packages/_pyinstaller_hooks_contrib/hooks/stdhooks/hook-HtmlTestRunner.py | avrumnoor/NewsSummarizer | a963497ef9bc62d2148aa28e624ea32955992f57 | [
"MIT"
] | 4 | 2021-07-13T19:44:06.000Z | 2021-08-13T07:49:35.000Z | # ------------------------------------------------------------------
# Copyright (c) 2020 PyInstaller Development Team.
#
# This file is distributed under the terms of the GNU General Public
# License (version 2.0 or later).
#
# The full license is available in LICENSE.GPL.txt, distributed with
# this software.
#
# SPDX-License-Identifier: GPL-2.0-or-later
# ------------------------------------------------------------------
# Hook for HtmlTestRunner: https://pypi.org/project/html-testRunner//1.2.1
from PyInstaller.utils.hooks import collect_data_files
datas = collect_data_files('HtmlTestRunner')
| 33.666667 | 74 | 0.59571 |
from PyInstaller.utils.hooks import collect_data_files
datas = collect_data_files('HtmlTestRunner')
| true | true |
1c3b2877d359139a9c4bd39cb9d2c6770ac9a292 | 3,123 | py | Python | qiskit_nature/drivers/base_driver.py | SooluThomas/qiskit-nature | 0d509525b68b76d0f2d613d0e7409b9ea65cfcc0 | [
"Apache-2.0"
] | 1 | 2021-06-20T15:31:01.000Z | 2021-06-20T15:31:01.000Z | qiskit_nature/drivers/base_driver.py | SooluThomas/qiskit-nature | 0d509525b68b76d0f2d613d0e7409b9ea65cfcc0 | [
"Apache-2.0"
] | 1 | 2021-11-11T06:33:41.000Z | 2021-11-11T06:33:41.000Z | qiskit_nature/drivers/base_driver.py | SooluThomas/qiskit-nature | 0d509525b68b76d0f2d613d0e7409b9ea65cfcc0 | [
"Apache-2.0"
] | 3 | 2021-07-02T06:57:58.000Z | 2021-07-06T12:32:38.000Z | # This code is part of Qiskit.
#
# (C) Copyright IBM 2018, 2021.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""
This module implements the abstract base class for driver modules.
"""
from typing import Optional
from abc import ABC, abstractmethod
from .molecule import Molecule
from ..exceptions import QiskitNatureError
from ..deprecation import DeprecatedType, warn_deprecated_same_type_name
class BaseDriver(ABC):
    """**DEPRECATED** Base class for Qiskit Nature drivers.

    Kept only for backward compatibility; use the drivers under
    ``qiskit_nature.drivers.second_quantization`` instead.
    """

    @abstractmethod
    def __init__(
        self,
        molecule: Optional[Molecule] = None,
        basis: str = "sto3g",
        hf_method: str = "rhf",
        supports_molecule: bool = False,
    ) -> None:
        """
        Args:
            molecule: molecule to drive, if the concrete driver supports one
            basis: basis set name
            hf_method: Hartree-Fock method type
            supports_molecule: whether the concrete driver accepts a Molecule

        Raises:
            QiskitNatureError: Molecule passed but driver doesn't support it.
        """
        warn_deprecated_same_type_name(
            "0.2.0",
            DeprecatedType.CLASS,
            "BaseDriver",
            "from qiskit_nature.drivers.second_quantization as a direct replacement",
            3,
        )
        # Fail fast when a molecule is supplied to a driver that cannot use it.
        if not supports_molecule and molecule is not None:
            raise QiskitNatureError("Driver doesn't support molecule.")
        self._supports_molecule = supports_molecule
        self._molecule = molecule
        self._basis = basis
        self._hf_method = hf_method

    @abstractmethod
    def run(self):
        """Run the driver and produce its output data structure."""
        raise NotImplementedError()

    @property
    def supports_molecule(self) -> bool:
        """Whether this driver accepts a Molecule.

        Returns:
            True if Molecule is supported.
        """
        return self._supports_molecule

    @property
    def molecule(self) -> Optional[Molecule]:
        """The molecule being driven, if any."""
        return self._molecule

    @molecule.setter
    def molecule(self, value: Molecule) -> None:
        """Set the molecule; rejected when the driver lacks molecule support."""
        if not self.supports_molecule:
            raise QiskitNatureError("Driver doesn't support molecule.")
        self._molecule = value

    @property
    def basis(self) -> str:
        """The basis set name."""
        return self._basis

    @basis.setter
    def basis(self, value: str) -> None:
        """Set the basis set name."""
        self._basis = value

    @property
    def hf_method(self) -> str:
        """The Hartree-Fock method type."""
        return self._hf_method

    @hf_method.setter
    def hf_method(self, value: str) -> None:
        """Set the Hartree-Fock method type."""
        self._hf_method = value
| 28.390909 | 85 | 0.632085 |
from typing import Optional
from abc import ABC, abstractmethod
from .molecule import Molecule
from ..exceptions import QiskitNatureError
from ..deprecation import DeprecatedType, warn_deprecated_same_type_name
class BaseDriver(ABC):
@abstractmethod
def __init__(
self,
molecule: Optional[Molecule] = None,
basis: str = "sto3g",
hf_method: str = "rhf",
supports_molecule: bool = False,
) -> None:
warn_deprecated_same_type_name(
"0.2.0",
DeprecatedType.CLASS,
"BaseDriver",
"from qiskit_nature.drivers.second_quantization as a direct replacement",
3,
)
if molecule is not None and not supports_molecule:
raise QiskitNatureError("Driver doesn't support molecule.")
self._molecule = molecule
self._basis = basis
self._hf_method = hf_method
self._supports_molecule = supports_molecule
@abstractmethod
def run(self):
raise NotImplementedError()
@property
def supports_molecule(self) -> bool:
return self._supports_molecule
@property
def molecule(self) -> Optional[Molecule]:
return self._molecule
@molecule.setter
def molecule(self, value: Molecule) -> None:
if not self.supports_molecule:
raise QiskitNatureError("Driver doesn't support molecule.")
self._molecule = value
@property
def basis(self) -> str:
return self._basis
@basis.setter
def basis(self, value: str) -> None:
self._basis = value
@property
def hf_method(self) -> str:
return self._hf_method
@hf_method.setter
def hf_method(self, value: str) -> None:
self._hf_method = value
| true | true |
1c3b293e5e02c883134daa1be35695079105d5dc | 1,479 | py | Python | the-lego-collector-s-dilemma-(linear-regression)/code.py | rsaurabh799/ga-learner-dsmp-repo | 024f054e0385fd5faa24804004e25d9f849363aa | [
"MIT"
] | null | null | null | the-lego-collector-s-dilemma-(linear-regression)/code.py | rsaurabh799/ga-learner-dsmp-repo | 024f054e0385fd5faa24804004e25d9f849363aa | [
"MIT"
] | null | null | null | the-lego-collector-s-dilemma-(linear-regression)/code.py | rsaurabh799/ga-learner-dsmp-repo | 024f054e0385fd5faa24804004e25d9f849363aa | [
"MIT"
] | null | null | null | # --------------
import pandas as pd
import numpy as np
# `sklearn.cross_validation` was removed in scikit-learn 0.20;
# `model_selection` is the maintained home of train_test_split.
from sklearn.model_selection import train_test_split

# Load the dataset (`path` is injected by the hosting platform -- TODO confirm).
df = pd.read_csv(path)
print(df.head(5))

# Split into features / target, then a 70/30 train/test split with a fixed
# seed for reproducibility.
X = df.drop('list_price', axis=1)
y = df['list_price']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=6)
# code ends here
# --------------
import matplotlib.pyplot as plt

# Visual inspection: scatter each candidate feature against the target
# (`list_price`) on a 3x3 grid of axes.
cols = np.array(['ages', 'num_reviews', 'piece_count', 'play_star_rating',
                 'review_difficulty', 'star_rating', 'theme_name',
                 'val_star_rating', 'country'])
fig, axes = plt.subplots(nrows=3, ncols=3, figsize=(20, 10))
# zip over the flattened axes grid replaces manual i*3+j indexing; the
# leftover debug print of the index is removed.
for ax, col in zip(axes.flat, cols):
    ax.scatter(X_train[col], y_train)
# code ends here
# --------------
# Inspect pairwise feature correlations, then drop the two rating features
# flagged as collinear in the printed matrix (from both train and test sets).
corr = X_train.corr()
print(corr)
X_train.drop(['play_star_rating','val_star_rating'],axis=1,inplace = True)
X_test.drop(['play_star_rating','val_star_rating'],axis=1,inplace = True)
# Code ends here
# --------------
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error, r2_score
# Fit an ordinary least-squares model on the training split and evaluate
# on the hold-out test split.
regressor = LinearRegression()
regressor.fit(X_train,y_train)
y_pred = regressor.predict(X_test)
# Report test-set error (MSE) and goodness of fit (R^2).
mse = mean_squared_error(y_test,y_pred)
r2 = r2_score(y_test,y_pred)
print(mse)
print(r2)
# Code ends here
# --------------
# Residual analysis: histogram of prediction errors on the test set.
residual = y_test - y_pred
plt.hist(residual)
# Code ends here
| 19.72 | 147 | 0.684246 |
import pandas as pd
import numpy as np
from sklearn.cross_validation import train_test_split
df = pd.read_csv(path)
print(df.head(5))
X = df.drop('list_price',axis=1)
y = df['list_price']
X_train,X_test,y_train,y_test = train_test_split(X,y,test_size = 0.3,random_state = 6)
import matplotlib.pyplot as plt
cols = np.array(['ages','num_reviews','piece_count','play_star_rating','review_difficulty','star_rating','theme_name','val_star_rating','country'])
fig,axes = plt.subplots(nrows = 3 , ncols = 3,figsize=(20,10))
for i in range(3):
for j in range(3):
col = cols[i*3+j]
print(i*3+j)
axes[i,j].scatter(X_train[col],y_train)
corr = X_train.corr()
print(corr)
X_train.drop(['play_star_rating','val_star_rating'],axis=1,inplace = True)
X_test.drop(['play_star_rating','val_star_rating'],axis=1,inplace = True)
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error, r2_score
regressor = LinearRegression()
regressor.fit(X_train,y_train)
y_pred = regressor.predict(X_test)
mse = mean_squared_error(y_test,y_pred)
r2 = r2_score(y_test,y_pred)
print(mse)
print(r2)
residual = y_test - y_pred
plt.hist(residual)
| true | true |
1c3b2949d9ded9a69579d2d96b2ece4af1dc088f | 3,144 | py | Python | Solver/attenuation.py | steveknipmeyer/ModelRelief | a3d067e0ed39a3a8ca78896c21eaa3e7293b15a2 | [
"MIT"
] | null | null | null | Solver/attenuation.py | steveknipmeyer/ModelRelief | a3d067e0ed39a3a8ca78896c21eaa3e7293b15a2 | [
"MIT"
] | null | null | null | Solver/attenuation.py | steveknipmeyer/ModelRelief | a3d067e0ed39a3a8ca78896c21eaa3e7293b15a2 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
"""
.. module:: Attenutation
:synopsis: Support for attenuation of image components.
.. moduleauthor:: Steve Knipmeyer <steve@modelrelief.org>
"""
import numpy as np
from mathtools import MathTools
from logger import Logger
from services import Services
from tools import Colors
class AttenuationParameters:
    """Bundle of settings controlling the attenuation curve."""

    def __init__(self, enabled: bool, factor: float, decay: float) -> None:
        """Initialize an instance of AttenuationParameters.

        Parameters
        ----------
        enabled
            Enable attenuation processing.
        factor
            Percentage of the mean absolute value of the gradient.
            a = factor * mean(|gradient|) is the crossover point:
            values below a are amplified, values above a are reduced,
            and a value equal to a passes through unchanged.
        decay
            Rate at which the attenuation curve decays.
        """
        # Whether attenuation should be applied at all.
        self.enabled: bool = enabled
        # Crossover percentage between amplification and reduction.
        self.factor: float = factor
        # Decay rate of the attenuation curve.
        self.decay: float = decay
class Attenuation:
    """
    A class for attenuating image components.
    """
    def __init__(self, services : Services) -> None:
        """
        Initialize an instance of Attenuation.
        Parameters
        ----------
        services
            Service provider (logging, timers, etc.)
        """
        # debug=True prints the weight matrix and array statistics in apply().
        self.debug = False
        self.services = services
    def apply (self, array: np.ndarray, parameters: AttenuationParameters) -> np.ndarray:
        """
        N.B. See Jupyter notebook Attenuation.ipynb for a test workbench.
        Applies the attenuation function elementwise and returns the
        attenuated array; the input array is not modified.
        Parameters
        ----------
        array
            The array to apply the attenuation function against.
        parameters
            The AttenuationParameters (factor, decay):
              factor -> boundary between amplification and reduction;
                        percentage of mean absolute value of the gradient
              decay  -> rate of decay of the attenuation curve
        """
        # Values with magnitude below epsilon are treated as zero throughout.
        epsilon = 1E-4
        # average of absolute value of non-zero elements
        absolute = np.absolute(array)
        mean = 0.0 if (absolute < epsilon).all() else absolute[absolute > epsilon].mean()
        # a = crossover magnitude; b = decay exponent.
        a = (parameters.factor / 100.0) * mean
        b = parameters.decay
        def generate_weights (v, a, b):
            # weight = (a / abs(v)) * (abs(v) / a)**b
            # (algebraically equal to the a**(1-b) * |v|**(b-1) form below)
            weight = 0.0 if abs(v) < epsilon else (a ** (1 - b)) * (abs(v) ** (b - 1))
            return weight
        # np.vectorize evaluates the Python scalar function per element
        # (convenience, not a performance optimization).
        vgenerate_weights = np.vectorize(generate_weights)
        weights: np.ndarray = vgenerate_weights(np.copy(array), a, b)
        if self.debug:
            MathTools.print_array("Weights", weights)
            MathTools.analyze_array("Gradient", array, color = Colors.BrightCyan)
            MathTools.analyze_array("Attenuation Weights", weights, color = Colors.BrightMagenta)
        # Elementwise scaling of the original values by their weights.
        attenuated_array = weights * array
        return attenuated_array
| 33.094737 | 111 | 0.604008 |
import numpy as np
from mathtools import MathTools
from logger import Logger
from services import Services
from tools import Colors
class AttenuationParameters:
def __init__(self, enabled: bool, factor: float, decay: float) -> None:
self.enabled: bool = enabled
self.factor: float = factor
self.decay: float = decay
class Attenuation:
def __init__(self, services : Services) -> None:
self.debug = False
self.services = services
def apply (self, array: np.ndarray, parameters: AttenuationParameters) -> np.ndarray:
epsilon = 1E-4
absolute = np.absolute(array)
mean = 0.0 if (absolute < epsilon).all() else absolute[absolute > epsilon].mean()
a = (parameters.factor / 100.0) * mean
b = parameters.decay
def generate_weights (v, a, b):
weight = 0.0 if abs(v) < epsilon else (a ** (1 - b)) * (abs(v) ** (b - 1))
return weight
vgenerate_weights = np.vectorize(generate_weights)
weights: np.ndarray = vgenerate_weights(np.copy(array), a, b)
if self.debug:
MathTools.print_array("Weights", weights)
MathTools.analyze_array("Gradient", array, color = Colors.BrightCyan)
MathTools.analyze_array("Attenuation Weights", weights, color = Colors.BrightMagenta)
attenuated_array = weights * array
return attenuated_array
| true | true |
1c3b29ba50eb8e9e4d4eebbaa51ccf0760786e20 | 1,101 | py | Python | util/config/validators/test/test_validate_timemachine.py | anwarchk/quay | 23c5120790c619174e7d36784ca5aab7f4eece5c | [
"Apache-2.0"
] | 1 | 2021-05-30T01:54:21.000Z | 2021-05-30T01:54:21.000Z | util/config/validators/test/test_validate_timemachine.py | anwarchk/quay | 23c5120790c619174e7d36784ca5aab7f4eece5c | [
"Apache-2.0"
] | 20 | 2019-12-26T17:32:34.000Z | 2022-03-21T22:18:06.000Z | util/config/validators/test/test_validate_timemachine.py | anwarchk/quay | 23c5120790c619174e7d36784ca5aab7f4eece5c | [
"Apache-2.0"
] | 1 | 2020-05-31T16:28:40.000Z | 2020-05-31T16:28:40.000Z | import pytest
from util.config.validator import ValidatorContext
from util.config.validators import ConfigValidationException
from util.config.validators.validate_timemachine import TimeMachineValidator
@pytest.mark.parametrize('unvalidated_config', [
    ({}),
])
def test_validate_noop(unvalidated_config):
    # An empty config must pass time-machine validation without raising.
    TimeMachineValidator.validate(ValidatorContext(unvalidated_config))
from test.fixtures import *
@pytest.mark.parametrize('default_exp,options,expected_exception', [
    ('2d', ['1w', '2d'], None),
    ('2d', ['1w'], 'Default expiration must be in expiration options set'),
    ('2d', ['2d', '1M'], 'Invalid tag expiration option: 1M'),
])
def test_validate(default_exp, options, expected_exception, app):
    # `app` is a fixture imported via `from test.fixtures import *` above.
    config = {}
    config['DEFAULT_TAG_EXPIRATION'] = default_exp
    config['TAG_EXPIRATION_OPTIONS'] = options
    # A non-None expected_exception means validation must fail with exactly
    # that message; otherwise validation must succeed silently.
    if expected_exception is not None:
        with pytest.raises(ConfigValidationException) as cve:
            TimeMachineValidator.validate(ValidatorContext(config))
        assert str(cve.value) == str(expected_exception)
    else:
        TimeMachineValidator.validate(ValidatorContext(config))
| 33.363636 | 76 | 0.768392 | import pytest
from util.config.validator import ValidatorContext
from util.config.validators import ConfigValidationException
from util.config.validators.validate_timemachine import TimeMachineValidator
@pytest.mark.parametrize('unvalidated_config', [
({}),
])
def test_validate_noop(unvalidated_config):
TimeMachineValidator.validate(ValidatorContext(unvalidated_config))
from test.fixtures import *
@pytest.mark.parametrize('default_exp,options,expected_exception', [
('2d', ['1w', '2d'], None),
('2d', ['1w'], 'Default expiration must be in expiration options set'),
('2d', ['2d', '1M'], 'Invalid tag expiration option: 1M'),
])
def test_validate(default_exp, options, expected_exception, app):
config = {}
config['DEFAULT_TAG_EXPIRATION'] = default_exp
config['TAG_EXPIRATION_OPTIONS'] = options
if expected_exception is not None:
with pytest.raises(ConfigValidationException) as cve:
TimeMachineValidator.validate(ValidatorContext(config))
assert str(cve.value) == str(expected_exception)
else:
TimeMachineValidator.validate(ValidatorContext(config))
| true | true |
1c3b2b67143c8d3c3bc076c3bd8118dfd8c9e3d7 | 5,649 | py | Python | bloomstack_core/hook_events/delivery_note.py | harshmule-git/bloomstack_core | a22fc1e6dc006e909c79914acc82f3827f1769ee | [
"MIT"
] | 4 | 2021-03-01T08:44:39.000Z | 2021-12-21T18:02:14.000Z | bloomstack_core/hook_events/delivery_note.py | harshmule-git/bloomstack_core | a22fc1e6dc006e909c79914acc82f3827f1769ee | [
"MIT"
] | 23 | 2020-10-12T10:46:35.000Z | 2021-11-02T08:23:01.000Z | bloomstack_core/hook_events/delivery_note.py | harshmule-git/bloomstack_core | a22fc1e6dc006e909c79914acc82f3827f1769ee | [
"MIT"
] | 33 | 2020-10-09T13:24:10.000Z | 2022-02-01T20:59:03.000Z | # -*- coding: utf-8 -*-
# Copyright (c) 2020, Bloom Stack and contributors
# For license information, please see license.txt
import frappe
from frappe import _
from frappe.utils import cstr, get_host_name
from bloomstack_core.bloomtrace import get_bloomtrace_client, make_integration_request
def create_integration_request(doc, method):
    """Queue BloomTrace integration requests for a Delivery Note.

    Frappe hook handler; ``method`` is the hook event name and is unused.
    """
    # One request per BloomTrace endpoint the document must sync to.
    for endpoint in ("Package", "Transfer"):
        make_integration_request(doc.doctype, doc.name, endpoint)
def link_invoice_against_delivery_note(delivery_note, method):
    """Back-link submitted Sales Invoices onto Delivery Note rows.
    Frappe hook handler; ``method`` is the hook event name and is unused.
    A row is linked only when it originates from a Sales Order, is not yet
    linked, exactly one submitted invoice exists for that order, and that
    invoice was not itself created from a delivery note.
    """
    for item in delivery_note.items:
        if item.against_sales_order and not item.against_sales_invoice:
            sales_invoice_details = frappe.get_all("Sales Invoice Item",
                filters={"docstatus": 1, "sales_order": item.against_sales_order},
                fields=["distinct(parent)", "delivery_note"])
            # Skip ambiguous cases (zero or multiple invoices for the order).
            if sales_invoice_details and len(sales_invoice_details) == 1:
                if sales_invoice_details[0].delivery_note:
                    continue
                frappe.db.set_value("Delivery Note Item", item.name,
                    "against_sales_invoice", sales_invoice_details[0].parent)
def execute_bloomtrace_integration_request():
    """Process queued BloomTrace integration requests for Delivery Notes.
    Picks up to 50 of the oldest Queued/Failed requests, pushes Package /
    Transfer data to BloomTrace, and records success or the failure reason
    on each Integration Request (failed requests are retried on later runs).
    """
    frappe_client = get_bloomtrace_client()
    if not frappe_client:
        # BloomTrace integration not configured; nothing to do.
        return
    pending_requests = frappe.get_all("Integration Request",
        filters={
            "status": ["IN", ["Queued", "Failed"]],
            "reference_doctype": "Delivery Note",
            "integration_request_service": "BloomTrace"
        },
        order_by="creation ASC",
        limit=50)
    for request in pending_requests:
        integration_request = frappe.get_doc("Integration Request", request.name)
        delivery_note = frappe.get_doc("Delivery Note", integration_request.reference_docname)
        try:
            # Assume success; downgraded below when a precondition fails.
            error, status = "", "Completed"
            if integration_request.endpoint == "Package":
                if not delivery_note.is_return:
                    insert_delivery_payload(delivery_note, frappe_client)
                else:
                    error, status = "Delivery Note is marked as return", "Failed"
            if integration_request.endpoint == "Transfer":
                if delivery_note.lr_no or (delivery_note.estimated_arrival and delivery_note.departure_time):
                    # If delivery trip is created or estimated_arrival and departure_time is present, only then move forward to integrate with BloomTrace
                    insert_transfer_template(delivery_note, frappe_client)
                else:
                    error, status = "Delivery Trip / Estimated Departure / Estimated Arrival is missing", "Failed"
            integration_request.error = error
            integration_request.status = status
            integration_request.save(ignore_permissions=True)
        except Exception as e:
            # Record the traceback on the request so the failure is visible
            # and the request is retried on the next scheduler run.
            integration_request.error = cstr(frappe.get_traceback())
            integration_request.status = "Failed"
            integration_request.save(ignore_permissions=True)
def insert_transfer_template(delivery_note, frappe_client):
    """Insert a Transfer Template record into BloomTrace for a Delivery Note.

    Arguments:
        delivery_note -- the `Delivery Note` frappe document.
        frappe_client -- BloomTrace Frappe API client used for the insert.

    Estimated departure/arrival come from the linked Delivery Trip when one
    exists (``delivery_note.lr_no``); otherwise the values stored on the
    note itself are used.
    """
    estimated_arrival = delivery_note.estimated_arrival
    departure_time = delivery_note.departure_time

    if delivery_note.lr_no:
        delivery_trip = frappe.get_doc("Delivery Trip", delivery_note.lr_no)
        # Prefer the per-stop arrival estimate for this specific delivery note.
        for stop in delivery_trip.delivery_stops:
            if stop.delivery_note == delivery_note.name:
                estimated_arrival = stop.estimated_arrival
        if not estimated_arrival:
            # Ask the trip to (re)compute its route; surface a user-facing
            # error if arrival times still cannot be determined.
            try:
                delivery_trip.process_route(False)
            except Exception:
                frappe.throw(_("Estimated Arrival Times are not present."))
        if not departure_time:
            departure_time = delivery_trip.departure_time

    # Only tagged items are reported as transfer packages.
    transfer_template_packages = []
    for item in delivery_note.items:
        if item.package_tag:
            transfer_template_packages.append({
                "package_tag": item.package_tag,
                "wholesale_price": item.amount
            })

    # Removed an unused `site_url = get_host_name()` local (dead code).
    # NOTE(review): "vechile_make" is misspelled but presumably matches the
    # remote BloomTrace fieldname, so it is preserved as-is -- confirm before
    # renaming.
    transfer_template = {
        "doctype": "Transfer Template",
        "bloomstack_company": delivery_note.company,
        "delivery_note": delivery_note.name,
        "transporter_facility_license": frappe.db.get_value("Company", delivery_note.company, "license"),
        "transporter_phone": frappe.db.get_value("Company", delivery_note.company, "phone_no"),
        "recipient_license_number": delivery_note.license,
        "vechile_make": frappe.db.get_value("Vehicle", delivery_note.vehicle_no, "make"),
        "vehicle_model": frappe.db.get_value("Vehicle", delivery_note.vehicle_no, "model"),
        "vehicle_license_plate_number": delivery_note.vehicle_no,
        "driver_name": delivery_note.driver_name,
        "driver_license_number": frappe.db.get_value("Driver", delivery_note.driver, "license_number"),
        "estimated_departure": departure_time,
        "estimated_arrival": estimated_arrival,
        "packages": transfer_template_packages
    }
    frappe_client.insert(transfer_template)
def insert_delivery_payload(delivery_note, frappe_client):
    """
    Create the request body for package doctype in bloomtrace from a Delivery Note.
    Args:
        delivery_note (object): The `Delivery Note` Frappe object.
        frappe_client (object): BloomTrace Frappe API client used to insert.
    Returns:
        None. Inserts one Package record per qualifying item; returns early
        when an item produces no payload.
    """
    for item in delivery_note.items:
        payload = {}
        package_ingredients = []
        if item.package_tag:
            source_package_tag = frappe.db.get_value("Package Tag", item.package_tag, "source_package_tag")
            if source_package_tag:
                package_ingredients.append({
                    "package": source_package_tag,
                    "quantity": item.qty,
                    "unit_of_measure": item.uom,
                })
        elif item.warehouse:
            payload = {
                "tag": item.package_tag,
                "item": item.item_name,
                "quantity": item.qty,
                "unit_of_measure": item.uom,
                "patient_license_number": "",
                "actual_date": delivery_note.lr_date or delivery_note.posting_date
            }
        # NOTE(review): `payload` stays empty in the package_tag branch above,
        # so this `return` exits the whole function on the first such item and
        # any collected `package_ingredients` are never sent -- confirm
        # whether `continue` was intended here.
        if not payload:
            return
        payload["doctype"] = "Package"
        payload["Ingredients"] = package_ingredients
        payload["bloomstack_company"] = delivery_note.company
        frappe_client.insert(payload)
| 35.980892 | 138 | 0.761197 |
import frappe
from frappe import _
from frappe.utils import cstr, get_host_name
from bloomstack_core.bloomtrace import get_bloomtrace_client, make_integration_request
def create_integration_request(doc, method):
make_integration_request(doc.doctype, doc.name, "Package")
make_integration_request(doc.doctype, doc.name, "Transfer")
def link_invoice_against_delivery_note(delivery_note, method):
for item in delivery_note.items:
if item.against_sales_order and not item.against_sales_invoice:
sales_invoice_details = frappe.get_all("Sales Invoice Item",
filters={"docstatus": 1, "sales_order": item.against_sales_order},
fields=["distinct(parent)", "delivery_note"])
if sales_invoice_details and len(sales_invoice_details) == 1:
if sales_invoice_details[0].delivery_note:
continue
frappe.db.set_value("Delivery Note Item", item.name,
"against_sales_invoice", sales_invoice_details[0].parent)
def execute_bloomtrace_integration_request():
frappe_client = get_bloomtrace_client()
if not frappe_client:
return
pending_requests = frappe.get_all("Integration Request",
filters={
"status": ["IN", ["Queued", "Failed"]],
"reference_doctype": "Delivery Note",
"integration_request_service": "BloomTrace"
},
order_by="creation ASC",
limit=50)
for request in pending_requests:
integration_request = frappe.get_doc("Integration Request", request.name)
delivery_note = frappe.get_doc("Delivery Note", integration_request.reference_docname)
try:
error, status = "", "Completed"
if integration_request.endpoint == "Package":
if not delivery_note.is_return:
insert_delivery_payload(delivery_note, frappe_client)
else:
error, status = "Delivery Note is marked as return", "Failed"
if integration_request.endpoint == "Transfer":
if delivery_note.lr_no or (delivery_note.estimated_arrival and delivery_note.departure_time):
insert_transfer_template(delivery_note, frappe_client)
else:
error, status = "Delivery Trip / Estimated Departure / Estimated Arrival is missing", "Failed"
integration_request.error = error
integration_request.status = status
integration_request.save(ignore_permissions=True)
except Exception as e:
integration_request.error = cstr(frappe.get_traceback())
integration_request.status = "Failed"
integration_request.save(ignore_permissions=True)
def insert_transfer_template(delivery_note, frappe_client):
estimated_arrival = delivery_note.estimated_arrival
departure_time = delivery_note.departure_time
if delivery_note.lr_no:
delivery_trip = frappe.get_doc("Delivery Trip", delivery_note.lr_no)
for stop in delivery_trip.delivery_stops:
if stop.delivery_note == delivery_note.name:
estimated_arrival = stop.estimated_arrival
if not estimated_arrival:
try:
delivery_trip.process_route(False)
except Exception:
frappe.throw(_("Estimated Arrival Times are not present."))
if not departure_time:
departure_time = delivery_trip.departure_time
transfer_template_packages = []
for item in delivery_note.items:
if item.package_tag:
transfer_template_packages.append({
"package_tag": item.package_tag,
"wholesale_price": item.amount
})
site_url = get_host_name()
transfer_template = {
"doctype": "Transfer Template",
"bloomstack_company": delivery_note.company,
"delivery_note": delivery_note.name,
"transporter_facility_license": frappe.db.get_value("Company", delivery_note.company, "license"),
"transporter_phone": frappe.db.get_value("Company", delivery_note.company, "phone_no"),
"recipient_license_number": delivery_note.license,
"vechile_make": frappe.db.get_value("Vehicle", delivery_note.vehicle_no, "make"),
"vehicle_model": frappe.db.get_value("Vehicle", delivery_note.vehicle_no, "model"),
"vehicle_license_plate_number": delivery_note.vehicle_no,
"driver_name": delivery_note.driver_name,
"driver_license_number": frappe.db.get_value("Driver", delivery_note.driver, "license_number"),
"estimated_departure": departure_time,
"estimated_arrival": estimated_arrival,
"packages": transfer_template_packages
}
frappe_client.insert(transfer_template)
def insert_delivery_payload(delivery_note, frappe_client):
for item in delivery_note.items:
payload = {}
package_ingredients = []
if item.package_tag:
source_package_tag = frappe.db.get_value("Package Tag", item.package_tag, "source_package_tag")
if source_package_tag:
package_ingredients.append({
"package": source_package_tag,
"quantity": item.qty,
"unit_of_measure": item.uom,
})
elif item.warehouse:
payload = {
"tag": item.package_tag,
"item": item.item_name,
"quantity": item.qty,
"unit_of_measure": item.uom,
"patient_license_number": "",
"actual_date": delivery_note.lr_date or delivery_note.posting_date
}
if not payload:
return
payload["doctype"] = "Package"
payload["Ingredients"] = package_ingredients
payload["bloomstack_company"] = delivery_note.company
frappe_client.insert(payload)
| true | true |
1c3b2bd14abfd96bf72a56c8aadcab303109db52 | 95 | py | Python | nn_script/test_config.py | Lotuslisa/semantic_segmentation | b53932bac95371af4631b04e16e2c5d182ae78ae | [
"Apache-2.0"
] | null | null | null | nn_script/test_config.py | Lotuslisa/semantic_segmentation | b53932bac95371af4631b04e16e2c5d182ae78ae | [
"Apache-2.0"
] | null | null | null | nn_script/test_config.py | Lotuslisa/semantic_segmentation | b53932bac95371af4631b04e16e2c5d182ae78ae | [
"Apache-2.0"
] | null | null | null | #from config import params
import config

# Smoke-check that the local config module exposes the expected parameter.
print(config.p_img_h)
# Removed `print(a)`: `a` was never defined, so the line always raised
# NameError; also removed the commented-out probe lines.
| 13.571429 | 26 | 0.789474 |
import config
print(config.p_img_h)
print(a)
| true | true |
1c3b2c2bee6770ec4ad925047cf3827cb50649b1 | 10,701 | py | Python | digits/task.py | Itsuro/DIGITS | 7754c7de7085ebbae16875b3d5a16c42b6e24a7b | [
"BSD-3-Clause"
] | 1 | 2021-07-22T04:35:09.000Z | 2021-07-22T04:35:09.000Z | digits/task.py | Itsuro/DIGITS | 7754c7de7085ebbae16875b3d5a16c42b6e24a7b | [
"BSD-3-Clause"
] | null | null | null | digits/task.py | Itsuro/DIGITS | 7754c7de7085ebbae16875b3d5a16c42b6e24a7b | [
"BSD-3-Clause"
] | null | null | null | # Copyright (c) 2014-2016, NVIDIA CORPORATION. All rights reserved.
from __future__ import absolute_import
import logging
import os.path
import platform
import re
import signal
import subprocess
import time
import flask
import gevent.event
from . import utils
from .config import config_value
from .status import Status, StatusCls
import digits.log
# NOTE: Increment this everytime the pickled version changes
PICKLE_VERSION = 1
class Task(StatusCls):
"""
Base class for Tasks
A Task is a compute-heavy operation that runs in a separate executable
Communication is done by processing the stdout of the executable
"""
    def __init__(self, job_dir, parents=None):
        """
        Arguments:
        job_dir -- the job's directory; its basename becomes self.job_id
        Keyword arguments:
        parents -- None, a Task, or a list/tuple of Tasks that must reach
                   Status.DONE before this task is ready to queue
        """
        super(Task, self).__init__()
        # Incremented (at module level) whenever the pickled format changes.
        self.pickver_task = PICKLE_VERSION
        self.job_dir = job_dir
        self.job_id = os.path.basename(job_dir)
        # Normalize parents to None or a list of Tasks.
        if parents is None:
            self.parents = None
        elif isinstance(parents, (list, tuple)):
            self.parents = parents
        elif isinstance(parents, Task):
            self.parents = [parents]
        else:
            raise TypeError('parents is %s' % type(parents))
        self.exception = None
        self.traceback = None
        # Event set by the scheduler to request that run() aborts.
        self.aborted = gevent.event.Event()
        self.set_logger()
    def __getstate__(self):
        """
        Exclude unpicklable members (the gevent Event and the logger)
        from the pickled state
        """
        d = self.__dict__.copy()
        if 'aborted' in d:
            del d['aborted']
        if 'logger' in d:
            del d['logger']
        return d
    def __setstate__(self, state):
        """
        Restore state and recreate the members dropped by __getstate__
        """
        self.__dict__ = state
        self.aborted = gevent.event.Event()
        self.set_logger()
    def set_logger(self):
        """
        Attach a job-id-aware logger adapter (also recreated after unpickling)
        """
        self.logger = digits.log.JobIdLoggerAdapter(
            logging.getLogger('digits.webapp'),
            {'job_id': self.job_id},
        )
    def name(self):
        """
        Returns a human-readable name for this task
        (abstract -- subclasses must override)
        """
        raise NotImplementedError
    def html_id(self):
        """
        Returns a string usable as an HTML element id,
        unique per in-memory Task instance (derived from id(self))
        """
        return 'task-%s' % id(self)
    def on_status_update(self):
        """
        Called when StatusCls.status.setter is used.
        Pushes a socketio 'task update' message to clients watching this job.
        """
        from digits.webapp import app, socketio
        # Send socketio updates
        message = {
            'task': self.html_id(),
            'update': 'status',
            'status': self.status.name,
            'css': self.status.css,
            'show': (self.status in [Status.RUN, Status.ERROR]),
            'running': self.status.is_running(),
        }
        # Rendering requires an application context outside a request.
        with app.app_context():
            message['html'] = flask.render_template('status_updates.html',
                    updates = self.status_history,
                    exception = self.exception,
                    traceback = self.traceback,
                    )
        # Only clients subscribed to this job's room receive the update.
        socketio.emit('task update',
                message,
                namespace='/jobs',
                room=self.job_id,
                )
    def path(self, filename, relative=False):
        """
        Returns a path to the given file
        Arguments:
        filename -- the requested file
        Keyword arguments:
        relative -- If False, return an absolute path to the file
                    If True, return a path relative to the jobs directory
        """
        if not filename:
            return None
        if os.path.isabs(filename):
            path = filename
        else:
            # Relative filenames are resolved against the job directory.
            path = os.path.join(self.job_dir, filename)
        if relative:
            path = os.path.relpath(path, config_value('jobs_dir'))
        # Normalize Windows backslashes to forward slashes.
        return str(path).replace("\\","/")
def ready_to_queue(self):
"""
Returns True if all parents are done
"""
if not self.parents:
return True
for parent in self.parents:
if parent.status != Status.DONE:
return False
return True
    def offer_resources(self, resources):
        """
        Check the available resources and return a set of requested resources
        (abstract -- subclasses must override)
        Arguments:
        resources -- a copy of scheduler.resources
        """
        raise NotImplementedError
    def task_arguments(self, resources, env):
        """
        Returns args used by subprocess.Popen to execute the task
        Returns False if the args cannot be set properly
        (abstract -- subclasses must override)
        Arguments:
        resources -- the resources assigned by the scheduler for this task
        env -- os.environ instance to run the process in
        """
        raise NotImplementedError
    def before_run(self):
        """
        Called before run() executes.
        Default is a no-op; subclasses may override.
        Raises exceptions
        """
        pass
def run(self, resources):
"""
Execute the task
Arguments:
resources -- the resources assigned by the scheduler for this task
"""
self.before_run()
env = os.environ.copy()
args = self.task_arguments(resources, env )
if not args:
self.logger.error('Could not create the arguments for Popen')
self.status = Status.ERROR
return False
# Convert them all to strings
args = [str(x) for x in args]
self.logger.info('%s task started.' % self.name())
self.status = Status.RUN
unrecognized_output = []
import sys
env['PYTHONPATH'] = os.pathsep.join(['.', self.job_dir, env.get('PYTHONPATH', '')] + sys.path)
p = subprocess.Popen(args,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
cwd=self.job_dir,
close_fds=False if platform.system() == 'Windows' else True,
env=env,
)
try:
sigterm_time = None # When was the SIGTERM signal sent
sigterm_timeout = 2 # When should the SIGKILL signal be sent
while p.poll() is None:
for line in utils.nonblocking_readlines(p.stdout):
if self.aborted.is_set():
if sigterm_time is None:
# Attempt graceful shutdown
p.send_signal(signal.SIGTERM)
sigterm_time = time.time()
self.status = Status.ABORT
break
if line is not None:
# Remove whitespace
line = line.strip()
if line:
if not self.process_output(line):
self.logger.warning('%s unrecognized output: %s' % (self.name(), line.strip()))
unrecognized_output.append(line)
else:
time.sleep(0.05)
if sigterm_time is not None and (time.time() - sigterm_time > sigterm_timeout):
p.send_signal(signal.SIGKILL)
self.logger.warning('Sent SIGKILL to task "%s"' % self.name())
time.sleep(0.1)
except:
p.terminate()
self.after_run()
raise
self.after_run()
if self.status != Status.RUN:
return False
elif p.returncode != 0:
self.logger.error('%s task failed with error code %d' % (self.name(), p.returncode))
if self.exception is None:
self.exception = 'error code %d' % p.returncode
if unrecognized_output:
if self.traceback is None:
self.traceback = '\n'.join(unrecognized_output)
else:
self.traceback = self.traceback + ('\n'.join(unrecognized_output))
self.after_runtime_error()
self.status = Status.ERROR
return False
else:
self.logger.info('%s task completed.' % self.name())
self.status = Status.DONE
return True
def abort(self):
"""
Abort the Task
"""
if self.status.is_running():
self.aborted.set()
def preprocess_output_digits(self, line):
"""
Takes line of output and parses it according to DIGITS's log format
Returns (timestamp, level, message) or (None, None, None)
"""
# NOTE: This must change when the logging format changes
# YYYY-MM-DD HH:MM:SS [LEVEL] message
match = re.match(r'(\S{10} \S{8}) \[(\w+)\s*\] (.*)$', line)
if match:
timestr = match.group(1)
timestamp = time.mktime(time.strptime(timestr, digits.log.DATE_FORMAT))
level = match.group(2)
message = match.group(3)
if level.startswith('DEB'):
level = 'debug'
elif level.startswith('INF'):
level = 'info'
elif level.startswith('WAR'):
level = 'warning'
elif level.startswith('ERR'):
level = 'error'
elif level.startswith('CRI'):
level = 'critical'
return (timestamp, level, message)
else:
return (None, None, None)
def process_output(self, line):
"""
Process a line of output from the task
Returns True if the output was able to be processed
Arguments:
line -- a line of output
"""
raise NotImplementedError
def est_done(self):
"""
Returns the estimated time in seconds until the task is done
"""
if self.status != Status.RUN or self.progress == 0:
return None
elapsed = time.time() - self.status_history[-1][1]
return (1 - self.progress) * elapsed // self.progress
def after_run(self):
"""
Called after run() executes
"""
pass
def after_runtime_error(self):
"""
Called after a runtime error during run()
"""
pass
def emit_progress_update(self):
"""
Call socketio.emit for task progress update, and trigger job progress update.
"""
from digits.webapp import socketio
socketio.emit('task update',
{
'task': self.html_id(),
'update': 'progress',
'percentage': int(round(100*self.progress)),
'eta': utils.time_filters.print_time_diff(self.est_done()),
},
namespace='/jobs',
room=self.job_id,
)
from digits.webapp import scheduler
job = scheduler.get_job(self.job_id)
if job:
job.emit_progress_update()
| 31.017391 | 107 | 0.530698 |
from __future__ import absolute_import
import logging
import os.path
import platform
import re
import signal
import subprocess
import time
import flask
import gevent.event
from . import utils
from .config import config_value
from .status import Status, StatusCls
import digits.log
PICKLE_VERSION = 1
class Task(StatusCls):
def __init__(self, job_dir, parents=None):
super(Task, self).__init__()
self.pickver_task = PICKLE_VERSION
self.job_dir = job_dir
self.job_id = os.path.basename(job_dir)
if parents is None:
self.parents = None
elif isinstance(parents, (list, tuple)):
self.parents = parents
elif isinstance(parents, Task):
self.parents = [parents]
else:
raise TypeError('parents is %s' % type(parents))
self.exception = None
self.traceback = None
self.aborted = gevent.event.Event()
self.set_logger()
def __getstate__(self):
d = self.__dict__.copy()
if 'aborted' in d:
del d['aborted']
if 'logger' in d:
del d['logger']
return d
def __setstate__(self, state):
self.__dict__ = state
self.aborted = gevent.event.Event()
self.set_logger()
def set_logger(self):
self.logger = digits.log.JobIdLoggerAdapter(
logging.getLogger('digits.webapp'),
{'job_id': self.job_id},
)
def name(self):
raise NotImplementedError
def html_id(self):
return 'task-%s' % id(self)
def on_status_update(self):
from digits.webapp import app, socketio
message = {
'task': self.html_id(),
'update': 'status',
'status': self.status.name,
'css': self.status.css,
'show': (self.status in [Status.RUN, Status.ERROR]),
'running': self.status.is_running(),
}
with app.app_context():
message['html'] = flask.render_template('status_updates.html',
updates = self.status_history,
exception = self.exception,
traceback = self.traceback,
)
socketio.emit('task update',
message,
namespace='/jobs',
room=self.job_id,
)
def path(self, filename, relative=False):
if not filename:
return None
if os.path.isabs(filename):
path = filename
else:
path = os.path.join(self.job_dir, filename)
if relative:
path = os.path.relpath(path, config_value('jobs_dir'))
return str(path).replace("\\","/")
def ready_to_queue(self):
if not self.parents:
return True
for parent in self.parents:
if parent.status != Status.DONE:
return False
return True
def offer_resources(self, resources):
raise NotImplementedError
def task_arguments(self, resources, env):
raise NotImplementedError
def before_run(self):
pass
def run(self, resources):
self.before_run()
env = os.environ.copy()
args = self.task_arguments(resources, env )
if not args:
self.logger.error('Could not create the arguments for Popen')
self.status = Status.ERROR
return False
args = [str(x) for x in args]
self.logger.info('%s task started.' % self.name())
self.status = Status.RUN
unrecognized_output = []
import sys
env['PYTHONPATH'] = os.pathsep.join(['.', self.job_dir, env.get('PYTHONPATH', '')] + sys.path)
p = subprocess.Popen(args,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
cwd=self.job_dir,
close_fds=False if platform.system() == 'Windows' else True,
env=env,
)
try:
sigterm_time = None
sigterm_timeout = 2
while p.poll() is None:
for line in utils.nonblocking_readlines(p.stdout):
if self.aborted.is_set():
if sigterm_time is None:
p.send_signal(signal.SIGTERM)
sigterm_time = time.time()
self.status = Status.ABORT
break
if line is not None:
line = line.strip()
if line:
if not self.process_output(line):
self.logger.warning('%s unrecognized output: %s' % (self.name(), line.strip()))
unrecognized_output.append(line)
else:
time.sleep(0.05)
if sigterm_time is not None and (time.time() - sigterm_time > sigterm_timeout):
p.send_signal(signal.SIGKILL)
self.logger.warning('Sent SIGKILL to task "%s"' % self.name())
time.sleep(0.1)
except:
p.terminate()
self.after_run()
raise
self.after_run()
if self.status != Status.RUN:
return False
elif p.returncode != 0:
self.logger.error('%s task failed with error code %d' % (self.name(), p.returncode))
if self.exception is None:
self.exception = 'error code %d' % p.returncode
if unrecognized_output:
if self.traceback is None:
self.traceback = '\n'.join(unrecognized_output)
else:
self.traceback = self.traceback + ('\n'.join(unrecognized_output))
self.after_runtime_error()
self.status = Status.ERROR
return False
else:
self.logger.info('%s task completed.' % self.name())
self.status = Status.DONE
return True
def abort(self):
if self.status.is_running():
self.aborted.set()
def preprocess_output_digits(self, line):
match = re.match(r'(\S{10} \S{8}) \[(\w+)\s*\] (.*)$', line)
if match:
timestr = match.group(1)
timestamp = time.mktime(time.strptime(timestr, digits.log.DATE_FORMAT))
level = match.group(2)
message = match.group(3)
if level.startswith('DEB'):
level = 'debug'
elif level.startswith('INF'):
level = 'info'
elif level.startswith('WAR'):
level = 'warning'
elif level.startswith('ERR'):
level = 'error'
elif level.startswith('CRI'):
level = 'critical'
return (timestamp, level, message)
else:
return (None, None, None)
def process_output(self, line):
raise NotImplementedError
def est_done(self):
if self.status != Status.RUN or self.progress == 0:
return None
elapsed = time.time() - self.status_history[-1][1]
return (1 - self.progress) * elapsed // self.progress
def after_run(self):
pass
def after_runtime_error(self):
pass
def emit_progress_update(self):
from digits.webapp import socketio
socketio.emit('task update',
{
'task': self.html_id(),
'update': 'progress',
'percentage': int(round(100*self.progress)),
'eta': utils.time_filters.print_time_diff(self.est_done()),
},
namespace='/jobs',
room=self.job_id,
)
from digits.webapp import scheduler
job = scheduler.get_job(self.job_id)
if job:
job.emit_progress_update()
| true | true |
1c3b2cc148e2f79f5a4ed7484eacb3e5b47fe720 | 5,676 | py | Python | sarveillance/utils.py | ropg/SARveillance | 94c0b348df4fc5b9ee532aadfe3514e105441a74 | [
"MIT"
] | null | null | null | sarveillance/utils.py | ropg/SARveillance | 94c0b348df4fc5b9ee532aadfe3514e105441a74 | [
"MIT"
] | null | null | null | sarveillance/utils.py | ropg/SARveillance | 94c0b348df4fc5b9ee532aadfe3514e105441a74 | [
"MIT"
] | null | null | null | import os
import subprocess
import ee
from geemap import png_to_gif
import matplotlib.pyplot as plt
from geemap.cartoee import get_map, add_gridlines, add_scale_bar_lite, add_north_arrow
def new_get_image_collection_gif(
ee_ic,
out_dir,
out_gif,
vis_params,
region,
cmap=None,
proj=None,
fps=10,
mp4=False,
grid_interval=None,
plot_title="",
date_format="YYYY-MM-dd",
fig_size=(10, 10),
dpi_plot=100,
file_format="png",
north_arrow_dict={},
scale_bar_dict={},
verbose=True,
):
"""Download all the images in an image collection and use them to generate a gif/video.
Args:
ee_ic (object): ee.ImageCollection
out_dir (str): The output directory of images and video.
out_gif (str): The name of the gif file.
vis_params (dict): Visualization parameters as a dictionary.
region (list | tuple): Geospatial region of the image to render in format [E,S,W,N].
fps (int, optional): Video frames per second. Defaults to 10.
mp4 (bool, optional): Whether to create mp4 video.
grid_interval (float | tuple[float]): Float specifying an interval at which to create gridlines, units are decimal degrees. lists will be interpreted a (x_interval, y_interval), such as (0.1, 0.1). Defaults to None.
plot_title (str): Plot title. Defaults to "".
date_format (str, optional): A pattern, as described at http://joda-time.sourceforge.net/apidocs/org/joda/time/format/DateTimeFormat.html. Defaults to "YYYY-MM-dd".
fig_size (tuple, optional): Size of the figure.
dpi_plot (int, optional): The resolution in dots per inch of the plot.
file_format (str, optional): Either 'png' or 'jpg'.
north_arrow_dict (dict, optional): Parameters for the north arrow. See https://geemap.org/cartoee/#geemap.cartoee.add_north_arrow. Defaults to {}.
scale_bar_dict (dict, optional): Parameters for the scale bar. See https://geemap.org/cartoee/#geemap.cartoee.add_scale_bar. Defaults. to {}.
verbose (bool, optional): Whether or not to print text when the program is running. Defaults to True.
"""
# from .geemap import png_to_gif
out_dir = os.path.abspath(out_dir)
if not os.path.exists(out_dir):
os.makedirs(out_dir)
out_gif = os.path.join(out_dir, out_gif)
count = int(ee_ic.size().getInfo())
names = ee_ic.aggregate_array("system:index").getInfo()
images = ee_ic.toList(count)
dates = ee_ic.aggregate_array("system:time_start")
dates = dates.map(lambda d: ee.Date(d).format(date_format)).getInfo()
# list of file name
img_list = []
for i, date in enumerate(dates):
image = ee.Image(images.get(i))
name = str(names[i])
# name = name + "." + file_format
name = str(i).zfill(3) + "_" + name + "." + file_format
out_img = os.path.join(out_dir, name)
img_list.append(out_img)
if verbose:
print(f"Downloading {i+1}/{count}: {name} ...")
# Size plot
plt.figure(figsize=fig_size)
# Plot image
ax = get_map(image, region=region, vis_params=vis_params, cmap=cmap, proj=proj)
# Add grid
if grid_interval is not None:
add_gridlines(ax, interval=grid_interval, linestyle=":")
# Add title
if len(plot_title) > 0:
ax.set_title(label=plot_title + " " + date + "\n", fontsize=15)
# Add scale bar
if len(scale_bar_dict) > 0:
add_scale_bar_lite(ax, **scale_bar_dict)
# Add north arrow
if len(north_arrow_dict) > 0:
add_north_arrow(ax, **north_arrow_dict)
# Save plot
plt.savefig(fname=out_img, dpi=dpi_plot)
plt.clf()
plt.close()
out_gif = os.path.abspath(out_gif)
png_to_gif(out_dir, out_gif, fps)
if verbose:
print(f"GIF saved to {out_gif}")
if mp4:
video_filename = out_gif.replace(".gif", ".mp4")
try:
import cv2
except ImportError:
print("Installing opencv-python ...")
subprocess.check_call(["python", "-m", "pip", "install", "opencv-python"])
import cv2
# Video file name
output_video_file_name = os.path.join(out_dir, video_filename)
frame = cv2.imread(img_list[0])
height, width, _ = frame.shape
frame_size = (width, height)
fps_video = fps
# Make mp4
fourcc = cv2.VideoWriter_fourcc(*"mp4v")
# Function
def convert_frames_to_video(
input_list, output_video_file_name, fps_video, frame_size
):
"""Convert frames to video
Args:
input_list (list): Downloaded Image Name List.
output_video_file_name (str): The name of the video file in the image directory.
fps_video (int): Video frames per second.
frame_size (tuple): Frame size.
"""
out = cv2.VideoWriter(output_video_file_name, fourcc, fps_video, frame_size)
num_frames = len(input_list)
for i in range(num_frames):
img_path = input_list[i]
img = cv2.imread(img_path)
out.write(img)
out.release()
cv2.destroyAllWindows()
# Use function
convert_frames_to_video(
input_list=img_list,
output_video_file_name=output_video_file_name,
fps_video=fps_video,
frame_size=frame_size,
)
if verbose:
print(f"MP4 saved to {output_video_file_name}")
| 34.4 | 223 | 0.619626 | import os
import subprocess
import ee
from geemap import png_to_gif
import matplotlib.pyplot as plt
from geemap.cartoee import get_map, add_gridlines, add_scale_bar_lite, add_north_arrow
def new_get_image_collection_gif(
ee_ic,
out_dir,
out_gif,
vis_params,
region,
cmap=None,
proj=None,
fps=10,
mp4=False,
grid_interval=None,
plot_title="",
date_format="YYYY-MM-dd",
fig_size=(10, 10),
dpi_plot=100,
file_format="png",
north_arrow_dict={},
scale_bar_dict={},
verbose=True,
):
out_dir = os.path.abspath(out_dir)
if not os.path.exists(out_dir):
os.makedirs(out_dir)
out_gif = os.path.join(out_dir, out_gif)
count = int(ee_ic.size().getInfo())
names = ee_ic.aggregate_array("system:index").getInfo()
images = ee_ic.toList(count)
dates = ee_ic.aggregate_array("system:time_start")
dates = dates.map(lambda d: ee.Date(d).format(date_format)).getInfo()
img_list = []
for i, date in enumerate(dates):
image = ee.Image(images.get(i))
name = str(names[i])
name = str(i).zfill(3) + "_" + name + "." + file_format
out_img = os.path.join(out_dir, name)
img_list.append(out_img)
if verbose:
print(f"Downloading {i+1}/{count}: {name} ...")
plt.figure(figsize=fig_size)
ax = get_map(image, region=region, vis_params=vis_params, cmap=cmap, proj=proj)
if grid_interval is not None:
add_gridlines(ax, interval=grid_interval, linestyle=":")
if len(plot_title) > 0:
ax.set_title(label=plot_title + " " + date + "\n", fontsize=15)
if len(scale_bar_dict) > 0:
add_scale_bar_lite(ax, **scale_bar_dict)
if len(north_arrow_dict) > 0:
add_north_arrow(ax, **north_arrow_dict)
plt.savefig(fname=out_img, dpi=dpi_plot)
plt.clf()
plt.close()
out_gif = os.path.abspath(out_gif)
png_to_gif(out_dir, out_gif, fps)
if verbose:
print(f"GIF saved to {out_gif}")
if mp4:
video_filename = out_gif.replace(".gif", ".mp4")
try:
import cv2
except ImportError:
print("Installing opencv-python ...")
subprocess.check_call(["python", "-m", "pip", "install", "opencv-python"])
import cv2
output_video_file_name = os.path.join(out_dir, video_filename)
frame = cv2.imread(img_list[0])
height, width, _ = frame.shape
frame_size = (width, height)
fps_video = fps
fourcc = cv2.VideoWriter_fourcc(*"mp4v")
def convert_frames_to_video(
input_list, output_video_file_name, fps_video, frame_size
):
out = cv2.VideoWriter(output_video_file_name, fourcc, fps_video, frame_size)
num_frames = len(input_list)
for i in range(num_frames):
img_path = input_list[i]
img = cv2.imread(img_path)
out.write(img)
out.release()
cv2.destroyAllWindows()
convert_frames_to_video(
input_list=img_list,
output_video_file_name=output_video_file_name,
fps_video=fps_video,
frame_size=frame_size,
)
if verbose:
print(f"MP4 saved to {output_video_file_name}")
| true | true |
1c3b2f7f03ede1591da94a2a82eba4306f1b4a39 | 1,252 | py | Python | src/ccc/multiset.py | ajcr/ccc | ba660ebd358a48ab3df7533b261f9597cede27be | [
"MIT"
] | 3 | 2019-10-07T09:38:15.000Z | 2020-01-20T16:36:26.000Z | src/ccc/multiset.py | ajcr/combination-calculator | 71108eef73bd2ea4367abe423251c7b6f21ba221 | [
"MIT"
] | 12 | 2019-02-10T22:01:48.000Z | 2020-06-06T21:31:51.000Z | src/ccc/multiset.py | ajcr/combination-calculator | 71108eef73bd2ea4367abe423251c7b6f21ba221 | [
"MIT"
] | null | null | null | from typing import AbstractSet, Dict, List, Optional, Tuple
from sympy import Poly, prod
from sympy.abc import x
from ccc.polynomialtracker import PolynomialTracker
class Multiset(PolynomialTracker):
"""
Track multisets that meet zero or more constraints.
"""
def __init__(
self,
size: int,
collection: Optional[Dict[str, int]] = None,
constraints: Optional[List[Tuple]] = None,
) -> None:
if constraints is None and collection is None:
raise ValueError("Must specify either 'constraints', 'collection', or both")
super().__init__(size, collection, constraints)
def count(self) -> int:
"""
Count number of possible multisets that meet constraints.
"""
poly = prod(degrees_to_polynomial(degrees) for degrees in self._degrees.values())
return poly.coeff_monomial(x ** self._max_degree)
def degrees_to_polynomial(degrees: AbstractSet[int]) -> Poly:
"""
For each degree in a set, create the polynomial with those
terms having coefficient 1 (and all other terms zero), e.g.:
{0, 2, 5} -> x**5 + x**2 + 1
"""
degrees_dict = dict.fromkeys(degrees, 1)
return Poly.from_dict(degrees_dict, x)
| 27.822222 | 89 | 0.653355 | from typing import AbstractSet, Dict, List, Optional, Tuple
from sympy import Poly, prod
from sympy.abc import x
from ccc.polynomialtracker import PolynomialTracker
class Multiset(PolynomialTracker):
def __init__(
self,
size: int,
collection: Optional[Dict[str, int]] = None,
constraints: Optional[List[Tuple]] = None,
) -> None:
if constraints is None and collection is None:
raise ValueError("Must specify either 'constraints', 'collection', or both")
super().__init__(size, collection, constraints)
def count(self) -> int:
poly = prod(degrees_to_polynomial(degrees) for degrees in self._degrees.values())
return poly.coeff_monomial(x ** self._max_degree)
def degrees_to_polynomial(degrees: AbstractSet[int]) -> Poly:
degrees_dict = dict.fromkeys(degrees, 1)
return Poly.from_dict(degrees_dict, x)
| true | true |
1c3b301f1807dcfd044bd7569726ac637e4756fe | 4,353 | py | Python | contrib/seeds/generate-seeds.py | Palem1988/ion_old | 2c2b532abf61e2a06231c1d3b4d9b2bd0cdb469a | [
"MIT"
] | 2 | 2017-01-16T13:42:19.000Z | 2017-01-16T17:14:59.000Z | contrib/seeds/generate-seeds.py | ionomy/ion_new | 759071e12ba2ab889221bf91d99bb052a3b98303 | [
"MIT"
] | 18 | 2017-01-19T09:19:48.000Z | 2017-01-27T01:59:30.000Z | contrib/seeds/generate-seeds.py | ionomy/ion_new | 759071e12ba2ab889221bf91d99bb052a3b98303 | [
"MIT"
] | 10 | 2017-01-17T19:54:55.000Z | 2017-02-11T19:26:43.000Z | #!/usr/bin/env python3
# Copyright (c) 2014-2017 Wladimir J. van der Laan
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
# Copyright (c) 2017 CEVAP
'''
Script to generate list of seed nodes for chainparams.cpp.
This script expects two text files in the directory that is passed as an
argument:
nodes_main.txt
nodes_test.txt
These files must consist of lines in the format
<ip>
<ip>:<port>
[<ipv6>]
[<ipv6>]:<port>
<onion>.onion
0xDDBBCCAA (IPv4 little-endian old pnSeeds format)
The output will be two data structures with the peers in binary format:
static SeedSpec6 pnSeed6_main[]={
...
}
static SeedSpec6 pnSeed6_test[]={
...
}
These should be pasted into `src/chainparamsseeds.h`.
'''
from base64 import b32decode
from binascii import a2b_hex
import sys, os
import re
# ipv4 in ipv6 prefix
pchIPv4 = bytearray([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff])
# tor-specific ipv6 prefix
pchOnionCat = bytearray([0xFD,0x87,0xD8,0x7E,0xEB,0x43])
def name_to_ipv6(addr):
if len(addr)>6 and addr.endswith('.onion'):
vchAddr = b32decode(addr[0:-6], True)
if len(vchAddr) != 16-len(pchOnionCat):
raise ValueError('Invalid onion %s' % s)
return pchOnionCat + vchAddr
elif '.' in addr: # IPv4
return pchIPv4 + bytearray((int(x) for x in addr.split('.')))
elif ':' in addr: # IPv6
sub = [[], []] # prefix, suffix
x = 0
addr = addr.split(':')
for i,comp in enumerate(addr):
if comp == '':
if i == 0 or i == (len(addr)-1): # skip empty component at beginning or end
continue
x += 1 # :: skips to suffix
assert(x < 2)
else: # two bytes per component
val = int(comp, 16)
sub[x].append(val >> 8)
sub[x].append(val & 0xff)
nullbytes = 16 - len(sub[0]) - len(sub[1])
assert((x == 0 and nullbytes == 0) or (x == 1 and nullbytes > 0))
return bytearray(sub[0] + ([0] * nullbytes) + sub[1])
elif addr.startswith('0x'): # IPv4-in-little-endian
return pchIPv4 + bytearray(reversed(a2b_hex(addr[2:])))
else:
raise ValueError('Could not parse address %s' % addr)
def parse_spec(s, defaultport):
match = re.match('\[([0-9a-fA-F:]+)\](?::([0-9]+))?$', s)
if match: # ipv6
host = match.group(1)
port = match.group(2)
elif s.count(':') > 1: # ipv6, no port
host = s
port = ''
else:
(host,_,port) = s.partition(':')
if not port:
port = defaultport
else:
port = int(port)
host = name_to_ipv6(host)
return (host,port)
def process_nodes(g, f, structname, defaultport):
g.write('static SeedSpec6 %s[] = {\n' % structname)
first = True
for line in f:
comment = line.find('#')
if comment != -1:
line = line[0:comment]
line = line.strip()
if not line:
continue
if not first:
g.write(',\n')
first = False
(host,port) = parse_spec(line, defaultport)
hoststr = ','.join(('0x%02x' % b) for b in host)
g.write(' {{%s}, %i}' % (hoststr, port))
g.write('\n};\n')
def main():
if len(sys.argv)<2:
print(('Usage: %s <path_to_nodes_txt>' % sys.argv[0]), file=sys.stderr)
exit(1)
g = sys.stdout
indir = sys.argv[1]
g.write('#ifndef ION_CHAINPARAMSSEEDS_H\n')
g.write('#define ION_CHAINPARAMSSEEDS_H\n')
g.write('/**\n')
g.write(' * List of fixed seed nodes for the ion network\n')
g.write(' * AUTOGENERATED by contrib/seeds/generate-seeds.py\n')
g.write(' *\n')
g.write(' * Each line contains a 16-byte IPv6 address and a port.\n')
g.write(' * IPv4 as well as onion addresses are wrapped inside a IPv6 address accordingly.\n')
g.write(' */\n')
with open(os.path.join(indir,'nodes_main.txt'),'r') as f:
process_nodes(g, f, 'pnSeed6_main', 12700)
g.write('\n')
with open(os.path.join(indir,'nodes_test.txt'),'r') as f:
process_nodes(g, f, 'pnSeed6_test', 27170)
g.write('#endif // ION_CHAINPARAMSSEEDS_H\n')
if __name__ == '__main__':
main()
| 31.092857 | 98 | 0.578911 |
from base64 import b32decode
from binascii import a2b_hex
import sys, os
import re
pchIPv4 = bytearray([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff])
pchOnionCat = bytearray([0xFD,0x87,0xD8,0x7E,0xEB,0x43])
def name_to_ipv6(addr):
if len(addr)>6 and addr.endswith('.onion'):
vchAddr = b32decode(addr[0:-6], True)
if len(vchAddr) != 16-len(pchOnionCat):
raise ValueError('Invalid onion %s' % s)
return pchOnionCat + vchAddr
elif '.' in addr:
return pchIPv4 + bytearray((int(x) for x in addr.split('.')))
elif ':' in addr:
sub = [[], []]
x = 0
addr = addr.split(':')
for i,comp in enumerate(addr):
if comp == '':
if i == 0 or i == (len(addr)-1):
continue
x += 1
assert(x < 2)
else:
val = int(comp, 16)
sub[x].append(val >> 8)
sub[x].append(val & 0xff)
nullbytes = 16 - len(sub[0]) - len(sub[1])
assert((x == 0 and nullbytes == 0) or (x == 1 and nullbytes > 0))
return bytearray(sub[0] + ([0] * nullbytes) + sub[1])
elif addr.startswith('0x'):
return pchIPv4 + bytearray(reversed(a2b_hex(addr[2:])))
else:
raise ValueError('Could not parse address %s' % addr)
def parse_spec(s, defaultport):
match = re.match('\[([0-9a-fA-F:]+)\](?::([0-9]+))?$', s)
if match:
host = match.group(1)
port = match.group(2)
elif s.count(':') > 1:
host = s
port = ''
else:
(host,_,port) = s.partition(':')
if not port:
port = defaultport
else:
port = int(port)
host = name_to_ipv6(host)
return (host,port)
def process_nodes(g, f, structname, defaultport):
g.write('static SeedSpec6 %s[] = {\n' % structname)
first = True
for line in f:
comment = line.find('#')
if comment != -1:
line = line[0:comment]
line = line.strip()
if not line:
continue
if not first:
g.write(',\n')
first = False
(host,port) = parse_spec(line, defaultport)
hoststr = ','.join(('0x%02x' % b) for b in host)
g.write(' {{%s}, %i}' % (hoststr, port))
g.write('\n};\n')
def main():
if len(sys.argv)<2:
print(('Usage: %s <path_to_nodes_txt>' % sys.argv[0]), file=sys.stderr)
exit(1)
g = sys.stdout
indir = sys.argv[1]
g.write('#ifndef ION_CHAINPARAMSSEEDS_H\n')
g.write('#define ION_CHAINPARAMSSEEDS_H\n')
g.write('/**\n')
g.write(' * List of fixed seed nodes for the ion network\n')
g.write(' * AUTOGENERATED by contrib/seeds/generate-seeds.py\n')
g.write(' *\n')
g.write(' * Each line contains a 16-byte IPv6 address and a port.\n')
g.write(' * IPv4 as well as onion addresses are wrapped inside a IPv6 address accordingly.\n')
g.write(' */\n')
with open(os.path.join(indir,'nodes_main.txt'),'r') as f:
process_nodes(g, f, 'pnSeed6_main', 12700)
g.write('\n')
with open(os.path.join(indir,'nodes_test.txt'),'r') as f:
process_nodes(g, f, 'pnSeed6_test', 27170)
g.write('#endif // ION_CHAINPARAMSSEEDS_H\n')
if __name__ == '__main__':
main()
| true | true |
1c3b318f2d6cd2c7a20239ca26ab5d61d0829056 | 327 | py | Python | aidants_connect_web/management/commands/notify_new_habilitation_requests.py | betagouv/Aidants_Connect | 2329d41545912460c4a43b5b41c892189fc1df11 | [
"MIT"
] | 16 | 2019-05-13T08:32:40.000Z | 2022-03-22T13:40:57.000Z | aidants_connect_web/management/commands/notify_new_habilitation_requests.py | betagouv/Aidants_Connect | 2329d41545912460c4a43b5b41c892189fc1df11 | [
"MIT"
] | 207 | 2019-05-15T16:30:52.000Z | 2022-03-31T15:26:25.000Z | aidants_connect_web/management/commands/notify_new_habilitation_requests.py | betagouv/Aidants_Connect | 2329d41545912460c4a43b5b41c892189fc1df11 | [
"MIT"
] | 12 | 2019-11-08T13:44:35.000Z | 2022-02-14T15:38:37.000Z | from django.core.management.base import BaseCommand
from aidants_connect_web.tasks import notify_new_habilitation_requests
class Command(BaseCommand):
help = "Notifies staff administrators that new habilitation requests are to be seen"
def handle(self, *args, **options):
notify_new_habilitation_requests()
| 29.727273 | 88 | 0.792049 | from django.core.management.base import BaseCommand
from aidants_connect_web.tasks import notify_new_habilitation_requests
class Command(BaseCommand):
help = "Notifies staff administrators that new habilitation requests are to be seen"
def handle(self, *args, **options):
notify_new_habilitation_requests()
| true | true |
1c3b325bf949db86cc6337fac1baaa24425c6b1b | 10,920 | py | Python | tods/detection_algorithm/PyodIsolationForest.py | KODeKarnage/tods | d1b5395d0d530630dc514638726face4d4796d01 | [
"Apache-2.0"
] | 1 | 2020-11-29T05:50:02.000Z | 2020-11-29T05:50:02.000Z | tods/detection_algorithm/PyodIsolationForest.py | MaxMohammadi/tods | 40e79269f14b60e090028188f1ed8380d518270f | [
"Apache-2.0"
] | null | null | null | tods/detection_algorithm/PyodIsolationForest.py | MaxMohammadi/tods | 40e79269f14b60e090028188f1ed8380d518270f | [
"Apache-2.0"
] | 1 | 2021-03-26T03:03:28.000Z | 2021-03-26T03:03:28.000Z | from typing import Any, Callable, List, Dict, Union, Optional, Sequence, Tuple
from numpy import ndarray
from collections import OrderedDict
from scipy import sparse
import os
import sklearn
import numpy
import typing
# Custom import commands if any
import warnings
import numpy as np
from sklearn.utils import check_array
from sklearn.exceptions import NotFittedError
# from numba import njit
from pyod.utils.utility import argmaxn
from d3m.container.numpy import ndarray as d3m_ndarray
from d3m.container import DataFrame as d3m_dataframe
from d3m.metadata import hyperparams, params, base as metadata_base
from d3m import utils
from d3m.base import utils as base_utils
from d3m.exceptions import PrimitiveNotFittedError
from d3m.primitive_interfaces.base import CallResult, DockerContainer
# from d3m.primitive_interfaces.supervised_learning import SupervisedLearnerPrimitiveBase
from d3m.primitive_interfaces.unsupervised_learning import UnsupervisedLearnerPrimitiveBase
from d3m.primitive_interfaces.transformer import TransformerPrimitiveBase
from d3m.primitive_interfaces.base import ProbabilisticCompositionalityMixin, ContinueFitMixin
from d3m import exceptions
import pandas
from d3m import container, utils as d3m_utils
from .UODBasePrimitive import Params_ODBase, Hyperparams_ODBase, UnsupervisedOutlierDetectorBase
from pyod.models.iforest import IForest
from typing import Union
import uuid
Inputs = d3m_dataframe
Outputs = d3m_dataframe
class Params(Params_ODBase):
######## Add more Attributes #######
pass
class Hyperparams(Hyperparams_ODBase):
######## Add more Hyperparamters #######
n_estimators = hyperparams.Hyperparameter[int](
default=100,
description='The number of base estimators in the ensemble.',
semantic_types=['https://metadata.datadrivendiscovery.org/types/TuningParameter']
)
max_samples = hyperparams.Enumeration[str](
values=['auto', 'int', 'float'],
default='auto', # 'box-cox', #
description='The number of samples to draw from X to train each base estimator.',
semantic_types=['https://metadata.datadrivendiscovery.org/types/TuningParameter']
)
max_features = hyperparams.Hyperparameter[float](
default=1.,
description='The number of features to draw from X to train each base estimator.',
semantic_types=['https://metadata.datadrivendiscovery.org/types/TuningParameter']
)
bootstrap = hyperparams.UniformBool(
default=False,
description='If True, individual trees are fit on random subsets of the training data sampled with replacement. If False, sampling without replacement is performed.',
semantic_types=['https://metadata.datadrivendiscovery.org/types/TuningParameter']
)
behaviour = hyperparams.Enumeration[str](
values=['old', 'new'],
default='new',
description='Refer to https://github.com/yzhao062/pyod/blob/master/pyod/models/iforest.py.',
semantic_types=['https://metadata.datadrivendiscovery.org/types/TuningParameter']
)
random_state = hyperparams.Union[Union[int, None]](
configuration=OrderedDict(
init=hyperparams.Hyperparameter[int](
default=0,
),
ninit=hyperparams.Hyperparameter[None](
default=None,
),
),
default='ninit',
description='the seed used by the random number generator.',
semantic_types=['https://metadata.datadrivendiscovery.org/types/ControlParameter'],
)
verbose = hyperparams.Hyperparameter[int](
default=0,
description='Controls the verbosity of the tree building process.',
semantic_types=['https://metadata.datadrivendiscovery.org/types/TuningParameter']
)
pass
class IsolationForest(UnsupervisedOutlierDetectorBase[Inputs, Outputs, Params, Hyperparams]):
    """
    Wrapper of Pyod Isolation Forest with more functionalities.
    The IsolationForest 'isolates' observations by randomly selecting a
    feature and then randomly selecting a split value between the maximum and
    minimum values of the selected feature.
    See :cite:`liu2008isolation,liu2012isolation` for details.
    Since recursive partitioning can be represented by a tree structure, the
    number of splittings required to isolate a sample is equivalent to the path
    length from the root node to the terminating node.
    This path length, averaged over a forest of such random trees, is a
    measure of normality and our decision function.
    Random partitioning produces noticeably shorter paths for anomalies.
    Hence, when a forest of random trees collectively produce shorter path
    lengths for particular samples, they are highly likely to be anomalies.

    Parameters
    ----------
    n_estimators : int, optional (default=100)
        The number of base estimators in the ensemble.
    max_samples : int or float, optional (default="auto")
        The number of samples to draw from X to train each base estimator.
        - If int, then draw `max_samples` samples.
        - If float, then draw `max_samples * X.shape[0]` samples.
        - If "auto", then `max_samples=min(256, n_samples)`.
        If max_samples is larger than the number of samples provided,
        all samples will be used for all trees (no sampling).
    contamination : float in (0., 0.5), optional (default=0.1)
        The amount of contamination of the data set, i.e. the proportion
        of outliers in the data set. Used when fitting to define the threshold
        on the decision function.
    max_features : int or float, optional (default=1.0)
        The number of features to draw from X to train each base estimator.
        - If int, then draw `max_features` features.
        - If float, then draw `max_features * X.shape[1]` features.
    bootstrap : bool, optional (default=False)
        If True, individual trees are fit on random subsets of the training
        data sampled with replacement. If False, sampling without replacement
        is performed.
    behaviour : str, default='new'
        Behaviour of the ``decision_function`` which can be either 'old' or
        'new'. Passing ``behaviour='new'`` makes the ``decision_function``
        change to match other anomaly detection algorithm API which will be
        the default behaviour in the future. As explained in details in the
        ``offset_`` attribute documentation, the ``decision_function`` becomes
        dependent on the contamination parameter, in such a way that 0 becomes
        its natural threshold to detect outliers.
    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.
    verbose : int, optional (default=0)
        Controls the verbosity of the tree building process.

    Attributes
    ----------
    decision_scores_ : numpy array of shape (n_samples,)
        The outlier scores of the training data.
        The higher, the more abnormal. Outliers tend to have higher
        scores. This value is available once the detector is
        fitted.
    threshold_ : float
        The threshold is based on ``contamination``. It is the
        ``n_samples * contamination`` most abnormal samples in
        ``decision_scores_``. The threshold is calculated for generating
        binary outlier labels.
    labels_ : int, either 0 or 1
        The binary labels of the training data. 0 stands for inliers
        and 1 for outliers/anomalies. It is generated by applying
        ``threshold_`` on ``decision_scores_``.
    """

    # NOTE(review): "Taxes A&M University" looks like a typo for "Texas A&M";
    # left unchanged here because the metadata dict is runtime data.
    metadata = metadata_base.PrimitiveMetadata({
        "name": "TODS.anomaly_detection_primitives.IsolationForest",
        "python_path": "d3m.primitives.tods.detection_algorithm.pyod_iforest",
        "source": {'name': "DATALAB @Taxes A&M University", 'contact': 'mailto:khlai037@tamu.edu',
                   'uris': ['https://gitlab.com/lhenry15/tods.git']},
        "algorithm_types": [metadata_base.PrimitiveAlgorithmType.ISOLATION_FOREST, ],
        "primitive_family": metadata_base.PrimitiveFamily.ANOMALY_DETECTION,
        "version": "0.0.1",
        "hyperparams_to_tune": ['n_estimators', 'contamination'],
        "id": str(uuid.uuid3(uuid.NAMESPACE_DNS, 'IsolationForest'))
    })

    def __init__(self, *,
                 hyperparams: Hyperparams,
                 random_seed: int = 0,
                 docker_containers: Dict[str, DockerContainer] = None) -> None:
        super().__init__(hyperparams=hyperparams, random_seed=random_seed, docker_containers=docker_containers)

        # 'contamination' is not declared in the Hyperparams class above, so it
        # presumably comes from Hyperparams_ODBase -- TODO confirm.
        self._clf = IForest(contamination=hyperparams['contamination'],
                            n_estimators=hyperparams['n_estimators'],
                            max_samples=hyperparams['max_samples'],
                            max_features=hyperparams['max_features'],
                            bootstrap=hyperparams['bootstrap'],
                            behaviour=hyperparams['behaviour'],
                            random_state=hyperparams['random_state'],
                            verbose=hyperparams['verbose'],
                            )

        return

    def set_training_data(self, *, inputs: Inputs) -> None:
        """
        Set training data for outlier detection.
        Args:
            inputs: Container DataFrame
        Returns:
            None
        """
        super().set_training_data(inputs=inputs)

    def fit(self, *, timeout: float = None, iterations: int = None) -> CallResult[None]:
        """
        Fit model with training data.
        Args:
            *: Container DataFrame. Time series data up to fit.
        Returns:
            None
        """
        return super().fit()

    def produce(self, *, inputs: Inputs, timeout: float = None, iterations: int = None) -> CallResult[Outputs]:
        """
        Process the testing data.
        Args:
            inputs: Container DataFrame. Time series data up to outlier detection.
        Returns:
            Container DataFrame
            1 marks Outliers, 0 marks normal.
        """
        return super().produce(inputs=inputs, timeout=timeout, iterations=iterations)

    def get_params(self) -> Params:
        """
        Return parameters.
        Args:
            None
        Returns:
            class Params
        """
        return super().get_params()

    def set_params(self, *, params: Params) -> None:
        """
        Set parameters for outlier detection.
        Args:
            params: class Params
        Returns:
            None
        """
        super().set_params(params=params)
| 39.422383 | 174 | 0.67381 | from typing import Any, Callable, List, Dict, Union, Optional, Sequence, Tuple
from numpy import ndarray
from collections import OrderedDict
from scipy import sparse
import os
import sklearn
import numpy
import typing
import warnings
import numpy as np
from sklearn.utils import check_array
from sklearn.exceptions import NotFittedError
from pyod.utils.utility import argmaxn
from d3m.container.numpy import ndarray as d3m_ndarray
from d3m.container import DataFrame as d3m_dataframe
from d3m.metadata import hyperparams, params, base as metadata_base
from d3m import utils
from d3m.base import utils as base_utils
from d3m.exceptions import PrimitiveNotFittedError
from d3m.primitive_interfaces.base import CallResult, DockerContainer
from d3m.primitive_interfaces.unsupervised_learning import UnsupervisedLearnerPrimitiveBase
from d3m.primitive_interfaces.transformer import TransformerPrimitiveBase
from d3m.primitive_interfaces.base import ProbabilisticCompositionalityMixin, ContinueFitMixin
from d3m import exceptions
import pandas
from d3m import container, utils as d3m_utils
from .UODBasePrimitive import Params_ODBase, Hyperparams_ODBase, UnsupervisedOutlierDetectorBase
from pyod.models.iforest import IForest
from typing import Union
import uuid
Inputs = d3m_dataframe
Outputs = d3m_dataframe
class Params(Params_ODBase):
://metadata.datadrivendiscovery.org/types/TuningParameter']
)
max_features = hyperparams.Hyperparameter[float](
default=1.,
description='The number of features to draw from X to train each base estimator.',
semantic_types=['https://metadata.datadrivendiscovery.org/types/TuningParameter']
)
bootstrap = hyperparams.UniformBool(
default=False,
description='If True, individual trees are fit on random subsets of the training data sampled with replacement. If False, sampling without replacement is performed.',
semantic_types=['https://metadata.datadrivendiscovery.org/types/TuningParameter']
)
behaviour = hyperparams.Enumeration[str](
values=['old', 'new'],
default='new',
description='Refer to https://github.com/yzhao062/pyod/blob/master/pyod/models/iforest.py.',
semantic_types=['https://metadata.datadrivendiscovery.org/types/TuningParameter']
)
random_state = hyperparams.Union[Union[int, None]](
configuration=OrderedDict(
init=hyperparams.Hyperparameter[int](
default=0,
),
ninit=hyperparams.Hyperparameter[None](
default=None,
),
),
default='ninit',
description='the seed used by the random number generator.',
semantic_types=['https://metadata.datadrivendiscovery.org/types/ControlParameter'],
)
verbose = hyperparams.Hyperparameter[int](
default=0,
description='Controls the verbosity of the tree building process.',
semantic_types=['https://metadata.datadrivendiscovery.org/types/TuningParameter']
)
pass
class IsolationForest(UnsupervisedOutlierDetectorBase[Inputs, Outputs, Params, Hyperparams]):
metadata = metadata_base.PrimitiveMetadata({
"name": "TODS.anomaly_detection_primitives.IsolationForest",
"python_path": "d3m.primitives.tods.detection_algorithm.pyod_iforest",
"source": {'name': "DATALAB @Taxes A&M University", 'contact': 'mailto:khlai037@tamu.edu',
'uris': ['https://gitlab.com/lhenry15/tods.git']},
"algorithm_types": [metadata_base.PrimitiveAlgorithmType.ISOLATION_FOREST, ],
"primitive_family": metadata_base.PrimitiveFamily.ANOMALY_DETECTION,
"version": "0.0.1",
"hyperparams_to_tune": ['n_estimators', 'contamination'],
"id": str(uuid.uuid3(uuid.NAMESPACE_DNS, 'IsolationForest'))
})
def __init__(self, *,
hyperparams: Hyperparams,
random_seed: int = 0,
docker_containers: Dict[str, DockerContainer] = None) -> None:
super().__init__(hyperparams=hyperparams, random_seed=random_seed, docker_containers=docker_containers)
self._clf = IForest(contamination=hyperparams['contamination'],
n_estimators=hyperparams['n_estimators'],
max_samples=hyperparams['max_samples'],
max_features=hyperparams['max_features'],
bootstrap=hyperparams['bootstrap'],
behaviour=hyperparams['behaviour'],
random_state=hyperparams['random_state'],
verbose=hyperparams['verbose'],
)
return
def set_training_data(self, *, inputs: Inputs) -> None:
super().set_training_data(inputs=inputs)
def fit(self, *, timeout: float = None, iterations: int = None) -> CallResult[None]:
return super().fit()
def produce(self, *, inputs: Inputs, timeout: float = None, iterations: int = None) -> CallResult[Outputs]:
return super().produce(inputs=inputs, timeout=timeout, iterations=iterations)
def get_params(self) -> Params:
return super().get_params()
def set_params(self, *, params: Params) -> None:
super().set_params(params=params)
| true | true |
1c3b32ae49039d2114ead736aae2b810829bd738 | 13,305 | py | Python | testing/test_tmpdir.py | zhangq0813/pytest | 2d9dac95ecbc67c73fc9f834a9a551d93befe159 | [
"MIT"
] | 2 | 2016-06-27T06:44:26.000Z | 2021-08-29T03:03:48.000Z | testing/test_tmpdir.py | zhangq0813/pytest | 2d9dac95ecbc67c73fc9f834a9a551d93befe159 | [
"MIT"
] | null | null | null | testing/test_tmpdir.py | zhangq0813/pytest | 2d9dac95ecbc67c73fc9f834a9a551d93befe159 | [
"MIT"
] | 1 | 2015-12-08T03:13:28.000Z | 2015-12-08T03:13:28.000Z | import os
import stat
import sys
import attr
import pytest
from _pytest import pathlib
from _pytest.pathlib import Path
def test_tmpdir_fixture(testdir):
    """Smoke test: the ``tmpdir`` fixture example runs and its one test passes."""
    example = testdir.copy_example("tmpdir/tmpdir_fixture.py")
    run_result = testdir.runpytest(example)
    run_result.stdout.fnmatch_lines(["*1 passed*"])
@attr.s
class FakeConfig:
    """Minimal stand-in for a pytest ``Config`` used by the tmpdir factories.

    ``trace`` and ``option`` both return ``self`` so that attribute chains
    like ``config.option.basetemp`` and ``config.trace.get(...)`` resolve
    against this single object; ``get`` returns a no-op callable.
    """

    # the --basetemp value the factories will read (via config.option.basetemp)
    basetemp = attr.ib()

    @property
    def trace(self):
        return self

    def get(self, key):
        # trace channels are callables; a no-op lambda is enough here
        return lambda *k: None

    @property
    def option(self):
        return self
class TestTempdirHandler:
    """Unit tests for TempdirFactory / TempPathFactory."""

    def test_mktemp(self, tmp_path):
        """mktemp() numbers directories and never reuses a name for the same stem."""
        from _pytest.tmpdir import TempdirFactory, TempPathFactory

        config = FakeConfig(tmp_path)
        t = TempdirFactory(TempPathFactory.from_config(config))
        tmp = t.mktemp("world")
        # first numbered dir for a stem gets suffix 0
        assert tmp.relto(t.getbasetemp()) == "world0"
        tmp = t.mktemp("this")
        assert tmp.relto(t.getbasetemp()).startswith("this")
        tmp2 = t.mktemp("this")
        assert tmp2.relto(t.getbasetemp()).startswith("this")
        # repeated mktemp() with the same stem must not collide
        assert tmp2 != tmp

    def test_tmppath_relative_basetemp_absolute(self, tmp_path, monkeypatch):
        """#4425: a relative --basetemp is resolved against the current directory."""
        from _pytest.tmpdir import TempPathFactory

        monkeypatch.chdir(tmp_path)
        config = FakeConfig("hello")
        t = TempPathFactory.from_config(config)
        assert t.getbasetemp().resolve() == (tmp_path / "hello").resolve()
class TestConfigTmpdir:
    def test_getbasetemp_custom_removes_old(self, testdir):
        """A custom --basetemp is cleared at session start: stale files disappear."""
        mytemp = testdir.tmpdir.join("xyz")
        p = testdir.makepyfile(
            """
            def test_1(tmpdir):
                pass
        """
        )
        testdir.runpytest(p, "--basetemp=%s" % mytemp)
        mytemp.check()
        # plant a leftover file, then run again with the same basetemp
        mytemp.ensure("hello")
        testdir.runpytest(p, "--basetemp=%s" % mytemp)
        mytemp.check()
        # the leftover from the previous run must have been removed
        assert not mytemp.join("hello").check()
# (basename, is_ok) cases for test_mktemp below: tmpdir_factory.mktemp() must
# reject absolute paths and any basename that escapes the base temporary
# directory via "..", while accepting names that normalize to a safe subdir.
testdata = [
    ("mypath", True),
    ("/mypath1", False),
    ("./mypath1", True),
    ("../mypath3", False),
    ("../../mypath4", False),
    ("mypath5/..", False),
    ("mypath6/../mypath6", True),
    ("mypath7/../mypath7/..", False),
]
@pytest.mark.parametrize("basename, is_ok", testdata)
def test_mktemp(testdir, basename, is_ok):
    """mktemp() accepts safe relative basenames and rejects escaping ones."""
    mytemp = testdir.tmpdir.mkdir("mytemp")
    p = testdir.makepyfile(
        """
        import pytest
        def test_abs_path(tmpdir_factory):
            tmpdir_factory.mktemp('{}', numbered=False)
    """.format(
            basename
        )
    )
    result = testdir.runpytest(p, "--basetemp=%s" % mytemp)
    if is_ok:
        assert result.ret == 0
        assert mytemp.join(basename).check()
    else:
        # an unsafe basename makes mktemp() raise, failing the inner run
        assert result.ret == 1
        result.stdout.fnmatch_lines("*ValueError*")
def test_tmpdir_always_is_realpath(testdir):
    """tmpdir must already be a resolved (real) path even under a symlinked basetemp."""
    # the reason why tmpdir should be a realpath is that
    # when you cd to it and do "os.getcwd()" you will anyway
    # get the realpath. Using the symlinked path can thus
    # easily result in path-inequality
    # XXX if that proves to be a problem, consider using
    # os.environ["PWD"]
    realtemp = testdir.tmpdir.mkdir("myrealtemp")
    linktemp = testdir.tmpdir.join("symlinktemp")
    attempt_symlink_to(linktemp, str(realtemp))
    p = testdir.makepyfile(
        """
        def test_1(tmpdir):
            import os
            assert os.path.realpath(str(tmpdir)) == str(tmpdir)
    """
    )
    result = testdir.runpytest("-s", p, "--basetemp=%s/bt" % linktemp)
    assert not result.ret
def test_tmp_path_always_is_realpath(testdir, monkeypatch):
    # for reasoning see: test_tmpdir_always_is_realpath test-case
    realtemp = testdir.tmpdir.mkdir("myrealtemp")
    linktemp = testdir.tmpdir.join("symlinktemp")
    attempt_symlink_to(linktemp, str(realtemp))
    # route the implicit temproot through the symlink
    monkeypatch.setenv("PYTEST_DEBUG_TEMPROOT", str(linktemp))
    testdir.makepyfile(
        """
        def test_1(tmp_path):
            assert tmp_path.resolve() == tmp_path
    """
    )
    reprec = testdir.inline_run()
    reprec.assertoutcome(passed=1)
def test_tmpdir_too_long_on_parametrization(testdir):
    """A very long parametrization id must not break tmpdir creation
    (directory names are shortened to respect filesystem limits)."""
    testdir.makepyfile(
        """
        import pytest
        @pytest.mark.parametrize("arg", ["1"*1000])
        def test_some(arg, tmpdir):
            tmpdir.ensure("hello")
    """
    )
    reprec = testdir.inline_run()
    reprec.assertoutcome(passed=1)
def test_tmpdir_factory(testdir):
    """tmpdir_factory is usable from a session-scoped fixture."""
    testdir.makepyfile(
        """
        import pytest
        @pytest.fixture(scope='session')
        def session_dir(tmpdir_factory):
            return tmpdir_factory.mktemp('data', numbered=False)
        def test_some(session_dir):
            assert session_dir.isdir()
    """
    )
    reprec = testdir.inline_run()
    reprec.assertoutcome(passed=1)
def test_tmpdir_fallback_tox_env(testdir, monkeypatch):
    """Test that tmpdir works even if environment variables required by getpass
    module are missing (#1010).
    """
    # simulate a tox-like environment without user identity variables
    monkeypatch.delenv("USER", raising=False)
    monkeypatch.delenv("USERNAME", raising=False)
    testdir.makepyfile(
        """
        import pytest
        def test_some(tmpdir):
            assert tmpdir.isdir()
    """
    )
    reprec = testdir.inline_run()
    reprec.assertoutcome(passed=1)
@pytest.fixture
def break_getuser(monkeypatch):
    """Make user-name lookup impossible: invalid uid and no identity env vars."""
    monkeypatch.setattr("os.getuid", lambda: -1)
    # taken from python 2.7/3.4: the env vars getpass.getuser() consults
    for envvar in ("LOGNAME", "USER", "LNAME", "USERNAME"):
        monkeypatch.delenv(envvar, raising=False)
@pytest.mark.usefixtures("break_getuser")
@pytest.mark.skipif(sys.platform.startswith("win"), reason="no os.getuid on windows")
def test_tmpdir_fallback_uid_not_found(testdir):
    """Test that tmpdir works even if the current process's user id does not
    correspond to a valid user.
    """
    testdir.makepyfile(
        """
        import pytest
        def test_some(tmpdir):
            assert tmpdir.isdir()
    """
    )
    reprec = testdir.inline_run()
    reprec.assertoutcome(passed=1)
@pytest.mark.usefixtures("break_getuser")
@pytest.mark.skipif(sys.platform.startswith("win"), reason="no os.getuid on windows")
def test_get_user_uid_not_found():
    """Test that get_user() function works even if the current process's
    user id does not correspond to a valid user (e.g. running pytest in a
    Docker container with 'docker run -u'.
    """
    from _pytest.tmpdir import get_user

    # with no resolvable identity, get_user() must degrade to None, not raise
    assert get_user() is None
@pytest.mark.skipif(not sys.platform.startswith("win"), reason="win only")
def test_get_user(monkeypatch):
    """Test that get_user() function works even if environment variables
    required by getpass module are missing from the environment on Windows
    (#1010).
    """
    from _pytest.tmpdir import get_user

    monkeypatch.delenv("USER", raising=False)
    monkeypatch.delenv("USERNAME", raising=False)
    assert get_user() is None
class TestNumberedDir:
    """Tests for the numbered-directory helpers in _pytest.pathlib."""

    PREFIX = "fun-"

    def test_make(self, tmp_path):
        """make_numbered_dir() creates PREFIX0..PREFIX9 and keeps a 'current' symlink."""
        from _pytest.pathlib import make_numbered_dir

        for i in range(10):
            d = make_numbered_dir(root=tmp_path, prefix=self.PREFIX)
            assert d.name.startswith(self.PREFIX)
            assert d.name.endswith(str(i))

        symlink = tmp_path.joinpath(self.PREFIX + "current")
        if symlink.exists():
            # unix: the convenience symlink tracks the latest numbered dir
            assert symlink.is_symlink()
            assert symlink.resolve() == d.resolve()

    def test_cleanup_lock_create(self, tmp_path):
        """A second cleanup lock in the same dir is refused while one exists."""
        d = tmp_path.joinpath("test")
        d.mkdir()
        from _pytest.pathlib import create_cleanup_lock

        lockfile = create_cleanup_lock(d)
        with pytest.raises(OSError, match="cannot create lockfile in .*"):
            create_cleanup_lock(d)

        lockfile.unlink()

    def test_lock_register_cleanup_removal(self, tmp_path):
        """The registered atexit-style callback removes the lock only for its own pid."""
        from _pytest.pathlib import create_cleanup_lock, register_cleanup_lock_removal

        lock = create_cleanup_lock(tmp_path)

        registry = []
        register_cleanup_lock_removal(lock, register=registry.append)

        (cleanup_func,) = registry

        assert lock.is_file()

        # a different pid must not remove someone else's lock
        cleanup_func(original_pid="intentionally_different")

        assert lock.is_file()

        cleanup_func()

        assert not lock.exists()

        # removal is idempotent
        cleanup_func()

        assert not lock.exists()

    def _do_cleanup(self, tmp_path):
        # helper: create ten numbered dirs, then clean keeping the last two
        self.test_make(tmp_path)
        from _pytest.pathlib import cleanup_numbered_dir

        cleanup_numbered_dir(
            root=tmp_path,
            prefix=self.PREFIX,
            keep=2,
            consider_lock_dead_if_created_before=0,
        )

    def test_cleanup_keep(self, tmp_path):
        """After cleanup with keep=2 exactly two real dirs remain."""
        self._do_cleanup(tmp_path)
        # unpacking into exactly two names asserts the count implicitly
        a, b = (x for x in tmp_path.iterdir() if not x.is_symlink())
        print(a, b)

    def test_cleanup_locked(self, tmp_path):
        """A live lock protects a dir; a lock older than the cutoff is considered dead."""
        from _pytest import pathlib

        p = pathlib.make_numbered_dir(root=tmp_path, prefix=self.PREFIX)

        pathlib.create_cleanup_lock(p)

        assert not pathlib.ensure_deletable(
            p, consider_lock_dead_if_created_before=p.stat().st_mtime - 1
        )
        assert pathlib.ensure_deletable(
            p, consider_lock_dead_if_created_before=p.stat().st_mtime + 1
        )

    def test_cleanup_ignores_symlink(self, tmp_path):
        """Cleanup must not follow/remove the 'current' convenience symlink."""
        the_symlink = tmp_path / (self.PREFIX + "current")
        attempt_symlink_to(the_symlink, tmp_path / (self.PREFIX + "5"))
        self._do_cleanup(tmp_path)

    def test_removal_accepts_lock(self, tmp_path):
        """maybe_delete_a_numbered_dir() leaves a locked dir in place."""
        folder = pathlib.make_numbered_dir(root=tmp_path, prefix=self.PREFIX)
        pathlib.create_cleanup_lock(folder)
        pathlib.maybe_delete_a_numbered_dir(folder)
        assert folder.is_dir()
class TestRmRf:
    """Tests for _pytest.pathlib.rm_rf and its error hook on_rm_rf_error."""

    def test_rm_rf(self, tmp_path):
        """rm_rf removes empty and non-empty directories."""
        from _pytest.pathlib import rm_rf

        adir = tmp_path / "adir"
        adir.mkdir()
        rm_rf(adir)

        assert not adir.exists()

        adir.mkdir()
        afile = adir / "afile"
        afile.write_bytes(b"aa")

        rm_rf(adir)
        assert not adir.exists()

    def test_rm_rf_with_read_only_file(self, tmp_path):
        """Ensure rm_rf can remove directories with read-only files in them (#5524)"""
        from _pytest.pathlib import rm_rf

        fn = tmp_path / "dir/foo.txt"
        fn.parent.mkdir()

        fn.touch()

        self.chmod_r(fn)

        rm_rf(fn.parent)

        assert not fn.parent.is_dir()

    def chmod_r(self, path):
        # helper: strip the write bit to make *path* read-only
        mode = os.stat(str(path)).st_mode
        os.chmod(str(path), mode & ~stat.S_IWRITE)

    def test_rm_rf_with_read_only_directory(self, tmp_path):
        """Ensure rm_rf can remove read-only directories (#5524)"""
        from _pytest.pathlib import rm_rf

        adir = tmp_path / "dir"
        adir.mkdir()

        (adir / "foo.txt").touch()
        self.chmod_r(adir)

        rm_rf(adir)

        assert not adir.is_dir()

    def test_on_rm_rf_error(self, tmp_path):
        """on_rm_rf_error warns on unknown errors/functions, ignores FileNotFoundError,
        and retries known functions after making the path writable."""
        from _pytest.pathlib import on_rm_rf_error

        adir = tmp_path / "dir"
        adir.mkdir()

        fn = adir / "foo.txt"
        fn.touch()
        self.chmod_r(fn)

        # unknown exception
        with pytest.warns(pytest.PytestWarning):
            exc_info = (None, RuntimeError(), None)
            on_rm_rf_error(os.unlink, str(fn), exc_info, start_path=tmp_path)
            assert fn.is_file()

        # we ignore FileNotFoundError
        exc_info = (None, FileNotFoundError(), None)
        assert not on_rm_rf_error(None, str(fn), exc_info, start_path=tmp_path)

        # unknown function
        with pytest.warns(
            pytest.PytestWarning,
            match=r"^\(rm_rf\) unknown function None when removing .*foo.txt:\nNone: ",
        ):
            exc_info = (None, PermissionError(), None)
            on_rm_rf_error(None, str(fn), exc_info, start_path=tmp_path)
            assert fn.is_file()

        # ignored function
        with pytest.warns(None) as warninfo:
            exc_info = (None, PermissionError(), None)
            on_rm_rf_error(os.open, str(fn), exc_info, start_path=tmp_path)
            assert fn.is_file()
        assert not [x.message for x in warninfo]

        # a known function (os.unlink) is retried and succeeds
        exc_info = (None, PermissionError(), None)
        on_rm_rf_error(os.unlink, str(fn), exc_info, start_path=tmp_path)
        assert not fn.is_file()
def attempt_symlink_to(path, to_path):
    """Create a symlink at *path* pointing to *to_path*.

    Skips the calling test when the platform does not support symlinks or
    the process lacks sufficient privileges (common on Windows).
    """
    link, target = Path(path), Path(to_path)
    try:
        link.symlink_to(target)
    except OSError:
        pytest.skip("could not create symbolic link")
def test_tmpdir_equals_tmp_path(tmpdir, tmp_path):
    # both fixtures must point at the same underlying directory
    assert Path(tmpdir) == tmp_path
def test_basetemp_with_read_only_files(testdir):
    """Integration test for #5524"""
    testdir.makepyfile(
        """
        import os
        import stat

        def test(tmp_path):
            fn = tmp_path / 'foo.txt'
            fn.write_text('hello')
            mode = os.stat(str(fn)).st_mode
            os.chmod(str(fn), mode & ~stat.S_IREAD)
    """
    )
    result = testdir.runpytest("--basetemp=tmp")
    assert result.ret == 0
    # running a second time and ensure we don't crash
    result = testdir.runpytest("--basetemp=tmp")
    assert result.ret == 0
| 28.798701 | 87 | 0.634573 | import os
import stat
import sys
import attr
import pytest
from _pytest import pathlib
from _pytest.pathlib import Path
def test_tmpdir_fixture(testdir):
p = testdir.copy_example("tmpdir/tmpdir_fixture.py")
results = testdir.runpytest(p)
results.stdout.fnmatch_lines(["*1 passed*"])
@attr.s
class FakeConfig:
basetemp = attr.ib()
@property
def trace(self):
return self
def get(self, key):
return lambda *k: None
@property
def option(self):
return self
class TestTempdirHandler:
def test_mktemp(self, tmp_path):
from _pytest.tmpdir import TempdirFactory, TempPathFactory
config = FakeConfig(tmp_path)
t = TempdirFactory(TempPathFactory.from_config(config))
tmp = t.mktemp("world")
assert tmp.relto(t.getbasetemp()) == "world0"
tmp = t.mktemp("this")
assert tmp.relto(t.getbasetemp()).startswith("this")
tmp2 = t.mktemp("this")
assert tmp2.relto(t.getbasetemp()).startswith("this")
assert tmp2 != tmp
def test_tmppath_relative_basetemp_absolute(self, tmp_path, monkeypatch):
from _pytest.tmpdir import TempPathFactory
monkeypatch.chdir(tmp_path)
config = FakeConfig("hello")
t = TempPathFactory.from_config(config)
assert t.getbasetemp().resolve() == (tmp_path / "hello").resolve()
class TestConfigTmpdir:
def test_getbasetemp_custom_removes_old(self, testdir):
mytemp = testdir.tmpdir.join("xyz")
p = testdir.makepyfile(
"""
def test_1(tmpdir):
pass
"""
)
testdir.runpytest(p, "--basetemp=%s" % mytemp)
mytemp.check()
mytemp.ensure("hello")
testdir.runpytest(p, "--basetemp=%s" % mytemp)
mytemp.check()
assert not mytemp.join("hello").check()
testdata = [
("mypath", True),
("/mypath1", False),
("./mypath1", True),
("../mypath3", False),
("../../mypath4", False),
("mypath5/..", False),
("mypath6/../mypath6", True),
("mypath7/../mypath7/..", False),
]
@pytest.mark.parametrize("basename, is_ok", testdata)
def test_mktemp(testdir, basename, is_ok):
mytemp = testdir.tmpdir.mkdir("mytemp")
p = testdir.makepyfile(
"""
import pytest
def test_abs_path(tmpdir_factory):
tmpdir_factory.mktemp('{}', numbered=False)
""".format(
basename
)
)
result = testdir.runpytest(p, "--basetemp=%s" % mytemp)
if is_ok:
assert result.ret == 0
assert mytemp.join(basename).check()
else:
assert result.ret == 1
result.stdout.fnmatch_lines("*ValueError*")
def test_tmpdir_always_is_realpath(testdir):
realtemp = testdir.tmpdir.mkdir("myrealtemp")
linktemp = testdir.tmpdir.join("symlinktemp")
attempt_symlink_to(linktemp, str(realtemp))
p = testdir.makepyfile(
"""
def test_1(tmpdir):
import os
assert os.path.realpath(str(tmpdir)) == str(tmpdir)
"""
)
result = testdir.runpytest("-s", p, "--basetemp=%s/bt" % linktemp)
assert not result.ret
def test_tmp_path_always_is_realpath(testdir, monkeypatch):
realtemp = testdir.tmpdir.mkdir("myrealtemp")
linktemp = testdir.tmpdir.join("symlinktemp")
attempt_symlink_to(linktemp, str(realtemp))
monkeypatch.setenv("PYTEST_DEBUG_TEMPROOT", str(linktemp))
testdir.makepyfile(
"""
def test_1(tmp_path):
assert tmp_path.resolve() == tmp_path
"""
)
reprec = testdir.inline_run()
reprec.assertoutcome(passed=1)
def test_tmpdir_too_long_on_parametrization(testdir):
testdir.makepyfile(
"""
import pytest
@pytest.mark.parametrize("arg", ["1"*1000])
def test_some(arg, tmpdir):
tmpdir.ensure("hello")
"""
)
reprec = testdir.inline_run()
reprec.assertoutcome(passed=1)
def test_tmpdir_factory(testdir):
testdir.makepyfile(
"""
import pytest
@pytest.fixture(scope='session')
def session_dir(tmpdir_factory):
return tmpdir_factory.mktemp('data', numbered=False)
def test_some(session_dir):
assert session_dir.isdir()
"""
)
reprec = testdir.inline_run()
reprec.assertoutcome(passed=1)
def test_tmpdir_fallback_tox_env(testdir, monkeypatch):
monkeypatch.delenv("USER", raising=False)
monkeypatch.delenv("USERNAME", raising=False)
testdir.makepyfile(
"""
import pytest
def test_some(tmpdir):
assert tmpdir.isdir()
"""
)
reprec = testdir.inline_run()
reprec.assertoutcome(passed=1)
@pytest.fixture
def break_getuser(monkeypatch):
monkeypatch.setattr("os.getuid", lambda: -1)
for envvar in ("LOGNAME", "USER", "LNAME", "USERNAME"):
monkeypatch.delenv(envvar, raising=False)
@pytest.mark.usefixtures("break_getuser")
@pytest.mark.skipif(sys.platform.startswith("win"), reason="no os.getuid on windows")
def test_tmpdir_fallback_uid_not_found(testdir):
testdir.makepyfile(
"""
import pytest
def test_some(tmpdir):
assert tmpdir.isdir()
"""
)
reprec = testdir.inline_run()
reprec.assertoutcome(passed=1)
@pytest.mark.usefixtures("break_getuser")
@pytest.mark.skipif(sys.platform.startswith("win"), reason="no os.getuid on windows")
def test_get_user_uid_not_found():
from _pytest.tmpdir import get_user
assert get_user() is None
@pytest.mark.skipif(not sys.platform.startswith("win"), reason="win only")
def test_get_user(monkeypatch):
from _pytest.tmpdir import get_user
monkeypatch.delenv("USER", raising=False)
monkeypatch.delenv("USERNAME", raising=False)
assert get_user() is None
class TestNumberedDir:
PREFIX = "fun-"
def test_make(self, tmp_path):
from _pytest.pathlib import make_numbered_dir
for i in range(10):
d = make_numbered_dir(root=tmp_path, prefix=self.PREFIX)
assert d.name.startswith(self.PREFIX)
assert d.name.endswith(str(i))
symlink = tmp_path.joinpath(self.PREFIX + "current")
if symlink.exists():
assert symlink.is_symlink()
assert symlink.resolve() == d.resolve()
def test_cleanup_lock_create(self, tmp_path):
d = tmp_path.joinpath("test")
d.mkdir()
from _pytest.pathlib import create_cleanup_lock
lockfile = create_cleanup_lock(d)
with pytest.raises(OSError, match="cannot create lockfile in .*"):
create_cleanup_lock(d)
lockfile.unlink()
def test_lock_register_cleanup_removal(self, tmp_path):
from _pytest.pathlib import create_cleanup_lock, register_cleanup_lock_removal
lock = create_cleanup_lock(tmp_path)
registry = []
register_cleanup_lock_removal(lock, register=registry.append)
(cleanup_func,) = registry
assert lock.is_file()
cleanup_func(original_pid="intentionally_different")
assert lock.is_file()
cleanup_func()
assert not lock.exists()
cleanup_func()
assert not lock.exists()
def _do_cleanup(self, tmp_path):
self.test_make(tmp_path)
from _pytest.pathlib import cleanup_numbered_dir
cleanup_numbered_dir(
root=tmp_path,
prefix=self.PREFIX,
keep=2,
consider_lock_dead_if_created_before=0,
)
def test_cleanup_keep(self, tmp_path):
self._do_cleanup(tmp_path)
a, b = (x for x in tmp_path.iterdir() if not x.is_symlink())
print(a, b)
def test_cleanup_locked(self, tmp_path):
from _pytest import pathlib
p = pathlib.make_numbered_dir(root=tmp_path, prefix=self.PREFIX)
pathlib.create_cleanup_lock(p)
assert not pathlib.ensure_deletable(
p, consider_lock_dead_if_created_before=p.stat().st_mtime - 1
)
assert pathlib.ensure_deletable(
p, consider_lock_dead_if_created_before=p.stat().st_mtime + 1
)
def test_cleanup_ignores_symlink(self, tmp_path):
the_symlink = tmp_path / (self.PREFIX + "current")
attempt_symlink_to(the_symlink, tmp_path / (self.PREFIX + "5"))
self._do_cleanup(tmp_path)
def test_removal_accepts_lock(self, tmp_path):
folder = pathlib.make_numbered_dir(root=tmp_path, prefix=self.PREFIX)
pathlib.create_cleanup_lock(folder)
pathlib.maybe_delete_a_numbered_dir(folder)
assert folder.is_dir()
class TestRmRf:
def test_rm_rf(self, tmp_path):
from _pytest.pathlib import rm_rf
adir = tmp_path / "adir"
adir.mkdir()
rm_rf(adir)
assert not adir.exists()
adir.mkdir()
afile = adir / "afile"
afile.write_bytes(b"aa")
rm_rf(adir)
assert not adir.exists()
def test_rm_rf_with_read_only_file(self, tmp_path):
from _pytest.pathlib import rm_rf
fn = tmp_path / "dir/foo.txt"
fn.parent.mkdir()
fn.touch()
self.chmod_r(fn)
rm_rf(fn.parent)
assert not fn.parent.is_dir()
def chmod_r(self, path):
mode = os.stat(str(path)).st_mode
os.chmod(str(path), mode & ~stat.S_IWRITE)
def test_rm_rf_with_read_only_directory(self, tmp_path):
from _pytest.pathlib import rm_rf
adir = tmp_path / "dir"
adir.mkdir()
(adir / "foo.txt").touch()
self.chmod_r(adir)
rm_rf(adir)
assert not adir.is_dir()
def test_on_rm_rf_error(self, tmp_path):
from _pytest.pathlib import on_rm_rf_error
adir = tmp_path / "dir"
adir.mkdir()
fn = adir / "foo.txt"
fn.touch()
self.chmod_r(fn)
with pytest.warns(pytest.PytestWarning):
exc_info = (None, RuntimeError(), None)
on_rm_rf_error(os.unlink, str(fn), exc_info, start_path=tmp_path)
assert fn.is_file()
exc_info = (None, FileNotFoundError(), None)
assert not on_rm_rf_error(None, str(fn), exc_info, start_path=tmp_path)
with pytest.warns(
pytest.PytestWarning,
match=r"^\(rm_rf\) unknown function None when removing .*foo.txt:\nNone: ",
):
exc_info = (None, PermissionError(), None)
on_rm_rf_error(None, str(fn), exc_info, start_path=tmp_path)
assert fn.is_file()
with pytest.warns(None) as warninfo:
exc_info = (None, PermissionError(), None)
on_rm_rf_error(os.open, str(fn), exc_info, start_path=tmp_path)
assert fn.is_file()
assert not [x.message for x in warninfo]
exc_info = (None, PermissionError(), None)
on_rm_rf_error(os.unlink, str(fn), exc_info, start_path=tmp_path)
assert not fn.is_file()
def attempt_symlink_to(path, to_path):
try:
Path(path).symlink_to(Path(to_path))
except OSError:
pytest.skip("could not create symbolic link")
def test_tmpdir_equals_tmp_path(tmpdir, tmp_path):
assert Path(tmpdir) == tmp_path
def test_basetemp_with_read_only_files(testdir):
testdir.makepyfile(
"""
import os
import stat
def test(tmp_path):
fn = tmp_path / 'foo.txt'
fn.write_text('hello')
mode = os.stat(str(fn)).st_mode
os.chmod(str(fn), mode & ~stat.S_IREAD)
"""
)
result = testdir.runpytest("--basetemp=tmp")
assert result.ret == 0
result = testdir.runpytest("--basetemp=tmp")
assert result.ret == 0
| true | true |
1c3b32ec0caeaa5fc611f6e4bfcc1c3e2e4a6792 | 1,402 | py | Python | ceres/cmds/stop.py | duderino999/ceres-combineharvester | f63ab6c4d0e33c3b6550c1f5641f28ab2c68b001 | [
"Apache-2.0"
] | 39 | 2021-08-04T14:49:27.000Z | 2022-03-29T16:30:19.000Z | ceres/cmds/stop.py | rickguo216/ceres-combineharvester | e93b26a77b1fc4fe9de80d10f745b09a13f9c288 | [
"Apache-2.0"
] | 30 | 2021-08-19T22:44:31.000Z | 2022-03-29T19:09:26.000Z | ceres/cmds/stop.py | rickguo216/ceres-combineharvester | e93b26a77b1fc4fe9de80d10f745b09a13f9c288 | [
"Apache-2.0"
] | 23 | 2021-08-07T07:33:20.000Z | 2022-03-27T11:15:00.000Z | import sys
from pathlib import Path
import click
from ceres.util.service_groups import all_groups, services_for_groups
async def async_stop(root_path: Path, group: str, stop_daemon: bool) -> int:
from ceres.daemon.client import connect_to_daemon_and_validate
daemon = await connect_to_daemon_and_validate(root_path)
if daemon is None:
print("Couldn't connect to ceres daemon")
return 1
if stop_daemon:
r = await daemon.exit()
await daemon.close()
print(f"daemon: {r}")
return 0
return_val = 0
for service in services_for_groups(group):
print(f"{service}: ", end="", flush=True)
if not await daemon.is_running(service_name=service):
print("Not running")
elif await daemon.stop_service(service_name=service):
print("Stopped")
else:
print("Stop failed")
return_val = 1
await daemon.close()
return return_val
@click.command("stop", short_help="Stop services")
@click.option("-d", "--daemon", is_flag=True, type=bool, help="Stop daemon")
@click.argument("group", type=click.Choice(all_groups()), nargs=-1, required=True)
@click.pass_context
def stop_cmd(ctx: click.Context, daemon: bool, group: str) -> None:
import asyncio
sys.exit(asyncio.get_event_loop().run_until_complete(async_stop(ctx.obj["root_path"], group, daemon)))
| 29.829787 | 106 | 0.675464 | import sys
from pathlib import Path
import click
from ceres.util.service_groups import all_groups, services_for_groups
async def async_stop(root_path: Path, group: str, stop_daemon: bool) -> int:
from ceres.daemon.client import connect_to_daemon_and_validate
daemon = await connect_to_daemon_and_validate(root_path)
if daemon is None:
print("Couldn't connect to ceres daemon")
return 1
if stop_daemon:
r = await daemon.exit()
await daemon.close()
print(f"daemon: {r}")
return 0
return_val = 0
for service in services_for_groups(group):
print(f"{service}: ", end="", flush=True)
if not await daemon.is_running(service_name=service):
print("Not running")
elif await daemon.stop_service(service_name=service):
print("Stopped")
else:
print("Stop failed")
return_val = 1
await daemon.close()
return return_val
@click.command("stop", short_help="Stop services")
@click.option("-d", "--daemon", is_flag=True, type=bool, help="Stop daemon")
@click.argument("group", type=click.Choice(all_groups()), nargs=-1, required=True)
@click.pass_context
def stop_cmd(ctx: click.Context, daemon: bool, group: str) -> None:
import asyncio
sys.exit(asyncio.get_event_loop().run_until_complete(async_stop(ctx.obj["root_path"], group, daemon)))
| true | true |
1c3b32f7451e1d9f5b69fab805e7fcfefbf65a3d | 7,541 | py | Python | blog/settings.py | JeffreyDrJ/myfirstblog | 5b1f0fbd6c85027bad7d0e24d67251f41da53fac | [
"MIT"
] | null | null | null | blog/settings.py | JeffreyDrJ/myfirstblog | 5b1f0fbd6c85027bad7d0e24d67251f41da53fac | [
"MIT"
] | 1 | 2021-05-29T17:09:43.000Z | 2021-05-29T17:09:43.000Z | blog/settings.py | JeffreyDrJ/myfirstblog | 5b1f0fbd6c85027bad7d0e24d67251f41da53fac | [
"MIT"
] | null | null | null | """
Django settings for blog project.
Generated by 'django-admin startproject' using Django 3.2.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
from pathlib import Path
import os
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'django-insecure-r+%^+a^s#d$gyexts9osfo)yb=4@w(+$=(8iv5r59i@q14(f+x'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
#TODO:内网访问10.88.137.43
ALLOWED_HOSTS = []
# Application definition,迁移前必须做
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles', # https://www.cnblogs.com/Andy963/p/Django.html
'users.apps.UsersConfig', # 子应用users 注册
'home.apps.HomeConfig', # 子应用home 注册
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'blog.urls' # manage.py所在文件夹为根目录,找到blog/urls.py
# templates链接
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
# 'builtins':['django.templatetags.static'] #这样以后在模版中就可以直接使用static标签,而不用手动的load了。
},
},
]
WSGI_APPLICATION = 'blog.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
# Mysql 链接
DATABASES = { # mysql配置信息
'default': {
# 'ENGINE': 'django.db.backends.sqlite3',
# 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
'ENGINE': 'django.db.backends.mysql', # 数据库引擎改成mysql
'HOST': '127.0.0.1', # 数据库主机改成localhost
'PORT': 3306, # 数据库端口
'USER': 'Jeffrey', # 数据库用户名
'PASSWORD': 'ding', # 数据库用户密码
'NAME': 'blog' # 数据库名字
}
}
# Redis 链接(配置信息)
CACHES = {
"default": { # 默认,redis#0号库
"BACKEND": "django_redis.cache.RedisCache",
"LOCATION": "redis://127.0.0.1:6379/0",
"OPTIONS": {
"CLIENT_CLASS": "django_redis.client.DefaultClient",
}
},
"session": { # session信息保存在:#1号库
"BACKEND": "django_redis.cache.RedisCache",
"LOCATION": "redis://127.0.0.1:6379/1",
"OPTIONS": {
"CLIENT_CLASS": "django_redis.client.DefaultClient",
}
},
}
# session由数据库存储改为redis存储
SESSION_ENGINE = "django.contrib.sessions.backends.cache"
SESSION_CACHE_ALIAS = "session"
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
# LANGUAGE_CODE = 'en-us'
LANGUAGE_CODE = 'zh-hans'
# TIME_ZONE = 'UTC'
TIME_ZONE = 'Asia/Shanghai'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# 日志
import time
import logging
# 日志
import time
cur_path = os.path.dirname(os.path.realpath(__file__)) # log_path是存放日志的路径
log_path = os.path.join(os.path.dirname(cur_path), 'logs')
if not os.path.exists(log_path):
os.mkdir(log_path) # 如果不存在这个logs文件夹,就自动创建一个
LOGGING = {
'version': 1,
'disable_existing_loggers': True,
'formatters': {
# 日志格式
'standard': {
'format': '[%(asctime)s] [%(filename)s:%(lineno)d] [%(module)s:%(funcName)s] '
'[%(levelname)s]- %(message)s'},
'simple': { # 简单格式
'format': '%(levelname)s %(message)s'
},
},
# 过滤
'filters': {
# 'require_debug_true': { # django在debug模式下才输出日志
# '()': 'django.utils.log.RequireDebugTrue',
},
# 定义具体处理日志的方式
'handlers': {
# 默认记录所有日志
'default': {
'level': 'INFO',
'class': 'logging.handlers.RotatingFileHandler',
'filename': os.path.join(log_path, 'all-{}.log'.format(time.strftime('%Y-%m-%d'))),
'maxBytes': 1024 * 1024 * 5, # 文件大小
'backupCount': 5, # 备份数
'formatter': 'standard', # 输出格式
'encoding': 'utf-8', # 设置默认编码,否则打印出来汉字乱码
},
# 输出错误日志
'error': {
'level': 'ERROR',
'class': 'logging.handlers.RotatingFileHandler',
'filename': os.path.join(log_path, 'error-{}.log'.format(time.strftime('%Y-%m-%d'))),
'maxBytes': 1024 * 1024 * 5, # 文件大小
'backupCount': 5, # 备份数
'formatter': 'standard', # 输出格式
'encoding': 'utf-8', # 设置默认编码
},
# 控制台输出
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'standard'
},
# 输出info日志
'info': {
'level': 'INFO',
'class': 'logging.handlers.RotatingFileHandler',
'filename': os.path.join(log_path, 'info-{}.log'.format(time.strftime('%Y-%m-%d'))),
'maxBytes': 1024 * 1024 * 5,
'backupCount': 5,
'formatter': 'standard',
'encoding': 'utf-8', # 设置默认编码
},
},
# 配置用哪几种 handlers 来处理日志
'loggers': {
# 类型 为 django 处理所有类型的日志, 默认调用
'django': { # 定义名为django的日志器
'handlers': ['default', 'console'], # 同时向终端与文件中输出日志
'level': 'INFO', # 日志器接受的最低级别日志
'propagate': False # 是否继续传递日志信号
},
# log 调用时需要当作参数传入
'log': {
'handlers': ['error', 'info', 'console', 'default'],
'level': 'INFO',
'propagate': True
},
}
}
# 和static静态文件的链接
STATIC_URL = '/static/' # 别名
STATICFILES_DIRS = [
os.path.join(BASE_DIR, "static"),
]
# 替换系统的User 来使用自定义的User(用户模型)
# 配置信息为 ‘子应用名.模型类型’
AUTH_USER_MODEL = 'users.User'
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
# 修改系统的未登录默认跳转链接:
LOGIN_URL = '/login/'
# 设置上传的头像至blog/download/
MEDIA_ROOT = os.path.join(BASE_DIR,'download/')
# 设置图片访问的统一路由(用来解决头像不显示,路径错误http://127.0.0.1:8000/avatars/20210503/)
# 这时候会变成/download/avatar/...
MEDIA_URL = '/download/'
| 30.407258 | 97 | 0.610396 |
from pathlib import Path
import os
BASE_DIR = Path(__file__).resolve().parent.parent
SECRET_KEY = 'django-insecure-r+%^+a^s#d$gyexts9osfo)yb=4@w(+$=(8iv5r59i@q14(f+x'
DEBUG = True
#TODO:内网访问10.88.137.43
ALLOWED_HOSTS = []
# Application definition,迁移前必须做
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles', # https://www.cnblogs.com/Andy963/p/Django.html
'users.apps.UsersConfig', # 子应用users 注册
'home.apps.HomeConfig', # 子应用home 注册
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'blog.urls' # manage.py所在文件夹为根目录,找到blog/urls.py
# templates链接
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
# 'builtins':['django.templatetags.static'] #这样以后在模版中就可以直接使用static标签,而不用手动的load了。
},
},
]
WSGI_APPLICATION = 'blog.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
# Mysql 链接
DATABASES = { # mysql配置信息
'default': {
# 'ENGINE': 'django.db.backends.sqlite3',
# 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
'ENGINE': 'django.db.backends.mysql', # 数据库引擎改成mysql
'HOST': '127.0.0.1', # 数据库主机改成localhost
'PORT': 3306, # 数据库端口
'USER': 'Jeffrey', # 数据库用户名
'PASSWORD': 'ding', # 数据库用户密码
'NAME': 'blog' # 数据库名字
}
}
# Redis 链接(配置信息)
CACHES = {
"default": { # 默认,redis#0号库
"BACKEND": "django_redis.cache.RedisCache",
"LOCATION": "redis://127.0.0.1:6379/0",
"OPTIONS": {
"CLIENT_CLASS": "django_redis.client.DefaultClient",
}
},
"session": { # session信息保存在:#1号库
"BACKEND": "django_redis.cache.RedisCache",
"LOCATION": "redis://127.0.0.1:6379/1",
"OPTIONS": {
"CLIENT_CLASS": "django_redis.client.DefaultClient",
}
},
}
# session由数据库存储改为redis存储
SESSION_ENGINE = "django.contrib.sessions.backends.cache"
SESSION_CACHE_ALIAS = "session"
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
# LANGUAGE_CODE = 'en-us'
LANGUAGE_CODE = 'zh-hans'
# TIME_ZONE = 'UTC'
TIME_ZONE = 'Asia/Shanghai'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# 日志
import time
import logging
# 日志
import time
cur_path = os.path.dirname(os.path.realpath(__file__)) # log_path是存放日志的路径
log_path = os.path.join(os.path.dirname(cur_path), 'logs')
if not os.path.exists(log_path):
os.mkdir(log_path) # 如果不存在这个logs文件夹,就自动创建一个
LOGGING = {
'version': 1,
'disable_existing_loggers': True,
'formatters': {
# 日志格式
'standard': {
'format': '[%(asctime)s] [%(filename)s:%(lineno)d] [%(module)s:%(funcName)s] '
'[%(levelname)s]- %(message)s'},
'simple': { # 简单格式
'format': '%(levelname)s %(message)s'
},
},
# 过滤
'filters': {
# 'require_debug_true': { # django在debug模式下才输出日志
# '()': 'django.utils.log.RequireDebugTrue',
},
# 定义具体处理日志的方式
'handlers': {
# 默认记录所有日志
'default': {
'level': 'INFO',
'class': 'logging.handlers.RotatingFileHandler',
'filename': os.path.join(log_path, 'all-{}.log'.format(time.strftime('%Y-%m-%d'))),
'maxBytes': 1024 * 1024 * 5, # 文件大小
'backupCount': 5, # 备份数
'formatter': 'standard', # 输出格式
'encoding': 'utf-8', # 设置默认编码,否则打印出来汉字乱码
},
# 输出错误日志
'error': {
'level': 'ERROR',
'class': 'logging.handlers.RotatingFileHandler',
'filename': os.path.join(log_path, 'error-{}.log'.format(time.strftime('%Y-%m-%d'))),
'maxBytes': 1024 * 1024 * 5, # 文件大小
'backupCount': 5, # 备份数
'formatter': 'standard', # 输出格式
'encoding': 'utf-8', # 设置默认编码
},
# 控制台输出
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'standard'
},
# 输出info日志
'info': {
'level': 'INFO',
'class': 'logging.handlers.RotatingFileHandler',
'filename': os.path.join(log_path, 'info-{}.log'.format(time.strftime('%Y-%m-%d'))),
'maxBytes': 1024 * 1024 * 5,
'backupCount': 5,
'formatter': 'standard',
'encoding': 'utf-8', # 设置默认编码
},
},
# 配置用哪几种 handlers 来处理日志
'loggers': {
# 类型 为 django 处理所有类型的日志, 默认调用
'django': { # 定义名为django的日志器
'handlers': ['default', 'console'], # 同时向终端与文件中输出日志
'level': 'INFO', # 日志器接受的最低级别日志
'propagate': False # 是否继续传递日志信号
},
# log 调用时需要当作参数传入
'log': {
'handlers': ['error', 'info', 'console', 'default'],
'level': 'INFO',
'propagate': True
},
}
}
# 和static静态文件的链接
STATIC_URL = '/static/' # 别名
STATICFILES_DIRS = [
os.path.join(BASE_DIR, "static"),
]
# 替换系统的User 来使用自定义的User(用户模型)
# 配置信息为 ‘子应用名.模型类型’
AUTH_USER_MODEL = 'users.User'
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
# 修改系统的未登录默认跳转链接:
LOGIN_URL = '/login/'
# 设置上传的头像至blog/download/
MEDIA_ROOT = os.path.join(BASE_DIR,'download/')
# 设置图片访问的统一路由(用来解决头像不显示,路径错误http://127.0.0.1:8000/avatars/20210503/)
# 这时候会变成/download/avatar/...
MEDIA_URL = '/download/'
| true | true |
1c3b33f1b0f1ea6d04fb531fb279f87b672794cc | 232 | py | Python | ex007.py | cristianoandrad/ExerciciosPythonCursoEmVideo | 362603436b71c8ef8386d7a9ab3c5fed0b8d63f7 | [
"MIT"
] | null | null | null | ex007.py | cristianoandrad/ExerciciosPythonCursoEmVideo | 362603436b71c8ef8386d7a9ab3c5fed0b8d63f7 | [
"MIT"
] | null | null | null | ex007.py | cristianoandrad/ExerciciosPythonCursoEmVideo | 362603436b71c8ef8386d7a9ab3c5fed0b8d63f7 | [
"MIT"
] | null | null | null | # Desenvolva um programa que leia as duas notas de um aluno, calcule e mostre a sua média.
n1 = float(input('Digite a primeira nota: '))
n2 = float(input('Digite a segunda nota: '))
print('A media do aluno é {}'.format((n1+n2)/2))
| 38.666667 | 90 | 0.693966 |
n1 = float(input('Digite a primeira nota: '))
n2 = float(input('Digite a segunda nota: '))
print('A media do aluno é {}'.format((n1+n2)/2))
| true | true |
1c3b347aadee4591b9849140440593325cb8225f | 4,563 | py | Python | digsby/src/util/json.py | ifwe/digsby | f5fe00244744aa131e07f09348d10563f3d8fa99 | [
"Python-2.0"
] | 35 | 2015-08-15T14:32:38.000Z | 2021-12-09T16:21:26.000Z | digsby/src/util/json.py | niterain/digsby | 16a62c7df1018a49eaa8151c0f8b881c7e252949 | [
"Python-2.0"
] | 4 | 2015-09-12T10:42:57.000Z | 2017-02-27T04:05:51.000Z | digsby/src/util/json.py | niterain/digsby | 16a62c7df1018a49eaa8151c0f8b881c7e252949 | [
"Python-2.0"
] | 15 | 2015-07-10T23:58:07.000Z | 2022-01-23T22:16:33.000Z | import simplejson
# +---------------+-------------------+
# | JSON | Python |
# +===============+===================+
# | object | dict |
# +---------------+-------------------+
# | array | list |
# +---------------+-------------------+
# | string | unicode, !str |
# +---------------+-------------------+
# | number (int) | !int, !long |
# +---------------+-------------------+
# | number (real) | !float |
# +---------------+-------------------+
# | true | True |
# +---------------+-------------------+
# | false | False |
# +---------------+-------------------+
# | null | None |
# +---------------+-------------------+
# +---------------------+---------------+
# | Python | JSON |
# +=====================+===============+
# | dict | object |
# +---------------------+---------------+
# | list, !tuple | array |
# +---------------------+---------------+
# | !str, unicode | string |
# +---------------------+---------------+
# | !int, !long, !float | number |
# +---------------------+---------------+
# | True | true |
# +---------------------+---------------+
# | False | false |
# +---------------------+---------------+
# | None | null |
# +---------------------+---------------+
def serialize(thing):
if type(thing) is dict:
return dict((serialize(a), serialize(b)) for a,b in thing.iteritems())
elif isinstance(thing, str):
return '__str__' + thing
elif isinstance(thing, unicode):
return '__unicode__' + thing
elif isinstance(thing, bool):
if thing:
return '__True__'
else:
return '__False__'
elif isinstance(thing, (int, long)):
return '__int__' + str(thing)
elif isinstance(thing, float):
return '__float__' + repr(thing)
elif isinstance(thing, type(None)):
return '__None__'
elif type(thing) is tuple:
return {'__tuple__' : list(serialize(foo) for foo in thing)}
elif type(thing) is list:
return list(serialize(foo) for foo in thing)
elif type(thing) is set:
return {'__set__' : [serialize(foo) for foo in sorted(thing)]}
elif type(thing) is frozenset:
return {'__frozenset__' : [serialize(foo) for foo in sorted(thing)]}
else:
assert False, (type(thing), thing)
def unserialize(thing):
if type(thing) in (unicode, str):
if thing.startswith('__str__'):
return str(thing[7:])
if thing.startswith('__unicode__'):
return unicode(thing[11:])
if thing.startswith('__int__'):
return int(thing[7:])
if thing.startswith('__float__'):
return float(thing[9:])
if thing == '__None__':
return None
if thing == '__True__':
return True
if thing == '__False__':
return False
assert False, 'all incoming unicode should have been prepended'
return thing
if type(thing) is dict:
return dict((unserialize(foo),unserialize(bar)) for foo,bar in thing.iteritems())
elif type(thing) is set:
return set(unserialize(foo) for foo in thing)
elif type(thing) is frozenset:
return frozenset(unserialize(foo) for foo in thing)
elif type(thing) is tuple:
return tuple(unserialize(foo) for foo in thing)
elif type(thing) is list:
return list(unserialize(foo) for foo in thing)
else:
assert False, type(thing)
def untupleset(obj):
if '__tuple__' in obj:
assert len(obj) == 1
return tuple(obj['__tuple__'])
elif '__set__' in obj:
assert len(obj) == 1
return set(obj['__set__'])
elif '__frozenset__' in obj:
assert len(obj) == 1
return frozenset(obj['__frozenset__'])
return obj
def pydumps(obj):
return simplejson.dumps(serialize(obj), sort_keys=True, separators=(',', ':'))
def pyloads(obj):
return unserialize(simplejson.loads(obj, object_hook=untupleset))
__all__ = ['pydumps', 'pyloads']
if __name__=='__main__':
#TODO: this needs test cases
pass
| 36.798387 | 90 | 0.42735 | import simplejson
def serialize(thing):
if type(thing) is dict:
return dict((serialize(a), serialize(b)) for a,b in thing.iteritems())
elif isinstance(thing, str):
return '__str__' + thing
elif isinstance(thing, unicode):
return '__unicode__' + thing
elif isinstance(thing, bool):
if thing:
return '__True__'
else:
return '__False__'
elif isinstance(thing, (int, long)):
return '__int__' + str(thing)
elif isinstance(thing, float):
return '__float__' + repr(thing)
elif isinstance(thing, type(None)):
return '__None__'
elif type(thing) is tuple:
return {'__tuple__' : list(serialize(foo) for foo in thing)}
elif type(thing) is list:
return list(serialize(foo) for foo in thing)
elif type(thing) is set:
return {'__set__' : [serialize(foo) for foo in sorted(thing)]}
elif type(thing) is frozenset:
return {'__frozenset__' : [serialize(foo) for foo in sorted(thing)]}
else:
assert False, (type(thing), thing)
def unserialize(thing):
if type(thing) in (unicode, str):
if thing.startswith('__str__'):
return str(thing[7:])
if thing.startswith('__unicode__'):
return unicode(thing[11:])
if thing.startswith('__int__'):
return int(thing[7:])
if thing.startswith('__float__'):
return float(thing[9:])
if thing == '__None__':
return None
if thing == '__True__':
return True
if thing == '__False__':
return False
assert False, 'all incoming unicode should have been prepended'
return thing
if type(thing) is dict:
return dict((unserialize(foo),unserialize(bar)) for foo,bar in thing.iteritems())
elif type(thing) is set:
return set(unserialize(foo) for foo in thing)
elif type(thing) is frozenset:
return frozenset(unserialize(foo) for foo in thing)
elif type(thing) is tuple:
return tuple(unserialize(foo) for foo in thing)
elif type(thing) is list:
return list(unserialize(foo) for foo in thing)
else:
assert False, type(thing)
def untupleset(obj):
if '__tuple__' in obj:
assert len(obj) == 1
return tuple(obj['__tuple__'])
elif '__set__' in obj:
assert len(obj) == 1
return set(obj['__set__'])
elif '__frozenset__' in obj:
assert len(obj) == 1
return frozenset(obj['__frozenset__'])
return obj
def pydumps(obj):
return simplejson.dumps(serialize(obj), sort_keys=True, separators=(',', ':'))
def pyloads(obj):
return unserialize(simplejson.loads(obj, object_hook=untupleset))
__all__ = ['pydumps', 'pyloads']
if __name__=='__main__':
pass
| true | true |
1c3b34d1e89985ed582278274611ebbe4199d7dc | 533 | py | Python | Lesson2/pandas_and_json.py | rmhyman/DataScience | c839c97c76f104ab298563a5c8b48f6d90be5f60 | [
"MIT"
] | 1 | 2015-09-17T18:49:09.000Z | 2015-09-17T18:49:09.000Z | Lesson2/pandas_and_json.py | rmhyman/DataScience | c839c97c76f104ab298563a5c8b48f6d90be5f60 | [
"MIT"
] | null | null | null | Lesson2/pandas_and_json.py | rmhyman/DataScience | c839c97c76f104ab298563a5c8b48f6d90be5f60 | [
"MIT"
] | null | null | null | import json
import requests
import pprint
def api_get_request(url):
# In this exercise, you want to call the last.fm API to get a list of the
# top artists in Spain.
#
# Once you've done this, return the name of the number 1 top artist in Spain.
data = requests.get(url).text
df = json.loads(data)
pp = pprint.PrettyPrinter(indent=4)
#pp.pprint(df['topartists']['artist']['rank' == 1]['name'])
return df['topartists']['artist']['rank' == 1]['name'] # return the top artist in Spain
| 35.533333 | 92 | 0.645403 | import json
import requests
import pprint
def api_get_request(url):
data = requests.get(url).text
df = json.loads(data)
pp = pprint.PrettyPrinter(indent=4)
#pp.pprint(df['topartists']['artist']['rank' == 1]['name'])
return df['topartists']['artist']['rank' == 1]['name'] # return the top artist in Spain
| true | true |
1c3b35595be26ce133518cd58d3d352daa473df5 | 804 | py | Python | setup.py | t3m8ch/clepsydra | 5c755a91713ae70432627aec381f567aeed92f56 | [
"Apache-2.0"
] | 14 | 2021-11-10T03:07:13.000Z | 2022-02-17T07:13:51.000Z | setup.py | t3m8ch/clepsydra | 5c755a91713ae70432627aec381f567aeed92f56 | [
"Apache-2.0"
] | 12 | 2021-11-09T20:05:30.000Z | 2022-01-09T08:58:48.000Z | setup.py | t3m8ch/clepsydra | 5c755a91713ae70432627aec381f567aeed92f56 | [
"Apache-2.0"
] | 1 | 2022-02-18T07:33:53.000Z | 2022-02-18T07:33:53.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
from os import path
from setuptools import setup, find_packages
this_directory = path.abspath(path.dirname(__file__))
setup(
name='clepsydra',
description='Liquid scheduler for python',
version='0.1a1',
url='https://github.com/tishka17/clepsydra',
author='A. Tikhonov',
author_email='17@itishka.org',
license='Apache2',
classifiers=[
'Operating System :: OS Independent',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 3'
],
packages=find_packages(include=['clepsydra', 'clepsydra.*']),
install_requires=[
],
extras_require={
},
package_data={
},
python_requires=">=3.8",
)
| 24.363636 | 65 | 0.634328 |
from os import path
from setuptools import setup, find_packages
this_directory = path.abspath(path.dirname(__file__))
setup(
name='clepsydra',
description='Liquid scheduler for python',
version='0.1a1',
url='https://github.com/tishka17/clepsydra',
author='A. Tikhonov',
author_email='17@itishka.org',
license='Apache2',
classifiers=[
'Operating System :: OS Independent',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 3'
],
packages=find_packages(include=['clepsydra', 'clepsydra.*']),
install_requires=[
],
extras_require={
},
package_data={
},
python_requires=">=3.8",
)
| true | true |
1c3b36913d4c6e836eb061c53306b6be514f88e4 | 3,090 | py | Python | alipay/aop/api/domain/KbAdvertAdvSingleVoucherResponse.py | snowxmas/alipay-sdk-python-all | 96870ced60facd96c5bce18d19371720cbda3317 | [
"Apache-2.0"
] | 213 | 2018-08-27T16:49:32.000Z | 2021-12-29T04:34:12.000Z | alipay/aop/api/domain/KbAdvertAdvSingleVoucherResponse.py | snowxmas/alipay-sdk-python-all | 96870ced60facd96c5bce18d19371720cbda3317 | [
"Apache-2.0"
] | 29 | 2018-09-29T06:43:00.000Z | 2021-09-02T03:27:32.000Z | alipay/aop/api/domain/KbAdvertAdvSingleVoucherResponse.py | snowxmas/alipay-sdk-python-all | 96870ced60facd96c5bce18d19371720cbda3317 | [
"Apache-2.0"
] | 59 | 2018-08-27T16:59:26.000Z | 2022-03-25T10:08:15.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.KbAdvertAdvContentResponse import KbAdvertAdvContentResponse
from alipay.aop.api.domain.KbAdvertAdvContent import KbAdvertAdvContent
from alipay.aop.api.domain.KbAdvertSubjectVoucherResponse import KbAdvertSubjectVoucherResponse
class KbAdvertAdvSingleVoucherResponse(object):
def __init__(self):
self._adv_content_list = None
self._content = None
self._voucher = None
@property
def adv_content_list(self):
return self._adv_content_list
@adv_content_list.setter
def adv_content_list(self, value):
if isinstance(value, list):
self._adv_content_list = list()
for i in value:
if isinstance(i, KbAdvertAdvContentResponse):
self._adv_content_list.append(i)
else:
self._adv_content_list.append(KbAdvertAdvContentResponse.from_alipay_dict(i))
@property
def content(self):
return self._content
@content.setter
def content(self, value):
if isinstance(value, KbAdvertAdvContent):
self._content = value
else:
self._content = KbAdvertAdvContent.from_alipay_dict(value)
@property
def voucher(self):
return self._voucher
@voucher.setter
def voucher(self, value):
if isinstance(value, KbAdvertSubjectVoucherResponse):
self._voucher = value
else:
self._voucher = KbAdvertSubjectVoucherResponse.from_alipay_dict(value)
def to_alipay_dict(self):
params = dict()
if self.adv_content_list:
if isinstance(self.adv_content_list, list):
for i in range(0, len(self.adv_content_list)):
element = self.adv_content_list[i]
if hasattr(element, 'to_alipay_dict'):
self.adv_content_list[i] = element.to_alipay_dict()
if hasattr(self.adv_content_list, 'to_alipay_dict'):
params['adv_content_list'] = self.adv_content_list.to_alipay_dict()
else:
params['adv_content_list'] = self.adv_content_list
if self.content:
if hasattr(self.content, 'to_alipay_dict'):
params['content'] = self.content.to_alipay_dict()
else:
params['content'] = self.content
if self.voucher:
if hasattr(self.voucher, 'to_alipay_dict'):
params['voucher'] = self.voucher.to_alipay_dict()
else:
params['voucher'] = self.voucher
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = KbAdvertAdvSingleVoucherResponse()
if 'adv_content_list' in d:
o.adv_content_list = d['adv_content_list']
if 'content' in d:
o.content = d['content']
if 'voucher' in d:
o.voucher = d['voucher']
return o
| 33.956044 | 97 | 0.621359 |
import json
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.KbAdvertAdvContentResponse import KbAdvertAdvContentResponse
from alipay.aop.api.domain.KbAdvertAdvContent import KbAdvertAdvContent
from alipay.aop.api.domain.KbAdvertSubjectVoucherResponse import KbAdvertSubjectVoucherResponse
class KbAdvertAdvSingleVoucherResponse(object):
def __init__(self):
self._adv_content_list = None
self._content = None
self._voucher = None
@property
def adv_content_list(self):
return self._adv_content_list
@adv_content_list.setter
def adv_content_list(self, value):
if isinstance(value, list):
self._adv_content_list = list()
for i in value:
if isinstance(i, KbAdvertAdvContentResponse):
self._adv_content_list.append(i)
else:
self._adv_content_list.append(KbAdvertAdvContentResponse.from_alipay_dict(i))
@property
def content(self):
return self._content
@content.setter
def content(self, value):
if isinstance(value, KbAdvertAdvContent):
self._content = value
else:
self._content = KbAdvertAdvContent.from_alipay_dict(value)
@property
def voucher(self):
return self._voucher
@voucher.setter
def voucher(self, value):
if isinstance(value, KbAdvertSubjectVoucherResponse):
self._voucher = value
else:
self._voucher = KbAdvertSubjectVoucherResponse.from_alipay_dict(value)
def to_alipay_dict(self):
params = dict()
if self.adv_content_list:
if isinstance(self.adv_content_list, list):
for i in range(0, len(self.adv_content_list)):
element = self.adv_content_list[i]
if hasattr(element, 'to_alipay_dict'):
self.adv_content_list[i] = element.to_alipay_dict()
if hasattr(self.adv_content_list, 'to_alipay_dict'):
params['adv_content_list'] = self.adv_content_list.to_alipay_dict()
else:
params['adv_content_list'] = self.adv_content_list
if self.content:
if hasattr(self.content, 'to_alipay_dict'):
params['content'] = self.content.to_alipay_dict()
else:
params['content'] = self.content
if self.voucher:
if hasattr(self.voucher, 'to_alipay_dict'):
params['voucher'] = self.voucher.to_alipay_dict()
else:
params['voucher'] = self.voucher
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = KbAdvertAdvSingleVoucherResponse()
if 'adv_content_list' in d:
o.adv_content_list = d['adv_content_list']
if 'content' in d:
o.content = d['content']
if 'voucher' in d:
o.voucher = d['voucher']
return o
| true | true |
1c3b379f9d617573f18aed9d0cc8086eb81f5c44 | 11,849 | py | Python | src/m5_more_sequences.py | manchekl/12-MoreSequences | 2bc19eaee820e13594b95fc105124c96dbe91b63 | [
"MIT"
] | null | null | null | src/m5_more_sequences.py | manchekl/12-MoreSequences | 2bc19eaee820e13594b95fc105124c96dbe91b63 | [
"MIT"
] | null | null | null | src/m5_more_sequences.py | manchekl/12-MoreSequences | 2bc19eaee820e13594b95fc105124c96dbe91b63 | [
"MIT"
] | null | null | null | """
This module lets you practice various patterns
for ITERATING through SEQUENCES, including:
-- Beginning to end
-- Other ranges (e.g., backwards and every-3rd-item)
-- The COUNT/SUM/etc pattern
-- The FIND pattern (via LINEAR SEARCH)
Authors: David Mutchler, Vibha Alangar, Matt Boutell, Dave Fisher,
Mark Hays, Amanda Stouder, Aaron Wilkin, their colleagues,
and Krista Manche.
""" # DONE: 1. PUT YOUR NAME IN THE ABOVE LINE.
import rosegraphics as rg
def main():
    """ Runs every TEST function in this module, one per exercise. """
    tests = (run_test_sum_radii,
             run_test_count_last_n_odds,
             run_test_index_of_first_negative,
             run_test_contains_an_a)
    for run_test in tests:
        run_test()
###############################################################################
# Many problems simply iterate (loop) through ALL of the sequence,
# as in the sum_radii problem below.
###############################################################################
def run_test_sum_radii():
    """ Tests the sum_radii function. """
    print()
    print('--------------------------------------------------')
    print('Testing the sum_radii function:')
    print('--------------------------------------------------')

    # Each case pairs a sequence of circles with its expected radius total.
    cases = (
        ((rg.Circle(rg.Point(100, 100), 25),
          rg.Circle(rg.Point(100, 100), 50),
          rg.Circle(rg.Point(100, 100), 10)),
         85),
        ((rg.Circle(rg.Point(200, 20), 80),
          rg.Circle(rg.Point(300, 100), 60),
          rg.Circle(rg.Point(100, 150), 0),
          rg.Circle(rg.Point(0, 0), 30)),
         170),
    )
    for circles, expected in cases:
        print()
        actual = sum_radii(circles)
        print('Expected:', expected)
        print('Actual: ', actual)
def sum_radii(circles):
    """
    Returns the sum of the radii of the given rg.Circle objects.

    What comes in:
      -- a sequence (list or tuple) of rg.Circle objects
    What goes out:
      The sum of their .radius values (0 for an empty sequence).
    Side effects: None.

    Example: for circles with radii 25, 50 and 10,
      sum_radii([circle1, circle2, circle3]) returns 85.

    Type hints:
      :type circles: list | tuple of rg.Circle
      :rtype: int | float
    """
    total = 0
    # Iterate directly over the circles; the original indexed with
    # range(len(...)), which is needless bookkeeping in Python.
    for circle in circles:
        total = total + circle.radius
    return total
###############################################################################
# Some problems iterate (loop) through PART of the sequence,
# perhaps BACKWARDS, as in the count_last_n_odds problem below.
###############################################################################
def run_test_count_last_n_odds():
    """ Tests the count_last_n_odds function. """
    print()
    print('--------------------------------------------------')
    print('Testing the count_last_n_odds function:')
    print('--------------------------------------------------')

    # Six tests, batched via a comprehension over the n values.
    seq = [1, 5, 88, 44, 33, 77, 10, 12, 9]
    answers = [count_last_n_odds(seq, n) for n in (0, 1, 6, 7, 8, 9)]
    print()
    print('Test set #1 of count_last_n_odds:', *answers)
    print('The above should be: 0 1 3 3 4 5')

    # Six more tests on a shorter sequence with negatives and zero.
    seq = [17, 88, -5, -10, 0]
    answers = [count_last_n_odds(seq, n) for n in (0, 1, 2, 3, 4, 5)]
    print()
    print('Test set #2 of count_last_n_odds:', *answers)
    print('The above should be: 0 0 0 1 1 2')
def count_last_n_odds(integers, n):
    """
    Counts how many of the final  n  items of a sequence are odd.

    What comes in:
      -- a sequence of integers
      -- a non-negative integer n, at most the sequence's length
    What goes out: the number (int) of odd values among the last n items.
    Side effects: None.

    Examples, for the sequence (13, 66, 15, 3):
      n=0 -> 0,  n=1 -> 1,  n=2 -> 2,  n=3 -> 2,  n=4 -> 3.

    Type hints:
      :type integers: list | tuple of int
      :type n: int
      :rtype: int
    """
    count = 0
    # Walk the last n items left-to-right (the original walked them
    # right-to-left; the tally is identical either way).
    for index in range(len(integers) - n, len(integers)):
        if integers[index] % 2 == 1:
            count = count + 1
    return count
###############################################################################
# Some problems iterate (loop) through PART of the sequence,
# stopping when the loop FINDS something of interest
# (or continuing to the end if it does NOT find the thing of interest),
# as in the following problems:
###############################################################################
def run_test_index_of_first_negative():
    """ Tests the index_of_first_negative function. """
    print()
    print('--------------------------------------------------')
    print('Testing the index_of_first_negative function:')
    print('--------------------------------------------------')

    # Tests 1-3: sequences that DO contain a negative number.
    cases = (([90, 0, 20, -5, 30, -10, 15], 3),
             ([-5, 30, -10, 15], 0),
             ([5, 30, 10, 15, -1], 4))
    for numbers, expected in cases:
        print()
        actual = index_of_first_negative(numbers)
        print('Expected:', expected)
        print('Actual: ', actual)

    # Test 4: no negatives, so the right answer is the int -1 (not '-1').
    print()
    expected = -1
    actual = index_of_first_negative([5, 30, 10, 15, 1, 6])
    print('Expected:', expected)
    print('Actual: ', actual)
    if actual == '-1':
        print(' Your answer is WRONG.')
        print(' You returned the STRING \'-1\'')
        print(' when you should have returned just -1')
def index_of_first_negative(numbers):
    """
    Returns the index of the first (leftmost) negative number in the
    given sequence, or -1 if the sequence has no negative numbers.

    What comes in: a sequence of numbers.
    What goes out: an int index, or -1 when nothing is negative.
    Side effects: None.

    Examples:
      [4, 30, -19, 8, -3, -50, 100] -> 2   (first negative is -19)
      [-8, 44, 33]                  -> 0   (first negative is -8)
      [1, 29, 22, 8]                -> -1  (no negatives at all)

    Type hints:
      :type numbers: list | tuple of float | int
      :rtype: int
    """
    for position, value in enumerate(numbers):
        if value < 0:
            return position
    return -1
def run_test_contains_an_a():
    """ Tests the contains_an_a function. """
    print()
    print('--------------------------------------------------')
    print('Testing the contains_an_a function:')
    print('--------------------------------------------------')

    # Pairs of (test string, correct answer): no 'a' at all, 'a' in the
    # middle / end / start, the empty string, and upper-case-only strings
    # (a capital 'A' must NOT count).
    cases = (('nope', False),
             ('yes a is here', True),
             ('many aaaaas aaa aaa', True),
             ('not until the very end is a', True),
             ('a @ the beginning', True),
             ('', False),
             ('BLAH BLAH BLAH', False),
             ('BLAH BLAH BLAH \t MORE BLAH', False),
             ('BLAH BLAH BLAH \t MORE BLaH', True))
    for test_string, expected in cases:
        actual = contains_an_a(test_string)
        print()
        print('Expected:', expected)
        print('Actual: ', actual)
        if type(actual) is str and str(expected) == actual:
            print('Your code FAILED this test for contains_an_a.')
            print(' You appear to have returned the STRING:')
            print(' "' + actual + '"')
            print(' instead of the built-in constant:')
            print(' ' + str(expected))
def contains_an_a(s):
    """
    Reports whether the given string contains the lower-case letter 'a'.

    What comes in: a string.
    What goes out: True if 'a' occurs anywhere in the string,
      False otherwise (including for the empty string).
    Side effects: None.

    Examples:
      contains_an_a('blah blah blah') -> True
      contains_an_a('BLAH BLAH BLAH') -> False
      contains_an_a('')               -> False

    Type hints:
      :type s: str
      :rtype: bool
    """
    # Examine the characters directly; no index arithmetic is needed.
    for character in s:
        if character == 'a':
            return True
    return False
# -----------------------------------------------------------------------------
# Calls main to start the ball rolling, but only when this file is run as a
# script -- importing the module (e.g. from a grader or another exercise) no
# longer triggers the console tests as an import side effect.
# -----------------------------------------------------------------------------
if __name__ == '__main__':
    main()
| 35.797583 | 79 | 0.520213 |
import rosegraphics as rg
def main():
    """ Calls the TEST functions in this module, one per exercise. """
    run_test_sum_radii()
    run_test_count_last_n_odds()
    run_test_index_of_first_negative()
    run_test_contains_an_a()
| true | true |
1c3b38e84c3406747fd2a13b8335964a1d70b3b2 | 878 | py | Python | setup.py | msantino/publish-event-sns | 49e64d73f84a5120833abadd5532bbaca85a4bfb | [
"MIT"
] | null | null | null | setup.py | msantino/publish-event-sns | 49e64d73f84a5120833abadd5532bbaca85a4bfb | [
"MIT"
] | null | null | null | setup.py | msantino/publish-event-sns | 49e64d73f84a5120833abadd5532bbaca85a4bfb | [
"MIT"
] | null | null | null | import io
from os import path
from setuptools import setup, find_packages
# Directory containing this setup.py (kept for path-relative uses).
MYDIR = path.abspath(path.dirname(__file__))
# No custom build commands or compiled extension modules are defined;
# these empty placeholders are handed to setup() below.
cmdclass = {}
ext_modules = []
# Read the long description up front with a context manager so the README
# handle is closed deterministically (the original inline io.open(...).read()
# left the open file object to the garbage collector).
with io.open('README.md', 'r', encoding='utf-8') as readme_file:
    long_description = readme_file.read()

setup(
    name='publish_event_sns',
    version='0.0.3',
    author="Marcelo Santino",
    author_email="eu@marcelosantino.com.br",
    description="Publish message into SNS Topic with attributes",
    url='https://github.com/msantino/publish-event-sns',
    long_description=long_description,
    long_description_content_type="text/markdown",
    packages=find_packages(exclude=['tests']),
    include_package_data=True,
    zip_safe=False,
    setup_requires=[],
    cmdclass=cmdclass,
    ext_modules=ext_modules,
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
    ],
)
from os import path
from setuptools import setup, find_packages
MYDIR = path.abspath(path.dirname(__file__))
cmdclass = {}
ext_modules = []
setup(
name='publish_event_sns',
version='0.0.3',
author="Marcelo Santino",
author_email="eu@marcelosantino.com.br",
description="Publish message into SNS Topic with attributes",
url='https://github.com/msantino/publish-event-sns',
long_description=io.open('README.md', 'r', encoding='utf-8').read(),
long_description_content_type="text/markdown",
packages=find_packages(exclude=['tests']),
include_package_data=True,
zip_safe=False,
setup_requires=[],
cmdclass=cmdclass,
ext_modules=ext_modules,
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
) | true | true |
1c3b39831ee64c095ad2623ef02eba501cfee32b | 3,146 | py | Python | datasets/qm9.py | xptree/nmp_qc | 45d94a885753c670db1455e015cede410e1720a5 | [
"MIT"
] | null | null | null | datasets/qm9.py | xptree/nmp_qc | 45d94a885753c670db1455e015cede410e1720a5 | [
"MIT"
] | null | null | null | datasets/qm9.py | xptree/nmp_qc | 45d94a885753c670db1455e015cede410e1720a5 | [
"MIT"
] | null | null | null | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
qm9.py:
Usage:
"""
# Networkx should be imported before torch
import networkx as nx
import torch.utils.data as data
import numpy as np
import argparse
import datasets.utils as utils
import time
import os,sys
import torch
# Make the sibling GraphReader package importable before importing from it.
reader_folder = os.path.realpath( os.path.abspath('../GraphReader'))
if reader_folder not in sys.path:
    sys.path.insert(1, reader_folder)
from GraphReader.graph_reader import xyz_graph_reader
__author__ = "Pau Riba, Anjan Dutta"
__email__ = "priba@cvc.uab.cat, adutta@cvc.uab.cat"
# QM9 regression-target names, in the column order used by the dataset files.
_label_names = ['mu', 'alpha', 'homo', 'lumo', 'gap', 'r2',
                'zpve', 'U0', 'U', 'H', 'G', 'Cv']
class Qm9(data.Dataset):
    """PyTorch Dataset over QM9 molecule xyz files parsed into graphs.

    Each item is ((g, h, e), target): the graph, node features and edge
    features built by the configured transforms, plus the target vector.
    """
    # Constructor
    def __init__(self, root_path, ids, vertex_transform=utils.qm9_nodes, edge_transform=utils.qm9_edges,
                 target_transform=None, e_representation='raw_distance', labels=None):
        # root_path: directory holding the per-molecule xyz files.
        # ids: file names (relative to root_path) belonging to this split.
        # vertex_transform / edge_transform: callables building node / edge
        #   features from a parsed graph.
        # e_representation: edge-feature encoding name given to edge_transform.
        self.root = root_path
        self.ids = ids
        self.vertex_transform = vertex_transform
        self.edge_transform = edge_transform
        self.target_transform = target_transform
        self.e_representation = e_representation
        # Default to every QM9 target; a bare string becomes a one-item list.
        # NOTE(review): an empty list also falls back to all labels, because
        # `or` treats [] as missing -- confirm that is intended.
        labels = labels or _label_names
        if isinstance(labels, str):
            labels = [labels, ]
        # Indices (into the full target vector) of the labels to keep.
        self.labels_id = np.array([_label_names.index(x) for x in labels])
    def __getitem__(self, index):
        """Parses molecule `index` and returns ((g, h, e), target)."""
        g, target = xyz_graph_reader(os.path.join(self.root, self.ids[index]),
                                     labels_id=self.labels_id)
        if self.vertex_transform is not None:
            h = self.vertex_transform(g)
        if self.edge_transform is not None:
            g, e = self.edge_transform(g, self.e_representation)
        if self.target_transform is not None:
            target = self.target_transform(target)
        # NOTE: if vertex_transform or edge_transform is None, h or e is
        # never bound and this return raises NameError -- callers are
        # expected to always supply both transforms.
        return (g, h, e), target
    def __len__(self):
        """Number of molecules in this split."""
        return len(self.ids)
    def set_target_transform(self, target_transform):
        """Replaces the target transform (e.g. normalization) after construction."""
        self.target_transform = target_transform
if __name__ == '__main__':
    # Manual smoke test: shuffle the QM9 files, split them, and exercise
    # the Dataset plus the degree-statistics helper.
    # Parse options for locating the data.
    parser = argparse.ArgumentParser(description='QM9 Object.')
    # Optional argument
    parser.add_argument('--root', nargs=1, help='Specify the data directory.', default=['../data/qm9/dsgdb9nsd'])
    args = parser.parse_args()
    root = args.root[0]
    # Random split: first 10k validation, next 10k test, remainder train.
    files = [f for f in os.listdir(root) if os.path.isfile(os.path.join(root, f))]
    idx = np.random.permutation(len(files))
    idx = idx.tolist()
    valid_ids = [files[i] for i in idx[0:10000]]
    test_ids = [files[i] for i in idx[10000:20000]]
    train_ids = [files[i] for i in idx[20000:]]
    # NOTE(review): this lambda takes one argument, but Qm9.__getitem__
    # calls edge_transform(g, e_representation) with two -- indexing
    # data_train likely raises TypeError. Confirm against datasets.utils.
    data_train = Qm9(root, train_ids, vertex_transform=utils.qm9_nodes, edge_transform=lambda g: utils.qm9_edges(g, e_representation='raw_distance'))
    data_valid = Qm9(root, valid_ids)
    data_test = Qm9(root, test_ids)
    print(len(data_train))
    print(len(data_valid))
    print(len(data_test))
    print(data_train[1])
    print(data_valid[1])
    print(data_test[1])
    # Time the degree-statistics computation over the validation split.
    start = time.time()
    print(utils.get_graph_stats(data_valid, 'degrees'))
    end = time.time()
    print('Time Statistics Par')
    print(end - start)
| 28.862385 | 149 | 0.665607 |
import networkx as nx
import torch.utils.data as data
import numpy as np
import argparse
import datasets.utils as utils
import time
import os,sys
import torch
# Put the sibling GraphReader package on sys.path before importing from it.
reader_folder = os.path.realpath( os.path.abspath('../GraphReader'))
if reader_folder not in sys.path:
    sys.path.insert(1, reader_folder)
from GraphReader.graph_reader import xyz_graph_reader
__author__ = "Pau Riba, Anjan Dutta"
__email__ = "priba@cvc.uab.cat, adutta@cvc.uab.cat"
# Names of the QM9 regression targets, in dataset column order.
_label_names = ['mu', 'alpha', 'homo', 'lumo', 'gap', 'r2',
                'zpve', 'U0', 'U', 'H', 'G', 'Cv']
class Qm9(data.Dataset):
    """PyTorch Dataset of QM9 molecules; items are ((g, h, e), target)."""
    def __init__(self, root_path, ids, vertex_transform=utils.qm9_nodes, edge_transform=utils.qm9_edges,
                 target_transform=None, e_representation='raw_distance', labels=None):
        # root_path: directory of xyz files; ids: file names in this split.
        self.root = root_path
        self.ids = ids
        self.vertex_transform = vertex_transform
        self.edge_transform = edge_transform
        self.target_transform = target_transform
        self.e_representation = e_representation
        # Default to all targets; a bare string becomes a one-item list.
        # NOTE(review): [] also falls back to all labels via `or`.
        labels = labels or _label_names
        if isinstance(labels, str):
            labels = [labels, ]
        # Column indices of the targets to keep.
        self.labels_id = np.array([_label_names.index(x) for x in labels])
    def __getitem__(self, index):
        """Parses one molecule file and returns ((g, h, e), target)."""
        g, target = xyz_graph_reader(os.path.join(self.root, self.ids[index]),
                                     labels_id=self.labels_id)
        if self.vertex_transform is not None:
            h = self.vertex_transform(g)
        if self.edge_transform is not None:
            g, e = self.edge_transform(g, self.e_representation)
        if self.target_transform is not None:
            target = self.target_transform(target)
        # NOTE: h / e stay unbound (NameError on return) when the matching
        # transform is None; both transforms are expected to be supplied.
        return (g, h, e), target
    def __len__(self):
        """Number of molecules in this split."""
        return len(self.ids)
    def set_target_transform(self, target_transform):
        """Swaps in a new target transform after construction."""
        self.target_transform = target_transform
if __name__ == '__main__':
    # Smoke test: split the files 10k valid / 10k test / rest train,
    # then read a few items and time the degree statistics.
    parser = argparse.ArgumentParser(description='QM9 Object.')
    parser.add_argument('--root', nargs=1, help='Specify the data directory.', default=['../data/qm9/dsgdb9nsd'])
    args = parser.parse_args()
    root = args.root[0]
    files = [f for f in os.listdir(root) if os.path.isfile(os.path.join(root, f))]
    # Random permutation of file indices drives the three-way split.
    idx = np.random.permutation(len(files))
    idx = idx.tolist()
    valid_ids = [files[i] for i in idx[0:10000]]
    test_ids = [files[i] for i in idx[10000:20000]]
    train_ids = [files[i] for i in idx[20000:]]
    # NOTE(review): the lambda takes one argument but __getitem__ calls
    # edge_transform with two -- indexing data_train likely raises.
    data_train = Qm9(root, train_ids, vertex_transform=utils.qm9_nodes, edge_transform=lambda g: utils.qm9_edges(g, e_representation='raw_distance'))
    data_valid = Qm9(root, valid_ids)
    data_test = Qm9(root, test_ids)
    print(len(data_train))
    print(len(data_valid))
    print(len(data_test))
    print(data_train[1])
    print(data_valid[1])
    print(data_test[1])
    start = time.time()
    print(utils.get_graph_stats(data_valid, 'degrees'))
    end = time.time()
    print('Time Statistics Par')
    print(end - start)
| true | true |
1c3b39a0fcda1374ba1e8bc526c006af509aca72 | 2,411 | py | Python | utils.py | atul04/Grammar-Correction | 89ee3338f901735cbad2144e5e41a54ee11213f9 | [
"MIT"
] | null | null | null | utils.py | atul04/Grammar-Correction | 89ee3338f901735cbad2144e5e41a54ee11213f9 | [
"MIT"
] | null | null | null | utils.py | atul04/Grammar-Correction | 89ee3338f901735cbad2144e5e41a54ee11213f9 | [
"MIT"
] | null | null | null | import torch
import spacy
from torchtext.data.metrics import bleu_score
import sys
def translate_sentence(model, sentence, german, english, device, max_length=50):
    """
    Greedily decodes the model's output sequence for one input sentence.

    model: seq2seq model exposing .encoder and .decoder submodules.
    sentence: raw string, or an already-tokenized list of tokens.
    german / english: torchtext Field objects holding the source / target
        vocabularies (the names are historical -- see the note below).
    device: torch device the input tensors are moved to.
    max_length: hard cap on decoding steps; decoding also stops at <eos>.
    Returns the decoded token list without the leading <sos>.
    """
    # NOTE(review): despite the parameter name, this loads the ENGLISH spaCy
    # model ("en") -- this repository corrects English text, so the "german"
    # field is really the (English) source side.
    spacy_tokenizer = spacy.load("en")

    # Tokenize and lower-case, unless the caller already passed tokens.
    # (isinstance replaces the original `type(sentence) == str`, which would
    # mis-route str subclasses.)
    if isinstance(sentence, str):
        tokens = [token.text.lower() for token in spacy_tokenizer(sentence)]
    else:
        tokens = [token.lower() for token in sentence]

    # Wrap with <sos>/<eos> and map every token to its vocabulary index.
    tokens.insert(0, german.init_token)
    tokens.append(german.eos_token)
    text_to_indices = [german.vocab.stoi[token] for token in tokens]

    # Shape (seq_len, 1): a batch containing a single sentence.
    sentence_tensor = torch.LongTensor(text_to_indices).unsqueeze(1).to(device)

    # Encode once, then decode token by token, feeding back the argmax pick.
    with torch.no_grad():
        outputs_encoder, hiddens, cells = model.encoder(sentence_tensor)
    outputs = [english.vocab.stoi["<sos>"]]
    for _ in range(max_length):
        previous_word = torch.LongTensor([outputs[-1]]).to(device)
        with torch.no_grad():
            output, hiddens, cells = model.decoder(
                previous_word, outputs_encoder, hiddens, cells
            )
        best_guess = output.argmax(1).item()
        outputs.append(best_guess)
        # Stop as soon as the model predicts end-of-sentence (reuses
        # best_guess instead of recomputing argmax as the original did).
        if best_guess == english.vocab.stoi["<eos>"]:
            break

    translated_sentence = [english.vocab.itos[idx] for idx in outputs]
    # remove start token
    return translated_sentence[1:]
def bleu(data, model, german, english, device):
    """Computes corpus BLEU of the model's greedy decodes over `data`."""
    references = []
    hypotheses = []
    for example in data:
        source_tokens = vars(example)["source"]
        target_tokens = vars(example)["target"]
        decoded = translate_sentence(model, source_tokens, german, english, device)
        references.append([target_tokens])
        hypotheses.append(decoded[:-1])  # remove <eos> token
    return bleu_score(hypotheses, references)
def save_checkpoint(state, filename="my_checkpoint.pth.tar"):
    """Serializes a checkpoint dict (model/optimizer state) to `filename`."""
    print("=> Saving checkpoint")
    torch.save(state, filename)
def load_checkpoint(checkpoint, model, optimizer):
print("=> Loading checkpoint")
model.load_state_dict(checkpoint["state_dict"])
optimizer.load_state_dict(checkpoint["optimizer"]) | 30.518987 | 89 | 0.669432 | import torch
import spacy
from torchtext.data.metrics import bleu_score
import sys
def translate_sentence(model, sentence, german, english, device, max_length=50):
    """Greedy-decodes the model's output tokens for one input sentence.

    sentence may be a raw string or a pre-tokenized list; german/english
    are the source/target torchtext Fields. Returns tokens without <sos>.
    """
    # NOTE(review): loads the ENGLISH spaCy model despite the variable name;
    # also, `type(sentence) == str` would be safer as isinstance().
    spacy_ger = spacy.load("en")
    if type(sentence) == str:
        tokens = [token.text.lower() for token in spacy_ger(sentence)]
    else:
        tokens = [token.lower() for token in sentence]
    # Wrap with <sos>/<eos>, then map tokens to vocabulary indices.
    tokens.insert(0, german.init_token)
    tokens.append(german.eos_token)
    text_to_indices = [german.vocab.stoi[token] for token in tokens]
    # Shape (seq_len, 1): a batch of one sentence.
    sentence_tensor = torch.LongTensor(text_to_indices).unsqueeze(1).to(device)
    with torch.no_grad():
        outputs_encoder, hiddens, cells = model.encoder(sentence_tensor)
    outputs = [english.vocab.stoi["<sos>"]]
    # Decode one token at a time, feeding back the argmax choice.
    for _ in range(max_length):
        previous_word = torch.LongTensor([outputs[-1]]).to(device)
        with torch.no_grad():
            output, hiddens, cells = model.decoder(
                previous_word, outputs_encoder, hiddens, cells
            )
        best_guess = output.argmax(1).item()
        outputs.append(best_guess)
        # Stop once the model predicts end-of-sentence.
        if output.argmax(1).item() == english.vocab.stoi["<eos>"]:
            break
    translated_sentence = [english.vocab.itos[idx] for idx in outputs]
    # remove start token
    return translated_sentence[1:]
def bleu(data, model, german, english, device):
    """Returns corpus BLEU of greedy decodes over `data` (torchtext examples)."""
    targets = []
    outputs = []
    for example in data:
        src = vars(example)["source"]
        trg = vars(example)["target"]
        prediction = translate_sentence(model, src, german, english, device)
        prediction = prediction[:-1]  # remove <eos> token
        # bleu_score expects a list of reference lists per hypothesis.
        targets.append([trg])
        outputs.append(prediction)
    return bleu_score(outputs, targets)
def save_checkpoint(state, filename="my_checkpoint.pth.tar"):
    """Writes a checkpoint dict (model/optimizer state) to `filename`."""
    print("=> Saving checkpoint")
    torch.save(state, filename)
def load_checkpoint(checkpoint, model, optimizer):
print("=> Loading checkpoint")
model.load_state_dict(checkpoint["state_dict"])
optimizer.load_state_dict(checkpoint["optimizer"]) | true | true |
1c3b3b3590471ebbc7d8519cc5359bc0b07f4db5 | 12,468 | py | Python | cloudcafe/compute/hypervisors/xenserver/models/virtual_machine.py | rcbops-qa/cloudcafe | d937f85496aadafbb94a330b9adb8ea18bee79ba | [
"Apache-2.0"
] | null | null | null | cloudcafe/compute/hypervisors/xenserver/models/virtual_machine.py | rcbops-qa/cloudcafe | d937f85496aadafbb94a330b9adb8ea18bee79ba | [
"Apache-2.0"
] | null | null | null | cloudcafe/compute/hypervisors/xenserver/models/virtual_machine.py | rcbops-qa/cloudcafe | d937f85496aadafbb94a330b9adb8ea18bee79ba | [
"Apache-2.0"
] | 1 | 2020-04-13T17:44:28.000Z | 2020-04-13T17:44:28.000Z | """
Copyright 2013 Rackspace
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from cafe.engine.models.base import BaseModel
class VirtualMachine(BaseModel):
    """Model of a XenServer VM record, mirroring the XenAPI VM class fields.

    Instances are plain data holders; _dict_to_obj builds one from the raw
    key/value record returned by the hypervisor.
    """

    def __init__(self, label=None, description=None, vcpus_at_startup=None,
                 power_state=None, vcpus_params=None, vcpus_max=None,
                 xenstore_data=None, memory_static_min=None,
                 memory_static_max=None, memory_dynamic_min=None,
                 memory_dynamic_max=None, allowed_operations=None,
                 blocked_operations=None, ha_restart_priority=None,
                 pv_bootloader=None, snapshots=None, shutdown_delay=None,
                 domid=None, pci_bus=None, children=None,
                 hvm_shadow_multiplier=None, start_delay=None,
                 actions_after_crash=None, memory_target=None, uuid=None,
                 pv_ramdisk=None, tags=None, recommendations=None,
                 is_control_domain=None, hvm_boot_params=None,
                 snapshot_time=None, actions_after_shutdown=None,
                 user_version=None, snapshot_info=None,
                 transportable_snapshot_id=None, is_a_template=None,
                 crash_dumps=None, is_snapshot_from_vmpp=None,
                 is_a_snapshot=None, blobs=None, version=None,
                 current_operations=None, domarch=None,
                 pv_bootloader_args=None, snapshot_metadata=None,
                 other_config=None, actions_after_reboot=None,
                 attached_pcis=None, pv_legacy_args=None, bios_strings=None,
                 last_boot_cpu_flags=None, order=None):
        """Stores each field verbatim; all parameters default to None."""
        super(VirtualMachine, self).__init__()
        self.label = label
        self.description = description
        self.vcpus_at_startup = vcpus_at_startup
        self.power_state = power_state
        self.vcpus_params = vcpus_params
        self.vcpus_max = vcpus_max
        self.xenstore_data = xenstore_data
        self.memory_static_min = memory_static_min
        self.memory_static_max = memory_static_max
        # BUG FIX: these two assignments were swapped in the original --
        # memory_dynamic_max received memory_dynamic_min and vice versa.
        self.memory_dynamic_min = memory_dynamic_min
        self.memory_dynamic_max = memory_dynamic_max
        self.allowed_operations = allowed_operations
        self.blocked_operations = blocked_operations
        self.ha_restart_priority = ha_restart_priority
        self.pv_bootloader = pv_bootloader
        self.snapshots = snapshots
        self.shutdown_delay = shutdown_delay
        self.domid = domid
        self.pci_bus = pci_bus
        self.children = children
        self.hvm_shadow_multiplier = hvm_shadow_multiplier
        self.start_delay = start_delay
        self.actions_after_crash = actions_after_crash
        self.memory_target = memory_target
        self.uuid = uuid
        self.pv_ramdisk = pv_ramdisk
        self.tags = tags
        self.recommendations = recommendations
        self.is_control_domain = is_control_domain
        self.hvm_boot_params = hvm_boot_params
        self.snapshot_time = snapshot_time
        self.actions_after_shutdown = actions_after_shutdown
        self.user_version = user_version
        self.snapshot_info = snapshot_info
        self.transportable_snapshot_id = transportable_snapshot_id
        self.is_a_template = is_a_template
        self.crash_dumps = crash_dumps
        self.is_snapshot_from_vmpp = is_snapshot_from_vmpp
        self.is_a_snapshot = is_a_snapshot
        self.blobs = blobs
        self.version = version
        self.current_operations = current_operations
        self.domarch = domarch
        self.pv_bootloader_args = pv_bootloader_args
        self.snapshot_metadata = snapshot_metadata
        self.other_config = other_config
        self.actions_after_reboot = actions_after_reboot
        self.attached_pcis = attached_pcis
        self.pv_legacy_args = pv_legacy_args
        self.bios_strings = bios_strings
        self.last_boot_cpu_flags = last_boot_cpu_flags
        self.order = order

    @classmethod
    def _dict_to_obj(cls, **kwargs):
        """Builds a VirtualMachine from a raw XenAPI VM record's key/values.

        Missing keys simply become None on the resulting object.
        """
        # NOTE(review): 'pv_ramdisk' is fetched lower-case while the other
        # PV_* fields use the capitalized XenAPI names (PV_bootloader, ...)
        # -- confirm the key casing against the hypervisor's actual record.
        vm = VirtualMachine(
            label=kwargs.get('name_label'),
            description=kwargs.get('name_description'),
            vcpus_at_startup=kwargs.get('VCPUs_at_startup'),
            power_state=kwargs.get('power_state'),
            vcpus_params=kwargs.get('vcpus_params'),
            vcpus_max=kwargs.get('VCPUs_max'), version=kwargs.get('version'),
            xenstore_data=kwargs.get('xenstore_data'),
            memory_static_min=kwargs.get('memory_static_min'),
            memory_static_max=kwargs.get('memory_static_max'),
            memory_dynamic_min=kwargs.get('memory_dynamic_min'),
            memory_dynamic_max=kwargs.get('memory_dynamic_max'),
            allowed_operations=kwargs.get('allowed_operations'),
            blocked_operations=kwargs.get('blocked_operations'),
            ha_restart_priority=kwargs.get('ha_restart_priority'),
            pv_bootloader=kwargs.get('PV_bootloader'),
            snapshots=kwargs.get('snapshots'), domid=kwargs.get('domid'),
            shutdown_delay=kwargs.get('shutdown_delay'),
            pci_bus=kwargs.get('PCI_bus'), children=kwargs.get('children'),
            hvm_shadow_multiplier=kwargs.get('HVM_shadow_multiplier'),
            start_delay=kwargs.get('start_delay'), tags=kwargs.get('tags'),
            actions_after_crash=kwargs.get('actions_after_crash'),
            memory_target=kwargs.get('memory_target'),
            uuid=kwargs.get('uuid'), pv_ramdisk=kwargs.get('pv_ramdisk'),
            recommendations=kwargs.get('recommendations'),
            is_control_domain=kwargs.get('is_control_domain'),
            hvm_boot_params=kwargs.get('HVM_boot_params'),
            snapshot_time=kwargs.get('snapshot_time'),
            actions_after_shutdown=kwargs.get('actions_after_shutdown'),
            user_version=kwargs.get('user_version'),
            snapshot_info=kwargs.get('snapshot_info'),
            transportable_snapshot_id=kwargs.get('transportable_snapshot_id'),
            is_a_template=kwargs.get('is_a_template'),
            crash_dumps=kwargs.get('crash_dumps'), blobs=kwargs.get('blobs'),
            is_snapshot_from_vmpp=kwargs.get('is_snapshot_from_vmpp'),
            is_a_snapshot=kwargs.get('is_a_snapshot'),
            current_operations=kwargs.get('current_operations'),
            domarch=kwargs.get('domarch'), order=kwargs.get('order'),
            pv_bootloader_args=kwargs.get('PV_bootloader_args'),
            snapshot_metadata=kwargs.get('snapshot_metadata'),
            other_config=kwargs.get('other_config'),
            actions_after_reboot=kwargs.get('actions_after_reboot'),
            attached_pcis=kwargs.get('attached_PCIs'),
            pv_legacy_args=kwargs.get('PV_legacy_args'),
            bios_strings=kwargs.get('bios_strings'),
            last_boot_cpu_flags=kwargs.get('last_boot_cpu_flags'))
        return vm
class VirtualBlockDevice(BaseModel):
    """Model of a XenServer VBD (virtual block device) record.

    Plain data holder; _dict_to_obj builds one from a raw key/value record.
    """

    def __init__(
            self, userdevice=None, runtime_properties=None,
            allowed_operations=None, uuid=None, storage_lock=None,
            qos_supported_algorithms=None, status_code=None,
            type=None, empty=None, status_detail=None, device=None,
            qos_algorithm_type=None, unpluggable=None,
            current_operations=None, bootable=None, other_config=None,
            currently_attached=None, mode=None, qos_algorithm_params=None):
        """Stores each field verbatim; all parameters default to None.

        NOTE: the `type` parameter shadows the builtin inside this method,
        but the name is kept to match the hypervisor record's field name.
        """
        super(VirtualBlockDevice, self).__init__()
        self.userdevice = userdevice
        self.runtime_properties = runtime_properties
        self.allowed_operations = allowed_operations
        self.uuid = uuid
        self.storage_lock = storage_lock
        self.qos_supported_algorithms = qos_supported_algorithms
        self.status_code = status_code
        self.type = type
        self.empty = empty
        self.status_detail = status_detail
        self.device = device
        self.qos_algorithm_type = qos_algorithm_type
        self.unpluggable = unpluggable
        self.current_operations = current_operations
        self.bootable = bootable
        self.other_config = other_config
        self.currently_attached = currently_attached
        self.mode = mode
        self.qos_algorithm_params = qos_algorithm_params

    @classmethod
    def _dict_to_obj(cls, **kwargs):
        """Builds a VirtualBlockDevice from a raw record; missing keys -> None."""
        vbd = VirtualBlockDevice(
            userdevice=kwargs.get('userdevice'),
            runtime_properties=kwargs.get('runtime_properties'),
            allowed_operations=kwargs.get('allowed_operations'),
            uuid=kwargs.get('uuid'), storage_lock=kwargs.get('storage_lock'),
            qos_supported_algorithms=kwargs.get('qos_supported_algorithms'),
            status_code=kwargs.get('status_code'), type=kwargs.get('type'),
            empty=kwargs.get('empty'), device=kwargs.get('device'),
            status_detail=kwargs.get('status_detail'),
            qos_algorithm_type=kwargs.get('qos_algorithm_type'),
            unpluggable=kwargs.get('unpluggable'),
            current_operations=kwargs.get('current_operations'),
            bootable=kwargs.get('bootable'), mode=kwargs.get('mode'),
            other_config=kwargs.get('other_config'),
            currently_attached=kwargs.get('currently_attached'),
            qos_algorithm_params=kwargs.get('qos_algorithm_params'))
        return vbd
class VirtualDiskImage(BaseModel):
    """Model of a XenServer VDI (virtual disk image) record.

    Plain data holder; _dict_to_obj builds one from a raw key/value record.
    """

    def __init__(self, managed=None, snapshots=None, allowed_operations=None,
                 on_boot=None, description=None, read_only=None, uuid=None,
                 storage_lock=None, label=None, tags=None, location=None,
                 type=None, shareable=None, snapshot_time=None, missing=None,
                 xenstore_data=None, crash_dumps=None, virtual_size=None,
                 is_a_snapshot=None, current_operations=None,
                 physical_utilisation=None, allow_caching=None,
                 metadata_latest=None):
        """Stores each field verbatim; all parameters default to None."""
        super(VirtualDiskImage, self).__init__()
        self.managed = managed
        self.snapshots = snapshots
        self.allowed_operations = allowed_operations
        self.on_boot = on_boot
        self.description = description
        self.read_only = read_only
        self.uuid = uuid
        self.storage_lock = storage_lock
        self.label = label
        self.tags = tags
        self.location = location
        self.type = type
        self.shareable = shareable
        self.snapshot_time = snapshot_time
        self.missing = missing
        self.xenstore_data = xenstore_data
        self.crash_dumps = crash_dumps
        self.virtual_size = virtual_size
        self.is_a_snapshot = is_a_snapshot
        self.current_operations = current_operations
        self.physical_utilisation = physical_utilisation
        self.allow_caching = allow_caching
        self.metadata_latest = metadata_latest

    @classmethod
    def _dict_to_obj(cls, **kwargs):
        """Builds a VirtualDiskImage from a raw record; missing keys -> None.

        NOTE(review): this reads 'label'/'description' keys, while
        VirtualMachine._dict_to_obj reads 'name_label'/'name_description'
        -- confirm which key names the VDI records actually carry.
        """
        vdi = VirtualDiskImage(
            managed=kwargs.get('managed'), snapshots=kwargs.get('snapshots'),
            allowed_operations=kwargs.get('allowed_operations'),
            on_boot=kwargs.get('on_boot'), read_only=kwargs.get('read_only'),
            description=kwargs.get('description'), uuid=kwargs.get('uuid'),
            storage_lock=kwargs.get('storage_lock'), tags=kwargs.get('tags'),
            label=kwargs.get('label'), location=kwargs.get('location'),
            type=kwargs.get('type'), shareable=kwargs.get('shareable'),
            snapshot_time=kwargs.get('snapshot_time'),
            missing=kwargs.get('missing'),
            virtual_size=kwargs.get('virtual_size'),
            crash_dumps=kwargs.get('crash_dumps'),
            xenstore_data=kwargs.get('xenstore_data'),
            is_a_snapshot=kwargs.get('is_a_snapshot'),
            current_operations=kwargs.get('current_operations'),
            physical_utilisation=kwargs.get('physical_utilisation'),
            allow_caching=kwargs.get('allow_caching'),
            metadata_latest=kwargs.get('metadata_latest'))
        return vdi
| 48.513619 | 78 | 0.671399 |
from cafe.engine.models.base import BaseModel
class VirtualMachine(BaseModel):
    """Model for a XenServer virtual machine (VM) record.

    Every constructor argument is mirrored onto an attribute of the same
    name; all default to None so partial records can be represented.
    """

    def __init__(self, label=None, description=None, vcpus_at_startup=None,
                 power_state=None, vcpus_params=None, vcpus_max=None,
                 xenstore_data=None, memory_static_min=None,
                 memory_static_max=None, memory_dynamic_min=None,
                 memory_dynamic_max=None, allowed_operations=None,
                 blocked_operations=None, ha_restart_priority=None,
                 pv_bootloader=None, snapshots=None, shutdown_delay=None,
                 domid=None, pci_bus=None, children=None,
                 hvm_shadow_multiplier=None, start_delay=None,
                 actions_after_crash=None, memory_target=None, uuid=None,
                 pv_ramdisk=None, tags=None, recommendations=None,
                 is_control_domain=None, hvm_boot_params=None,
                 snapshot_time=None, actions_after_shutdown=None,
                 user_version=None, snapshot_info=None,
                 transportable_snapshot_id=None, is_a_template=None,
                 crash_dumps=None, is_snapshot_from_vmpp=None,
                 is_a_snapshot=None, blobs=None, version=None,
                 current_operations=None, domarch=None,
                 pv_bootloader_args=None, snapshot_metadata=None,
                 other_config=None, actions_after_reboot=None,
                 attached_pcis=None, pv_legacy_args=None, bios_strings=None,
                 last_boot_cpu_flags=None, order=None):
        super(VirtualMachine, self).__init__()
        # Snapshot the arguments before any other local is created, then
        # mirror each one onto an attribute of the same name.  This also
        # fixes a bug in the original code, which cross-assigned the
        # memory_dynamic_min and memory_dynamic_max attributes
        # (self.memory_dynamic_max = memory_dynamic_min and vice versa).
        params = dict(locals())
        del params['self']
        for name, value in params.items():
            setattr(self, name, value)

    @classmethod
    def _dict_to_obj(cls, **kwargs):
        """Build a VirtualMachine from a deserialized dictionary.

        Maps the XenAPI field names (e.g. 'name_label', 'VCPUs_max') onto
        the model's attribute names.
        """
        vm = VirtualMachine(
            label=kwargs.get('name_label'),
            description=kwargs.get('name_description'),
            vcpus_at_startup=kwargs.get('VCPUs_at_startup'),
            power_state=kwargs.get('power_state'),
            vcpus_params=kwargs.get('vcpus_params'),
            vcpus_max=kwargs.get('VCPUs_max'), version=kwargs.get('version'),
            xenstore_data=kwargs.get('xenstore_data'),
            memory_static_min=kwargs.get('memory_static_min'),
            memory_static_max=kwargs.get('memory_static_max'),
            memory_dynamic_min=kwargs.get('memory_dynamic_min'),
            memory_dynamic_max=kwargs.get('memory_dynamic_max'),
            allowed_operations=kwargs.get('allowed_operations'),
            blocked_operations=kwargs.get('blocked_operations'),
            ha_restart_priority=kwargs.get('ha_restart_priority'),
            pv_bootloader=kwargs.get('PV_bootloader'),
            snapshots=kwargs.get('snapshots'), domid=kwargs.get('domid'),
            shutdown_delay=kwargs.get('shutdown_delay'),
            pci_bus=kwargs.get('PCI_bus'), children=kwargs.get('children'),
            hvm_shadow_multiplier=kwargs.get('HVM_shadow_multiplier'),
            start_delay=kwargs.get('start_delay'), tags=kwargs.get('tags'),
            actions_after_crash=kwargs.get('actions_after_crash'),
            memory_target=kwargs.get('memory_target'),
            uuid=kwargs.get('uuid'), pv_ramdisk=kwargs.get('pv_ramdisk'),
            recommendations=kwargs.get('recommendations'),
            is_control_domain=kwargs.get('is_control_domain'),
            hvm_boot_params=kwargs.get('HVM_boot_params'),
            snapshot_time=kwargs.get('snapshot_time'),
            actions_after_shutdown=kwargs.get('actions_after_shutdown'),
            user_version=kwargs.get('user_version'),
            snapshot_info=kwargs.get('snapshot_info'),
            transportable_snapshot_id=kwargs.get('transportable_snapshot_id'),
            is_a_template=kwargs.get('is_a_template'),
            crash_dumps=kwargs.get('crash_dumps'), blobs=kwargs.get('blobs'),
            is_snapshot_from_vmpp=kwargs.get('is_snapshot_from_vmpp'),
            is_a_snapshot=kwargs.get('is_a_snapshot'),
            current_operations=kwargs.get('current_operations'),
            domarch=kwargs.get('domarch'), order=kwargs.get('order'),
            pv_bootloader_args=kwargs.get('PV_bootloader_args'),
            snapshot_metadata=kwargs.get('snapshot_metadata'),
            other_config=kwargs.get('other_config'),
            actions_after_reboot=kwargs.get('actions_after_reboot'),
            attached_pcis=kwargs.get('attached_PCIs'),
            pv_legacy_args=kwargs.get('PV_legacy_args'),
            bios_strings=kwargs.get('bios_strings'),
            last_boot_cpu_flags=kwargs.get('last_boot_cpu_flags'))
        return vm
class VirtualBlockDevice(BaseModel):
    """Model for a XenServer virtual block device (VBD) record.

    Every constructor argument is mirrored onto an attribute of the same
    name; all default to None so partial records can be represented.
    """

    # Attribute names shared by __init__ and the keys read from the
    # source dictionary in _dict_to_obj.
    _FIELDS = (
        'userdevice', 'runtime_properties', 'allowed_operations', 'uuid',
        'storage_lock', 'qos_supported_algorithms', 'status_code', 'type',
        'empty', 'status_detail', 'device', 'qos_algorithm_type',
        'unpluggable', 'current_operations', 'bootable', 'other_config',
        'currently_attached', 'mode', 'qos_algorithm_params')

    def __init__(
            self, userdevice=None, runtime_properties=None,
            allowed_operations=None, uuid=None, storage_lock=None,
            qos_supported_algorithms=None, status_code=None,
            type=None, empty=None, status_detail=None, device=None,
            qos_algorithm_type=None, unpluggable=None,
            current_operations=None, bootable=None, other_config=None,
            currently_attached=None, mode=None, qos_algorithm_params=None):
        super(VirtualBlockDevice, self).__init__()
        # Snapshot the arguments before any other local is created, then
        # copy each one onto an attribute of the same name.
        arguments = dict(locals())
        for field in self._FIELDS:
            setattr(self, field, arguments[field])

    @classmethod
    def _dict_to_obj(cls, **kwargs):
        """Build a VirtualBlockDevice from a deserialized dictionary."""
        return VirtualBlockDevice(
            **{field: kwargs.get(field) for field in cls._FIELDS})
class VirtualDiskImage(BaseModel):
    """Model for a XenServer virtual disk image (VDI) record."""

    def __init__(self, managed=None, snapshots=None, allowed_operations=None,
                 on_boot=None, description=None, read_only=None, uuid=None,
                 storage_lock=None, label=None, tags=None, location=None,
                 type=None, shareable=None, snapshot_time=None, missing=None,
                 xenstore_data=None, crash_dumps=None, virtual_size=None,
                 is_a_snapshot=None, current_operations=None,
                 physical_utilisation=None, allow_caching=None,
                 metadata_latest=None):
        super(VirtualDiskImage, self).__init__()
        # Mirror every constructor argument onto an attribute of the
        # same name; capture them before any other local is created.
        args = dict(locals())
        del args['self']
        for name, value in args.items():
            setattr(self, name, value)

    @classmethod
    def _dict_to_obj(cls, **kwargs):
        """Build a VirtualDiskImage from a deserialized dictionary."""
        wanted = (
            'managed', 'snapshots', 'allowed_operations', 'on_boot',
            'description', 'read_only', 'uuid', 'storage_lock', 'label',
            'tags', 'location', 'type', 'shareable', 'snapshot_time',
            'missing', 'xenstore_data', 'crash_dumps', 'virtual_size',
            'is_a_snapshot', 'current_operations', 'physical_utilisation',
            'allow_caching', 'metadata_latest')
        return VirtualDiskImage(
            **{name: kwargs.get(name) for name in wanted})
| true | true |
1c3b3b814414fec7149bdda04bee0237ffec2988 | 952 | py | Python | src/testing/TestON/drivers/common/api/controllerdriver.py | securedataplane/preacher | 2f76581de47036e79cd6e1183948c88b35ce4950 | [
"MIT"
] | 1 | 2020-07-23T08:06:44.000Z | 2020-07-23T08:06:44.000Z | src/testing/TestON/drivers/common/api/controllerdriver.py | securedataplane/preacher | 2f76581de47036e79cd6e1183948c88b35ce4950 | [
"MIT"
] | null | null | null | src/testing/TestON/drivers/common/api/controllerdriver.py | securedataplane/preacher | 2f76581de47036e79cd6e1183948c88b35ce4950 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
"""
Created on 29-Nov-2012
author:: Anil Kumar ( anilkumar.s@paxterrasolutions.com )
TestON is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 2 of the License, or
( at your option ) any later version.
TestON is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with TestON. If not, see <http://www.gnu.org/licenses/>.
"""
from drivers.common.apidriver import API
class Controller( API ):
    """Common base class for controller API drivers.

    The common functions for emulators are included in the emulator
    driver; this class only wires controller drivers into the shared
    API driver machinery.
    """
    def __init__( self ):
        # BUG FIX: the original called super( API, self ).__init__(),
        # which looks up __init__ on API's *parent* and therefore skips
        # API.__init__ entirely.  Initialise the immediate parent class.
        super( Controller, self ).__init__()
| 30.709677 | 72 | 0.72479 |
from drivers.common.apidriver import API
class Controller( API ):
    """Common base class for controller API drivers."""
    def __init__( self ):
        # NOTE(review): super( API, self ) resolves __init__ on API's
        # *parent*, so API.__init__ itself is never run here -- this was
        # likely intended to be super( Controller, self ); confirm
        # against the TestON API driver before changing.
        super( API, self ).__init__()
| true | true |
1c3b3d147d89085e93dfaf79c785e932c04e4991 | 1,787 | py | Python | panko-6.0.0/panko/api/controllers/root.py | scottwedge/OpenStack-Stein | 7077d1f602031dace92916f14e36b124f474de15 | [
"Apache-2.0"
] | null | null | null | panko-6.0.0/panko/api/controllers/root.py | scottwedge/OpenStack-Stein | 7077d1f602031dace92916f14e36b124f474de15 | [
"Apache-2.0"
] | 5 | 2019-08-14T06:46:03.000Z | 2021-12-13T20:01:25.000Z | panko-6.0.0/panko/api/controllers/root.py | scottwedge/OpenStack-Stein | 7077d1f602031dace92916f14e36b124f474de15 | [
"Apache-2.0"
] | 2 | 2020-03-15T01:24:15.000Z | 2020-07-22T20:34:26.000Z | #
# Copyright 2012 New Dream Network, LLC (DreamHost)
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import pecan
MEDIA_TYPE_JSON = 'application/vnd.openstack.telemetry-%s+json'
MEDIA_TYPE_XML = 'application/vnd.openstack.telemetry-%s+xml'
class VersionsController(object):
    """Pecan root controller advertising the available API versions."""

    @pecan.expose('json')
    def index(self):
        """Return the versions document describing this endpoint."""
        root = pecan.request.application_url
        # Only the v2 API exists; describe it with its release date.
        known_versions = ({'tag': 'v2', 'date': '2013-02-13T00:00:00Z'},)
        values = [version_descriptor(root, entry['tag'], entry['date'])
                  for entry in known_versions]
        return {'versions': {'values': values}}
def version_descriptor(base_url, version, released_on):
    """Describe a single API version as a JSON-serialisable dict.

    :param base_url: root URL the version's self link is relative to
    :param version: version tag, e.g. 'v2'
    :param released_on: release timestamp string reported as 'updated'
    """
    self_link = {'href': version_url(base_url, version), 'rel': 'self', }
    docs_link = {'href': 'https://docs.openstack.org/',
                 'rel': 'describedby', 'type': 'text/html', }
    media_types = [
        {'base': 'application/json', 'type': MEDIA_TYPE_JSON % version, },
        {'base': 'application/xml', 'type': MEDIA_TYPE_XML % version, },
    ]
    return {
        'id': version,
        'links': [self_link, docs_link],
        'media-types': media_types,
        'status': 'stable',
        'updated': released_on,
    }
def version_url(base_url, version_number):
    """Return the endpoint URL for *version_number* under *base_url*."""
    return '{0}/{1}'.format(base_url, version_number)
| 34.365385 | 78 | 0.645215 |
import pecan
MEDIA_TYPE_JSON = 'application/vnd.openstack.telemetry-%s+json'
MEDIA_TYPE_XML = 'application/vnd.openstack.telemetry-%s+xml'
class VersionsController(object):
    """Pecan root controller advertising the available API versions."""
    @pecan.expose('json')
    def index(self):
        """Return the JSON versions document for this endpoint."""
        base_url = pecan.request.application_url
        # Only the v2 API exists; list it with its release date.
        available = [{'tag': 'v2', 'date': '2013-02-13T00:00:00Z', }]
        collected = [version_descriptor(base_url, v['tag'], v['date'])
                     for v in available]
        versions = {'versions': {'values': collected}}
        return versions
def version_descriptor(base_url, version, released_on):
    """Build the descriptor dict for one API version.

    :param base_url: root URL the version's self link is relative to
    :param version: version tag, e.g. 'v2'
    :param released_on: release timestamp string reported as 'updated'
    """
    url = version_url(base_url, version)
    return {
        'id': version,
        'links': [
            {'href': url, 'rel': 'self', },
            {'href': 'https://docs.openstack.org/',
             'rel': 'describedby', 'type': 'text/html', }],
        'media-types': [
            {'base': 'application/json', 'type': MEDIA_TYPE_JSON % version, },
            {'base': 'application/xml', 'type': MEDIA_TYPE_XML % version, }],
        'status': 'stable',
        'updated': released_on,
    }
def version_url(base_url, version_number):
    """Join *base_url* and *version_number* into a version endpoint URL."""
    return '%s/%s' % (base_url, version_number)
| true | true |
1c3b3d2af96538f5b7bae8a9331d2391119e7274 | 566 | py | Python | exercicios/Lista2/Q27.py | AlexandrePeBrito/CursoUdemyPython | 3de58cb30c9f333b32078309847179ff3f9d7e22 | [
"MIT"
] | null | null | null | exercicios/Lista2/Q27.py | AlexandrePeBrito/CursoUdemyPython | 3de58cb30c9f333b32078309847179ff3f9d7e22 | [
"MIT"
] | null | null | null | exercicios/Lista2/Q27.py | AlexandrePeBrito/CursoUdemyPython | 3de58cb30c9f333b32078309847179ff3f9d7e22 | [
"MIT"
] | null | null | null | #Escreva um programa que, dada a idade de um nadador,
#classifique-o em uma das seguintes categorias:
#Infantil A | 5 a 7
#Infantil B | 8 a 10
#Juvenil A | 11 a 13
#Juvenil B | 14 a 17
#Sênior | maiores de 18 anos
def categoria_nadador(idade):
    """Retorna o nome da categoria do nadador para a idade dada.

    Retorna None para idades abaixo de 5 anos (fora das categorias da
    especificacao acima).
    """
    if 5 <= idade <= 7:
        return "Infantil A"
    elif 8 <= idade <= 10:
        return "Infantil B"
    elif 11 <= idade <= 13:
        # typo fix: the original printed "Juvanil A"
        return "Juvenil A"
    elif 14 <= idade <= 17:
        # BUG FIX: 14-17 is "Juvenil B" per the specification above; the
        # original duplicated the Juvenil A message here.
        return "Juvenil B"
    elif idade >= 18:
        return "Senior"
    return None


if __name__ == "__main__":
    idade = int(input("Informe a idade: "))
    categoria = categoria_nadador(idade)
    if categoria is None:
        # previously the program printed nothing for ages below 5
        print("Idade fora das categorias (minimo 5 anos)")
    else:
        print("A sua categoria eh " + categoria)
# Classify a swimmer into an age category:
# Infantil A 5-7, Infantil B 8-10, Juvenil A 11-13, Juvenil B 14-17,
# Senior 18+.
idade = int(input("Informe a idade: "))
if 5 <= idade <= 7:
    print("A sua categoria eh Infantil A")
elif 8 <= idade <= 10:
    print("A sua categoria eh Infantil B")
elif 11 <= idade <= 13:
    # typo fix: was "Juvanil A"
    print("A sua categoria eh Juvenil A")
elif 14 <= idade <= 17:
    # BUG FIX: 14-17 is Juvenil B; the original repeated the Juvenil A
    # message for this range.
    print("A sua categoria eh Juvenil B")
elif idade >= 18:
    print("A sua categoria eh Senior")
else:
    # previously the program printed nothing for ages below 5
    print("Idade fora das categorias (minimo 5 anos)")
1c3b3df0acf7e687f9e1ffd2ae0f8fb09f6435f5 | 43,926 | py | Python | nova/tests/unit/virt/libvirt/test_utils.py | huiweics/nova | ca4226cb87b4a271ee0ad72a6b93662feea3025e | [
"Apache-2.0"
] | null | null | null | nova/tests/unit/virt/libvirt/test_utils.py | huiweics/nova | ca4226cb87b4a271ee0ad72a6b93662feea3025e | [
"Apache-2.0"
] | 1 | 2021-03-31T19:35:21.000Z | 2021-03-31T19:35:21.000Z | nova/tests/unit/virt/libvirt/test_utils.py | huiweics/nova | ca4226cb87b4a271ee0ad72a6b93662feea3025e | [
"Apache-2.0"
] | 1 | 2020-07-22T09:09:38.000Z | 2020-07-22T09:09:38.000Z | # Copyright 2012 NTT Data. All Rights Reserved.
# Copyright 2012 Yahoo! Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import functools
import os
import tempfile
import ddt
import mock
import os_traits
from oslo_concurrency import processutils
from oslo_config import cfg
from oslo_utils import fileutils
from oslo_utils.fixture import uuidsentinel as uuids
import six
from nova.compute import utils as compute_utils
from nova import context
from nova import exception
from nova import objects
from nova.objects import fields as obj_fields
import nova.privsep.fs
import nova.privsep.qemu
from nova import test
from nova.tests import fixtures as nova_fixtures
from nova.tests.unit import fake_instance
from nova.tests.unit.virt.libvirt import fakelibvirt
from nova.virt.disk import api as disk
from nova.virt import images
from nova.virt.libvirt import guest as libvirt_guest
from nova.virt.libvirt import utils as libvirt_utils
CONF = cfg.CONF
@ddt.ddt
class LibvirtUtilsTestCase(test.NoDBTestCase):
    @mock.patch('oslo_concurrency.processutils.execute')
    def test_copy_image_local(self, mock_execute):
        """Without a host, copy_image() falls back to a local 'cp -r'."""
        libvirt_utils.copy_image('src', 'dest')
        mock_execute.assert_called_once_with('cp', '-r', 'src', 'dest')
    @mock.patch('nova.virt.libvirt.volume.remotefs.SshDriver.copy_file')
    def test_copy_image_remote_ssh(self, mock_rem_fs_remove):
        """With a host and the ssh transport, copy_image() delegates to
        the SSH remote-filesystem driver."""
        self.flags(remote_filesystem_transport='ssh', group='libvirt')
        libvirt_utils.copy_image('src', 'dest', host='host')
        mock_rem_fs_remove.assert_called_once_with('src', 'host:dest',
            on_completion=None, on_execute=None, compression=True)
    @mock.patch('nova.virt.libvirt.volume.remotefs.RsyncDriver.copy_file')
    def test_copy_image_remote_rsync(self, mock_rem_fs_remove):
        """With a host and the rsync transport, copy_image() delegates to
        the rsync remote-filesystem driver."""
        self.flags(remote_filesystem_transport='rsync', group='libvirt')
        libvirt_utils.copy_image('src', 'dest', host='host')
        mock_rem_fs_remove.assert_called_once_with('src', 'host:dest',
            on_completion=None, on_execute=None, compression=True)
    @mock.patch('os.path.exists', return_value=True)
    def test_disk_type_from_path(self, mock_exists):
        """Paths under /dev are reported as 'lvm', 'rbd:' URIs as 'rbd',
        and anything else gets no specific type (None)."""
        # Seems like lvm detection
        # if its in /dev ??
        for p in ['/dev/b', '/dev/blah/blah']:
            d_type = libvirt_utils.get_disk_type_from_path(p)
            self.assertEqual('lvm', d_type)
        # Try rbd detection
        d_type = libvirt_utils.get_disk_type_from_path('rbd:pool/instance')
        self.assertEqual('rbd', d_type)
        # Try the other types
        path = '/myhome/disk.config'
        d_type = libvirt_utils.get_disk_type_from_path(path)
        self.assertIsNone(d_type)
    @mock.patch('os.path.exists', return_value=True)
    @mock.patch('os.path.isdir', return_value=True)
    def test_disk_type_ploop(self, mock_isdir, mock_exists):
        """A directory containing DiskDescriptor.xml is detected as a
        parallels loopback ('ploop') disk."""
        path = '/some/path'
        d_type = libvirt_utils.get_disk_type_from_path(path)
        mock_isdir.assert_called_once_with(path)
        mock_exists.assert_called_once_with("%s/DiskDescriptor.xml" % path)
        self.assertEqual('ploop', d_type)
    @mock.patch('os.path.exists', return_value=True)
    @mock.patch('oslo_concurrency.processutils.execute')
    def test_disk_backing(self, mock_execute, mock_exists):
        """get_disk_backing_file() returns None when the qemu-img info
        output contains no "backing file" line."""
        path = '/myhome/disk.config'
        template_output = """image: %(path)s
file format: raw
virtual size: 2K (2048 bytes)
cluster_size: 65536
disk size: 96K
"""
        output = template_output % ({
            'path': path,
        })
        mock_execute.return_value = (output, '')
        d_backing = libvirt_utils.get_disk_backing_file(path)
        mock_execute.assert_called_once_with(
            'env', 'LC_ALL=C', 'LANG=C', 'qemu-img', 'info', path,
            prlimit=nova.privsep.qemu.QEMU_IMG_LIMITS)
        mock_exists.assert_called_once_with(path)
        self.assertIsNone(d_backing)
def _test_disk_size(self, mock_execute, path, expected_size):
d_size = libvirt_utils.get_disk_size(path)
self.assertEqual(expected_size, d_size)
mock_execute.assert_called_once_with(
'env', 'LC_ALL=C', 'LANG=C', 'qemu-img', 'info', path,
prlimit=nova.privsep.qemu.QEMU_IMG_LIMITS)
@mock.patch('os.path.exists', return_value=True)
def test_disk_size(self, mock_exists):
path = '/myhome/disk.config'
template_output = """image: %(path)s
file format: raw
virtual size: %(v_size)s (%(vsize_b)s bytes)
cluster_size: 65536
disk size: 96K
"""
for i in range(0, 128):
bytes = i * 65336
kbytes = bytes / 1024
mbytes = kbytes / 1024
output = template_output % ({
'v_size': "%sM" % (mbytes),
'vsize_b': i,
'path': path,
})
with mock.patch('oslo_concurrency.processutils.execute',
return_value=(output, '')) as mock_execute:
self._test_disk_size(mock_execute, path, i)
output = template_output % ({
'v_size': "%sK" % (kbytes),
'vsize_b': i,
'path': path,
})
with mock.patch('oslo_concurrency.processutils.execute',
return_value=(output, '')) as mock_execute:
self._test_disk_size(mock_execute, path, i)
    @mock.patch('os.path.exists', return_value=True)
    @mock.patch('oslo_concurrency.processutils.execute')
    def test_qemu_info_canon(self, mock_execute, mock_exists):
        """qemu_img_info() parses canonical qemu-img info output,
        ignoring unknown fields such as "blah BLAH"."""
        path = "disk.config"
        example_output = """image: disk.config
file format: raw
virtual size: 64M (67108864 bytes)
cluster_size: 65536
disk size: 96K
blah BLAH: bb
"""
        mock_execute.return_value = (example_output, '')
        image_info = images.qemu_img_info(path)
        mock_execute.assert_called_once_with(
            'env', 'LC_ALL=C', 'LANG=C', 'qemu-img', 'info', path,
            prlimit=nova.privsep.qemu.QEMU_IMG_LIMITS)
        mock_exists.assert_called_once_with(path)
        self.assertEqual('disk.config', image_info.image)
        self.assertEqual('raw', image_info.file_format)
        self.assertEqual(67108864, image_info.virtual_size)
        self.assertEqual(98304, image_info.disk_size)
        self.assertEqual(65536, image_info.cluster_size)
@mock.patch('os.path.exists', return_value=True)
@mock.patch('oslo_concurrency.processutils.execute')
def test_qemu_info_canon_qemu_2_10(self, mock_execute, mock_exists):
images.QEMU_VERSION = nova.privsep.qemu.QEMU_VERSION_REQ_SHARED
path = "disk.config"
example_output = """image: disk.config
file format: raw
virtual size: 64M (67108864 bytes)
cluster_size: 65536
disk size: 96K
blah BLAH: bb
"""
mock_execute.return_value = (example_output, '')
image_info = images.qemu_img_info(path)
mock_execute.assert_called_once_with(
'env', 'LC_ALL=C', 'LANG=C', 'qemu-img', 'info', path,
'--force-share', prlimit=nova.privsep.qemu.QEMU_IMG_LIMITS)
mock_exists.assert_called_once_with(path)
self.assertEqual('disk.config', image_info.image)
self.assertEqual('raw', image_info.file_format)
self.assertEqual(67108864, image_info.virtual_size)
self.assertEqual(98304, image_info.disk_size)
self.assertEqual(65536, image_info.cluster_size)
    @mock.patch('os.path.exists', return_value=True)
    @mock.patch('oslo_concurrency.processutils.execute')
    def test_qemu_info_canon2(self, mock_execute, mock_exists):
        """qemu_img_info() parses qcow2 output with a backing file and
        normalises the reported file format to lower case."""
        path = "disk.config"
        example_output = """image: disk.config
file format: QCOW2
virtual size: 67108844
cluster_size: 65536
disk size: 963434
backing file: /var/lib/nova/a328c7998805951a_2
"""
        mock_execute.return_value = (example_output, '')
        image_info = images.qemu_img_info(path)
        mock_execute.assert_called_once_with(
            'env', 'LC_ALL=C', 'LANG=C', 'qemu-img', 'info', path,
            prlimit=nova.privsep.qemu.QEMU_IMG_LIMITS)
        mock_exists.assert_called_once_with(path)
        self.assertEqual('disk.config', image_info.image)
        self.assertEqual('qcow2', image_info.file_format)
        self.assertEqual(67108844, image_info.virtual_size)
        self.assertEqual(963434, image_info.disk_size)
        self.assertEqual(65536, image_info.cluster_size)
        self.assertEqual('/var/lib/nova/a328c7998805951a_2',
                         image_info.backing_file)
    @mock.patch('os.path.exists', return_value=True)
    @mock.patch('os.path.isdir', return_value=True)
    @mock.patch('oslo_concurrency.processutils.execute')
    def test_qemu_info_ploop(self, mock_execute, mock_isdir, mock_exists):
        """For a ploop directory, qemu_img_info() inspects the root.hds
        image inside it and checks for DiskDescriptor.xml."""
        path = "/var/lib/nova"
        example_output = """image: root.hds
file format: parallels
virtual size: 3.0G (3221225472 bytes)
disk size: 706M
"""
        mock_execute.return_value = (example_output, '')
        image_info = images.qemu_img_info(path)
        mock_execute.assert_called_once_with(
            'env', 'LC_ALL=C', 'LANG=C', 'qemu-img', 'info',
            os.path.join(path, 'root.hds'),
            prlimit=nova.privsep.qemu.QEMU_IMG_LIMITS)
        mock_isdir.assert_called_once_with(path)
        self.assertEqual(2, mock_exists.call_count)
        self.assertEqual(path, mock_exists.call_args_list[0][0][0])
        self.assertEqual(os.path.join(path, 'DiskDescriptor.xml'),
                         mock_exists.call_args_list[1][0][0])
        self.assertEqual('root.hds', image_info.image)
        self.assertEqual('parallels', image_info.file_format)
        self.assertEqual(3221225472, image_info.virtual_size)
        # 706M reported by qemu-img == 740294656 bytes
        self.assertEqual(740294656, image_info.disk_size)
    @mock.patch('os.path.exists', return_value=True)
    @mock.patch('oslo_concurrency.processutils.execute')
    def test_qemu_backing_file_actual(self,
                                      mock_execute, mock_exists):
        """When the backing-file line carries an "(actual path: ...)"
        suffix, qemu_img_info() must report the actual path."""
        path = "disk.config"
        example_output = """image: disk.config
file format: raw
virtual size: 64M (67108864 bytes)
cluster_size: 65536
disk size: 96K
Snapshot list:
ID TAG VM SIZE DATE VM CLOCK
1 d9a9784a500742a7bb95627bb3aace38 0 2012-08-20 10:52:46 00:00:00.000
backing file: /var/lib/nova/a328c7998805951a_2 (actual path: /b/3a988059e51a_2)
"""
        mock_execute.return_value = (example_output, '')
        image_info = images.qemu_img_info(path)
        mock_execute.assert_called_once_with(
            'env', 'LC_ALL=C', 'LANG=C', 'qemu-img', 'info', path,
            prlimit=nova.privsep.qemu.QEMU_IMG_LIMITS)
        mock_exists.assert_called_once_with(path)
        self.assertEqual('disk.config', image_info.image)
        self.assertEqual('raw', image_info.file_format)
        self.assertEqual(67108864, image_info.virtual_size)
        self.assertEqual(98304, image_info.disk_size)
        self.assertEqual(1, len(image_info.snapshots))
        self.assertEqual('/b/3a988059e51a_2',
                         image_info.backing_file)
    @mock.patch('os.path.exists', return_value=True)
    @mock.patch('oslo_concurrency.processutils.execute')
    def test_qemu_info_convert(self, mock_execute, mock_exists):
        """qemu_img_info() copes with a human-readable-only virtual size
        and ignores trailing junk fields."""
        path = "disk.config"
        example_output = """image: disk.config
file format: raw
virtual size: 64M
disk size: 96K
Snapshot list:
ID TAG VM SIZE DATE VM CLOCK
1 d9a9784a500742a7bb95627bb3aace38 0 2012-08-20 10:52:46 00:00:00.000
3 d9a9784a500742a7bb95627bb3aace38 0 2012-08-20 10:52:46 00:00:00.000
4 d9a9784a500742a7bb95627bb3aace38 0 2012-08-20 10:52:46 00:00:00.000
junk stuff: bbb
"""
        mock_execute.return_value = (example_output, '')
        image_info = images.qemu_img_info(path)
        mock_execute.assert_called_once_with(
            'env', 'LC_ALL=C', 'LANG=C', 'qemu-img', 'info', path,
            prlimit=nova.privsep.qemu.QEMU_IMG_LIMITS)
        mock_exists.assert_called_once_with(path)
        self.assertEqual('disk.config', image_info.image)
        self.assertEqual('raw', image_info.file_format)
        self.assertEqual(67108864, image_info.virtual_size)
        self.assertEqual(98304, image_info.disk_size)
    @mock.patch('os.path.exists', return_value=True)
    @mock.patch('oslo_concurrency.processutils.execute')
    def test_qemu_info_snaps(self, mock_execute, mock_exists):
        """qemu_img_info() collects every row of the snapshot table."""
        path = "disk.config"
        example_output = """image: disk.config
file format: raw
virtual size: 64M (67108864 bytes)
disk size: 96K
Snapshot list:
ID TAG VM SIZE DATE VM CLOCK
1 d9a9784a500742a7bb95627bb3aace38 0 2012-08-20 10:52:46 00:00:00.000
3 d9a9784a500742a7bb95627bb3aace38 0 2012-08-20 10:52:46 00:00:00.000
4 d9a9784a500742a7bb95627bb3aace38 0 2012-08-20 10:52:46 00:00:00.000
"""
        mock_execute.return_value = (example_output, '')
        image_info = images.qemu_img_info(path)
        mock_execute.assert_called_once_with(
            'env', 'LC_ALL=C', 'LANG=C', 'qemu-img', 'info', path,
            prlimit=nova.privsep.qemu.QEMU_IMG_LIMITS)
        mock_exists.assert_called_once_with(path)
        self.assertEqual('disk.config', image_info.image)
        self.assertEqual('raw', image_info.file_format)
        self.assertEqual(67108864, image_info.virtual_size)
        self.assertEqual(98304, image_info.disk_size)
        self.assertEqual(3, len(image_info.snapshots))
    def test_valid_hostname_normal(self):
        """A fully-qualified domain name is a valid hostname."""
        self.assertTrue(libvirt_utils.is_valid_hostname("hello.world.com"))
    def test_valid_hostname_ipv4addr(self):
        """A dotted-quad IPv4 address is accepted as a hostname."""
        self.assertTrue(libvirt_utils.is_valid_hostname("10.0.2.1"))
    def test_valid_hostname_ipv6addr(self):
        """An IPv6 address is accepted as a hostname."""
        self.assertTrue(libvirt_utils.is_valid_hostname("240:2ac3::2"))
    def test_valid_hostname_bad(self):
        """Shell metacharacters make a hostname invalid."""
        self.assertFalse(libvirt_utils.is_valid_hostname("foo/?com=/bin/sh"))
@mock.patch('oslo_concurrency.processutils.execute')
def test_create_image(self, mock_execute):
libvirt_utils.create_image('raw', '/some/path', '10G')
libvirt_utils.create_image('qcow2', '/some/stuff', '1234567891234')
expected_args = [(('qemu-img', 'create', '-f', 'raw',
'/some/path', '10G'),),
(('qemu-img', 'create', '-f', 'qcow2',
'/some/stuff', '1234567891234'),)]
self.assertEqual(expected_args, mock_execute.call_args_list)
    @mock.patch('os.path.exists', return_value=True)
    @mock.patch('oslo_concurrency.processutils.execute')
    @mock.patch('nova.virt.images.qemu_img_info')
    def test_create_cow_image(self, mock_info, mock_execute, mock_exists):
        """create_cow_image() must create a qcow2 overlay whose
        backing_file/backing_fmt/cluster_size options are taken from the
        qemu-img info of the backing image."""
        mock_execute.return_value = ('stdout', None)
        mock_info.return_value = mock.Mock(
            file_format=mock.sentinel.backing_fmt,
            cluster_size=mock.sentinel.cluster_size)
        libvirt_utils.create_cow_image(mock.sentinel.backing_path,
                                       mock.sentinel.new_path)
        mock_info.assert_called_once_with(mock.sentinel.backing_path)
        mock_execute.assert_has_calls([mock.call(
            'qemu-img', 'create', '-f', 'qcow2', '-o',
            'backing_file=%s,backing_fmt=%s,cluster_size=%s' % (
                mock.sentinel.backing_path, mock.sentinel.backing_fmt,
                mock.sentinel.cluster_size),
            mock.sentinel.new_path)])
    @ddt.unpack
    @ddt.data({'fs_type': 'some_fs_type',
               'default_eph_format': None,
               'expected_fs_type': 'some_fs_type'},
              {'fs_type': None,
               'default_eph_format': None,
               'expected_fs_type': nova.privsep.fs.FS_FORMAT_EXT4},
              {'fs_type': None,
               'default_eph_format': 'eph_format',
               'expected_fs_type': 'eph_format'})
    def test_create_ploop_image(self, fs_type,
                                default_eph_format,
                                expected_fs_type):
        """create_ploop_image() must honour an explicit fs_type, then
        CONF.default_ephemeral_format, and finally fall back to ext4."""
        with test.nested(mock.patch('oslo_utils.fileutils.ensure_tree'),
                         mock.patch('nova.privsep.libvirt.ploop_init')
                         ) as (mock_ensure_tree, mock_ploop_init):
            self.flags(default_ephemeral_format=default_eph_format)
            libvirt_utils.create_ploop_image('expanded', '/some/path',
                                             '5G', fs_type)
            mock_ensure_tree.assert_has_calls([
                mock.call('/some/path')])
            mock_ploop_init.assert_has_calls([
                mock.call('5G', 'expanded', expected_fs_type,
                          '/some/path/root.hds')])
def test_pick_disk_driver_name(self):
type_map = {'kvm': ([True, 'qemu'], [False, 'qemu'], [None, 'qemu']),
'qemu': ([True, 'qemu'], [False, 'qemu'], [None, 'qemu']),
'uml': ([True, None], [False, None], [None, None]),
'lxc': ([True, None], [False, None], [None, None])}
# NOTE(aloga): Xen is tested in test_pick_disk_driver_name_xen
version = 1005001
for (virt_type, checks) in type_map.items():
self.flags(virt_type=virt_type, group='libvirt')
for (is_block_dev, expected_result) in checks:
result = libvirt_utils.pick_disk_driver_name(version,
is_block_dev)
self.assertEqual(result, expected_result)
    @mock.patch('nova.privsep.libvirt.xend_probe')
    @mock.patch('oslo_concurrency.processutils.execute')
    def test_pick_disk_driver_name_xen(self, mock_execute, mock_xend_probe):
        """Xen driver name depends on version plus blktap/xend presence.

        The .blktap and .xend attributes stashed on mock_execute act as
        tri-state flags (True/False/None for "tool errors"/"missing")
        consumed by the side-effect closures below.
        """
        def execute_side_effect(*args, **kwargs):
            # Only 'tap-ctl check' is expected; anything else is a bug.
            if args == ('tap-ctl', 'check'):
                if mock_execute.blktap is True:
                    return ('ok\n', '')
                elif mock_execute.blktap is False:
                    return ('some error\n', '')
                else:
                    raise OSError(2, "No such file or directory")
            raise Exception('Unexpected call')
        mock_execute.side_effect = execute_side_effect
        def xend_probe_side_effect():
            if mock_execute.xend is True:
                return ('', '')
            elif mock_execute.xend is False:
                raise processutils.ProcessExecutionError("error")
            else:
                raise OSError(2, "No such file or directory")
        mock_xend_probe.side_effect = xend_probe_side_effect
        self.flags(virt_type="xen", group='libvirt')
        versions = [4000000, 4001000, 4002000, 4003000, 4005000]
        for version in versions:
            # block dev
            result = libvirt_utils.pick_disk_driver_name(version, True)
            self.assertEqual(result, "phy")
            self.assertFalse(mock_execute.called)
            mock_execute.reset_mock()
            # file dev
            for blktap in True, False, None:
                mock_execute.blktap = blktap
                for xend in True, False, None:
                    mock_execute.xend = xend
                    result = libvirt_utils.pick_disk_driver_name(version,
                                                                 False)
                    # qemu backend supported only by libxl which is
                    # production since xen 4.2. libvirt use libxl if
                    # xend service not started.
                    if version >= 4002000 and xend is not True:
                        self.assertEqual(result, 'qemu')
                    elif blktap:
                        if version == 4000000:
                            self.assertEqual(result, 'tap')
                        else:
                            self.assertEqual(result, 'tap2')
                    else:
                        self.assertEqual(result, 'file')
                    # default is_block_dev False
                    self.assertEqual(result,
                        libvirt_utils.pick_disk_driver_name(version))
                    mock_execute.reset_mock()
    @mock.patch('os.path.exists', return_value=True)
    @mock.patch('oslo_concurrency.processutils.execute')
    def test_get_disk_size(self, mock_execute, mock_exists):
        """disk.get_disk_size() parses the exact byte count (the value
        in parentheses) out of qemu-img info output.
        """
        path = '/some/path'
        example_output = """image: 00000001
file format: raw
virtual size: 4.4M (4592640 bytes)
disk size: 4.4M
"""
        mock_execute.return_value = (example_output, '')
        self.assertEqual(4592640, disk.get_disk_size('/some/path'))
        mock_execute.assert_called_once_with(
            'env', 'LC_ALL=C', 'LANG=C', 'qemu-img', 'info', path,
            prlimit=nova.privsep.qemu.QEMU_IMG_LIMITS)
        mock_exists.assert_called_once_with(path)
def test_copy_image(self):
dst_fd, dst_path = tempfile.mkstemp()
try:
os.close(dst_fd)
src_fd, src_path = tempfile.mkstemp()
try:
with os.fdopen(src_fd, 'w') as fp:
fp.write('canary')
libvirt_utils.copy_image(src_path, dst_path)
with open(dst_path, 'r') as fp:
self.assertEqual(fp.read(), 'canary')
finally:
os.unlink(src_path)
finally:
os.unlink(dst_path)
def test_write_to_file(self):
dst_fd, dst_path = tempfile.mkstemp()
try:
os.close(dst_fd)
libvirt_utils.write_to_file(dst_path, 'hello')
with open(dst_path, 'r') as fp:
self.assertEqual(fp.read(), 'hello')
finally:
os.unlink(dst_path)
    @mock.patch.object(compute_utils, 'disk_ops_semaphore')
    @mock.patch('nova.privsep.utils.supports_direct_io', return_value=False)
    @mock.patch('oslo_concurrency.processutils.execute')
    def test_extract_snapshot_no_directio(self, mock_execute,
                                          mock_direct_io,
                                          mock_disk_op_sema):
        """Without direct IO support the qemu-img cache mode falls back
        to 'writeback' instead of 'none'.
        """
        # Test a single variant with no support for direct IO.
        # This could be removed if we add unit tests for convert_image().
        src_format = 'qcow2'
        dest_format = 'raw'
        out_format = 'raw'
        libvirt_utils.extract_snapshot('/path/to/disk/image', src_format,
                                       '/extracted/snap', dest_format)
        qemu_img_cmd = ('qemu-img', 'convert', '-t', 'writeback',
                        '-O', out_format, '-f', src_format, )
        if CONF.libvirt.snapshot_compression and dest_format == "qcow2":
            qemu_img_cmd += ('-c',)
        qemu_img_cmd += ('/path/to/disk/image', '/extracted/snap')
        mock_disk_op_sema.__enter__.assert_called_once()
        mock_direct_io.assert_called_once_with(CONF.instances_path)
        mock_execute.assert_called_once_with(*qemu_img_cmd)
    @mock.patch.object(compute_utils, 'disk_ops_semaphore')
    @mock.patch('nova.privsep.utils.supports_direct_io', return_value=True)
    def _do_test_extract_snapshot(self, mock_execute, mock_direct_io,
                                  mock_disk_op_sema,
                                  src_format='qcow2',
                                  dest_format='raw', out_format='raw'):
        """Shared helper for the extract_snapshot format variants.

        NOTE: mock_execute is passed positionally by each caller's own
        @mock.patch of processutils.execute; only mock_direct_io and
        mock_disk_op_sema come from the decorators here.
        """
        libvirt_utils.extract_snapshot('/path/to/disk/image', src_format,
                                       '/extracted/snap', dest_format)
        qemu_img_cmd = ('qemu-img', 'convert', '-t', 'none',
                        '-O', out_format, '-f', src_format, )
        if CONF.libvirt.snapshot_compression and dest_format == "qcow2":
            qemu_img_cmd += ('-c',)
        qemu_img_cmd += ('/path/to/disk/image', '/extracted/snap')
        mock_disk_op_sema.__enter__.assert_called_once()
        mock_direct_io.assert_called_once_with(CONF.instances_path)
        mock_execute.assert_called_once_with(*qemu_img_cmd)
    @mock.patch('oslo_concurrency.processutils.execute')
    def test_extract_snapshot_raw(self, mock_execute):
        """qcow2 -> raw extraction (the default helper variant)."""
        self._do_test_extract_snapshot(mock_execute)
    @mock.patch('oslo_concurrency.processutils.execute')
    def test_extract_snapshot_iso(self, mock_execute):
        """iso destination still converts to raw output format."""
        self._do_test_extract_snapshot(mock_execute, dest_format='iso')
    @mock.patch('oslo_concurrency.processutils.execute')
    def test_extract_snapshot_qcow2(self, mock_execute):
        """qcow2 -> qcow2 extraction, compression disabled by default."""
        self._do_test_extract_snapshot(mock_execute,
                                       dest_format='qcow2', out_format='qcow2')
    @mock.patch('oslo_concurrency.processutils.execute')
    def test_extract_snapshot_qcow2_and_compression(self, mock_execute):
        """snapshot_compression adds -c for qcow2 destinations."""
        self.flags(snapshot_compression=True, group='libvirt')
        self._do_test_extract_snapshot(mock_execute,
                                       dest_format='qcow2', out_format='qcow2')
    @mock.patch('oslo_concurrency.processutils.execute')
    def test_extract_snapshot_parallels(self, mock_execute):
        """A 'ploop' destination maps to qemu-img's 'parallels' format."""
        self._do_test_extract_snapshot(mock_execute,
                                       src_format='raw',
                                       dest_format='ploop',
                                       out_format='parallels')
def test_load_file(self):
dst_fd, dst_path = tempfile.mkstemp()
try:
os.close(dst_fd)
# We have a test for write_to_file. If that is sound, this suffices
libvirt_utils.write_to_file(dst_path, 'hello')
self.assertEqual(libvirt_utils.load_file(dst_path), 'hello')
finally:
os.unlink(dst_path)
def test_file_open(self):
dst_fd, dst_path = tempfile.mkstemp()
try:
os.close(dst_fd)
# We have a test for write_to_file. If that is sound, this suffices
libvirt_utils.write_to_file(dst_path, 'hello')
with libvirt_utils.file_open(dst_path, 'r') as fp:
self.assertEqual(fp.read(), 'hello')
finally:
os.unlink(dst_path)
    def test_get_fs_info(self):
        """get_fs_info() converts statvfs block counts into byte sizes."""
        class FakeStatResult(object):
            def __init__(self):
                self.f_bsize = 4096
                self.f_frsize = 4096
                self.f_blocks = 2000
                self.f_bfree = 1000
                self.f_bavail = 900
                self.f_files = 2000
                self.f_ffree = 1000
                self.f_favail = 900
                self.f_flag = 4096
                self.f_namemax = 255
                # NOTE(review): unused attribute on the fake itself; the
                # 'self.path' in fake_statvfs below binds the outer test
                # case via closure, not this object.
                self.path = None
        def fake_statvfs(path):
            self.path = path
            return FakeStatResult()
        self.stub_out('os.statvfs', fake_statvfs)
        fs_info = libvirt_utils.get_fs_info('/some/file/path')
        self.assertEqual('/some/file/path', self.path)
        # Expected values are consistent with total=f_blocks*f_frsize,
        # free=f_bavail*f_frsize, used=(f_blocks-f_bfree)*f_frsize.
        self.assertEqual(8192000, fs_info['total'])
        self.assertEqual(3686400, fs_info['free'])
        self.assertEqual(4096000, fs_info['used'])
@mock.patch('nova.virt.images.fetch_to_raw')
def test_fetch_image(self, mock_images):
context = 'opaque context'
target = '/tmp/targetfile'
image_id = '4'
trusted_certs = objects.TrustedCerts(
ids=['0b5d2c72-12cc-4ba6-a8d7-3ff5cc1d8cb8',
'674736e3-f25c-405c-8362-bbf991e0ce0a'])
libvirt_utils.fetch_image(context, target, image_id, trusted_certs)
mock_images.assert_called_once_with(
context, image_id, target, trusted_certs)
@mock.patch('nova.virt.images.fetch')
def test_fetch_initrd_image(self, mock_images):
_context = context.RequestContext(project_id=123,
project_name="aubergine",
user_id=456,
user_name="pie")
target = '/tmp/targetfile'
image_id = '4'
trusted_certs = objects.TrustedCerts(
ids=['0b5d2c72-12cc-4ba6-a8d7-3ff5cc1d8cb8',
'674736e3-f25c-405c-8362-bbf991e0ce0a'])
libvirt_utils.fetch_raw_image(_context, target, image_id,
trusted_certs)
mock_images.assert_called_once_with(
_context, image_id, target, trusted_certs)
    @mock.patch.object(compute_utils, 'disk_ops_semaphore')
    @mock.patch('nova.privsep.utils.supports_direct_io', return_value=True)
    @mock.patch('nova.privsep.qemu.unprivileged_convert_image')
    def test_fetch_raw_image(self, mock_convert_image, mock_direct_io,
                             mock_disk_op_sema):
        """fetch_to_raw() converts non-raw downloads, leaves raw ones
        in place, and rejects images that declare a backing file.

        File operations are recorded into self.executes via the fakes
        below so each scenario can assert the exact sequence.
        """
        def fake_rename(old, new):
            self.executes.append(('mv', old, new))
        def fake_unlink(path):
            self.executes.append(('rm', path))
        def fake_rm_on_error(path, remove=None):
            self.executes.append(('rm', '-f', path))
        def fake_qemu_img_info(path):
            # Derive a fake format from the filename extension(s); a
            # path containing 'backing' simulates a backing file.
            class FakeImgInfo(object):
                pass
            file_format = path.split('.')[-1]
            if file_format == 'part':
                file_format = path.split('.')[-2]
            elif file_format == 'converted':
                file_format = 'raw'
            if 'backing' in path:
                backing_file = 'backing'
            else:
                backing_file = None
            FakeImgInfo.file_format = file_format
            FakeImgInfo.backing_file = backing_file
            FakeImgInfo.virtual_size = 1
            return FakeImgInfo()
        self.stub_out('os.rename', fake_rename)
        self.stub_out('os.unlink', fake_unlink)
        self.stub_out('nova.virt.images.fetch', lambda *_, **__: None)
        self.stub_out('nova.virt.images.qemu_img_info', fake_qemu_img_info)
        self.stub_out('oslo_utils.fileutils.delete_if_exists',
                      fake_rm_on_error)
        # Since the remove param of fileutils.remove_path_on_error()
        # is initialized at load time, we must provide a wrapper
        # that explicitly resets it to our fake delete_if_exists()
        old_rm_path_on_error = fileutils.remove_path_on_error
        f = functools.partial(old_rm_path_on_error, remove=fake_rm_on_error)
        self.stub_out('oslo_utils.fileutils.remove_path_on_error', f)
        context = 'opaque context'
        image_id = '4'
        # Scenario 1: qcow2 download is converted to raw then renamed.
        target = 't.qcow2'
        self.executes = []
        expected_commands = [('rm', 't.qcow2.part'),
                             ('mv', 't.qcow2.converted', 't.qcow2')]
        images.fetch_to_raw(context, image_id, target)
        self.assertEqual(self.executes, expected_commands)
        mock_disk_op_sema.__enter__.assert_called_once()
        mock_convert_image.assert_called_with(
            't.qcow2.part', 't.qcow2.converted', 'qcow2', 'raw',
            CONF.instances_path, False)
        mock_convert_image.reset_mock()
        # Scenario 2: already-raw download is just renamed, no convert.
        target = 't.raw'
        self.executes = []
        expected_commands = [('mv', 't.raw.part', 't.raw')]
        images.fetch_to_raw(context, image_id, target)
        self.assertEqual(self.executes, expected_commands)
        mock_convert_image.assert_not_called()
        # Scenario 3: a backing file makes the image unacceptable and
        # the partial download is cleaned up.
        target = 'backing.qcow2'
        self.executes = []
        expected_commands = [('rm', '-f', 'backing.qcow2.part')]
        self.assertRaises(exception.ImageUnacceptable,
                          images.fetch_to_raw, context, image_id, target)
        self.assertEqual(self.executes, expected_commands)
        mock_convert_image.assert_not_called()
        del self.executes
    def test_get_disk_backing_file(self):
        """The backing file is reduced to its basename; when an
        '(actual path: ...)' annotation is present, that path is used.
        """
        with_actual_path = False
        def fake_execute(*args, **kwargs):
            # Reads with_actual_path from the enclosing scope at call
            # time, so flipping it below changes the fake output.
            if with_actual_path:
                return ("some: output\n"
                        "backing file: /foo/bar/baz (actual path: /a/b/c)\n"
                        "...: ...\n"), ''
            else:
                return ("some: output\n"
                        "backing file: /foo/bar/baz\n"
                        "...: ...\n"), ''
        def return_true(*args, **kwargs):
            return True
        self.stub_out('oslo_concurrency.processutils.execute', fake_execute)
        self.stub_out('os.path.exists', return_true)
        out = libvirt_utils.get_disk_backing_file('')
        self.assertEqual(out, 'baz')
        with_actual_path = True
        out = libvirt_utils.get_disk_backing_file('')
        self.assertEqual(out, 'c')
def test_get_instance_path_at_destination(self):
instance = fake_instance.fake_instance_obj(None, name='fake_inst',
uuid=uuids.instance)
migrate_data = None
inst_path_at_dest = libvirt_utils.get_instance_path_at_destination(
instance, migrate_data)
expected_path = os.path.join(CONF.instances_path, instance['uuid'])
self.assertEqual(expected_path, inst_path_at_dest)
migrate_data = {}
inst_path_at_dest = libvirt_utils.get_instance_path_at_destination(
instance, migrate_data)
expected_path = os.path.join(CONF.instances_path, instance['uuid'])
self.assertEqual(expected_path, inst_path_at_dest)
migrate_data = objects.LibvirtLiveMigrateData(
instance_relative_path='fake_relative_path')
inst_path_at_dest = libvirt_utils.get_instance_path_at_destination(
instance, migrate_data)
expected_path = os.path.join(CONF.instances_path, 'fake_relative_path')
self.assertEqual(expected_path, inst_path_at_dest)
def test_get_arch(self):
image_meta = objects.ImageMeta.from_dict(
{'properties': {'architecture': "X86_64"}})
image_arch = libvirt_utils.get_arch(image_meta)
self.assertEqual(obj_fields.Architecture.X86_64, image_arch)
    def test_is_mounted(self):
        """is_mounted() combines os.path.ismount with an optional check
        that the given source appears at that mountpoint in /proc/mounts.
        """
        mount_path = "/var/lib/nova/mnt"
        source = "192.168.0.1:/nova"
        # Fixture where the expected source is mounted at mount_path.
        proc_with_mnt = """/dev/sda3 / xfs rw,seclabel,attr2,inode64 0 0
tmpfs /tmp tmpfs rw,seclabel 0 0
hugetlbfs /dev/hugepages hugetlbfs rw,seclabel,relatime 0 0
mqueue /dev/mqueue mqueue rw,seclabel,relatime 0 0
debugfs /sys/kernel/debug debugfs rw,seclabel,relatime 0 0
nfsd /proc/fs/nfsd nfsd rw,relatime 0 0
/dev/sda1 /boot ext4 rw,seclabel,relatime,data=ordered 0 0
sunrpc /var/lib/nfs/rpc_pipefs rpc_pipefs rw,relatime 0 0
192.168.0.1:/nova /var/lib/nova/mnt nfs4 rw,relatime,vers=4.1
"""
        # Fixture where a different source is mounted at mount_path.
        proc_wrong_mnt = """/dev/sda3 / xfs rw,seclabel,attr2,inode64 0 0
tmpfs /tmp tmpfs rw,seclabel 0 0
hugetlbfs /dev/hugepages hugetlbfs rw,seclabel,relatime 0 0
mqueue /dev/mqueue mqueue rw,seclabel,relatime 0 0
debugfs /sys/kernel/debug debugfs rw,seclabel,relatime 0 0
nfsd /proc/fs/nfsd nfsd rw,relatime 0 0
/dev/sda1 /boot ext4 rw,seclabel,relatime,data=ordered 0 0
sunrpc /var/lib/nfs/rpc_pipefs rpc_pipefs rw,relatime 0 0
192.168.0.2:/nova /var/lib/nova/mnt nfs4 rw,relatime,vers=4.1
"""
        # Fixture where mount_path does not appear at all.
        proc_without_mnt = """/dev/sda3 / xfs rw,seclabel,,attr2,inode64 0 0
tmpfs /tmp tmpfs rw,seclabel 0 0
hugetlbfs /dev/hugepages hugetlbfs rw,seclabel,relatime 0 0
mqueue /dev/mqueue mqueue rw,seclabel,relatime 0 0
debugfs /sys/kernel/debug debugfs rw,seclabel,relatime 0 0
nfsd /proc/fs/nfsd nfsd rw,relatime 0 0
/dev/sda1 /boot ext4 rw,seclabel,relatime,data=ordered 0 0
sunrpc /var/lib/nfs/rpc_pipefs rpc_pipefs rw,relatime 0 0
"""
        with mock.patch.object(os.path, 'ismount') as mock_ismount:
            # is_mounted(mount_path) with no source is equivalent to
            # os.path.ismount(mount_path)
            mock_ismount.return_value = False
            self.assertFalse(libvirt_utils.is_mounted(mount_path))
            mock_ismount.return_value = True
            self.assertTrue(libvirt_utils.is_mounted(mount_path))
            # Source is given, and matches source in /proc/mounts
            proc_mnt = mock.mock_open(read_data=proc_with_mnt)
            with mock.patch.object(six.moves.builtins, "open", proc_mnt):
                self.assertTrue(libvirt_utils.is_mounted(mount_path, source))
            # Source is given, and doesn't match source in /proc/mounts
            proc_mnt = mock.mock_open(read_data=proc_wrong_mnt)
            with mock.patch.object(six.moves.builtins, "open", proc_mnt):
                self.assertFalse(libvirt_utils.is_mounted(mount_path, source))
            # Source is given, and mountpoint isn't present in /proc/mounts
            # Note that this shouldn't occur, as os.path.ismount should have
            # previously returned False in this case.
            proc_umnt = mock.mock_open(read_data=proc_without_mnt)
            with mock.patch.object(six.moves.builtins, "open", proc_umnt):
                self.assertFalse(libvirt_utils.is_mounted(mount_path, source))
    def test_find_disk_file_device(self):
        """find_disk() returns path and format for a file-backed disk."""
        self.useFixture(fakelibvirt.FakeLibvirtFixture())
        xml = """
          <domain type='kvm'>
            <os>
              <type>linux</type>
            </os>
            <devices>
              <disk type="file" device="disk">
                <driver name="qemu" type="qcow2" cache="none" io="native"/>
                <source file="/tmp/hello"/>
                <target bus="ide" dev="/dev/hda"/>
              </disk>
            </devices>
          </domain>
        """
        virt_dom = mock.Mock(XMLDesc=mock.Mock(return_value=xml))
        guest = libvirt_guest.Guest(virt_dom)
        disk_path, format = libvirt_utils.find_disk(guest)
        self.assertEqual('/tmp/hello', disk_path)
        self.assertEqual('qcow2', format)
    def test_find_disk_block_device(self):
        """find_disk() returns the dev node for a block-backed disk."""
        self.useFixture(fakelibvirt.FakeLibvirtFixture())
        xml = """
          <domain type='kvm'>
            <os>
              <type>linux</type>
            </os>
            <devices>
              <disk type="block" device="disk">
                <driver name="qemu" type="raw"/>
                <source dev="/dev/nova-vg/hello"/>
                <target bus="ide" dev="/dev/hda"/>
              </disk>
            </devices>
          </domain>
        """
        virt_dom = mock.Mock(XMLDesc=mock.Mock(return_value=xml))
        guest = libvirt_guest.Guest(virt_dom)
        disk_path, format = libvirt_utils.find_disk(guest)
        self.assertEqual('/dev/nova-vg/hello', disk_path)
        self.assertEqual('raw', format)
    def test_find_disk_rbd(self):
        """find_disk() builds an rbd:<pool>/<image> path for RBD disks."""
        self.useFixture(fakelibvirt.FakeLibvirtFixture())
        xml = """
          <domain type='kvm'>
            <os>
              <type>linux</type>
            </os>
            <devices>
              <disk type="network" device="disk">
                <driver name="qemu" type="raw"/>
                <source name="pool/image" protocol="rbd">
                  <host name="1.2.3.4" port="456"/>
                </source>
                <target bus="virtio" dev="/dev/vda"/>
              </disk>
            </devices>
          </domain>
        """
        virt_dom = mock.Mock(XMLDesc=mock.Mock(return_value=xml))
        guest = libvirt_guest.Guest(virt_dom)
        disk_path, format = libvirt_utils.find_disk(guest)
        self.assertEqual('rbd:pool/image', disk_path)
        self.assertEqual('raw', format)
    def test_find_disk_lxc(self):
        """For LXC the rootfs mount maps to the instance 'disk' path
        with no disk format.
        """
        self.useFixture(fakelibvirt.FakeLibvirtFixture())
        xml = """
          <domain type='lxc'>
            <os>
              <type>exe</type>
            </os>
            <devices>
              <filesystem type="mount">
                <source dir="/myhome/rootfs"/>
                <target dir="/"/>
              </filesystem>
            </devices>
          </domain>
        """
        virt_dom = mock.Mock(XMLDesc=mock.Mock(return_value=xml))
        guest = libvirt_guest.Guest(virt_dom)
        disk_path, format = libvirt_utils.find_disk(guest)
        self.assertEqual('/myhome/disk', disk_path)
        self.assertIsNone(format)
    def test_find_disk_parallels(self):
        """find_disk() handles parallels file-type filesystems (ploop)."""
        self.useFixture(fakelibvirt.FakeLibvirtFixture())
        xml = """
          <domain type='parallels'>
            <os>
              <type>exe</type>
            </os>
            <devices>
              <filesystem type='file'>"
                <driver format='ploop' type='ploop'/>"
                <source file='/test/disk'/>"
                <target dir='/'/>
              </filesystem>"
            </devices>
          </domain>
        """
        virt_dom = mock.Mock(XMLDesc=mock.Mock(return_value=xml))
        guest = libvirt_guest.Guest(virt_dom)
        disk_path, format = libvirt_utils.find_disk(guest)
        self.assertEqual('/test/disk', disk_path)
        self.assertEqual('ploop', format)
@mock.patch('nova.virt.libvirt.utils.get_arch')
def test_get_machine_type_from_fallbacks(self, mock_get_arch):
"""Test hardcoded arch-specific fallbacks for default machine type"""
image_meta = objects.ImageMeta.from_dict({"disk_format": "raw"})
host_cpu_archs = {
obj_fields.Architecture.ARMV7: "virt",
obj_fields.Architecture.AARCH64: "virt",
obj_fields.Architecture.S390: "s390-ccw-virtio",
obj_fields.Architecture.S390X: "s390-ccw-virtio",
obj_fields.Architecture.I686: "pc",
obj_fields.Architecture.X86_64: "pc",
}
for arch, expected_mtype in host_cpu_archs.items():
mock_get_arch.return_value = arch
mtype = libvirt_utils.get_machine_type(image_meta)
self.assertEqual(expected_mtype, mtype)
def test_get_machine_type_from_conf(self):
self.useFixture(nova_fixtures.ConfPatcher(
group="libvirt", hw_machine_type=['x86_64=q35', 'i686=legacy']))
self.assertEqual('q35',
libvirt_utils.get_default_machine_type('x86_64'))
    def test_get_machine_type_no_conf_or_fallback(self):
        """No config entry and no hardcoded fallback yields None."""
        self.assertIsNone(libvirt_utils.get_default_machine_type('sparc'))
    def test_get_machine_type_missing_conf_and_fallback(self):
        """A configured list lacking the requested arch yields None."""
        self.useFixture(nova_fixtures.ConfPatcher(
            group="libvirt", hw_machine_type=['x86_64=q35', 'i686=legacy']))
        self.assertIsNone(libvirt_utils.get_default_machine_type('sparc'))
    def test_get_machine_type_survives_invalid_conf(self):
        """A malformed entry ('foo', no '=') does not break valid ones."""
        self.useFixture(nova_fixtures.ConfPatcher(
            group="libvirt", hw_machine_type=['x86_64=q35', 'foo']))
        self.assertEqual('q35',
                         libvirt_utils.get_default_machine_type('x86_64'))
def test_get_machine_type_from_image(self):
image_meta = objects.ImageMeta.from_dict({
"disk_format": "raw", "properties": {"hw_machine_type": "q35"}
})
os_mach_type = libvirt_utils.get_machine_type(image_meta)
self.assertEqual('q35', os_mach_type)
    def test_get_flags_by_flavor_specs(self):
        """Required CPU-flag traits are mapped to flag names; required
        traits that are not CPU flags are filtered out.
        """
        flavor = objects.Flavor(
            id=1, flavorid='fakeid-1', name='fake1.small', memory_mb=128,
            vcpus=1, root_gb=1, ephemeral_gb=0, swap=0, rxtx_factor=0,
            deleted=False, extra_specs={
                'trait:%s' % os_traits.HW_CPU_X86_3DNOW: 'required',
                'trait:%s' % os_traits.HW_CPU_X86_SSE2: 'required',
                'trait:%s' % os_traits.HW_CPU_HYPERTHREADING: 'required',
            })
        traits = libvirt_utils.get_flags_by_flavor_specs(flavor)
        # we shouldn't see the hyperthreading trait since that's a valid trait
        # but not a CPU flag
        self.assertEqual(set(['3dnow', 'sse2']), traits)
| 42.938416 | 79 | 0.622593 |
import functools
import os
import tempfile
import ddt
import mock
import os_traits
from oslo_concurrency import processutils
from oslo_config import cfg
from oslo_utils import fileutils
from oslo_utils.fixture import uuidsentinel as uuids
import six
from nova.compute import utils as compute_utils
from nova import context
from nova import exception
from nova import objects
from nova.objects import fields as obj_fields
import nova.privsep.fs
import nova.privsep.qemu
from nova import test
from nova.tests import fixtures as nova_fixtures
from nova.tests.unit import fake_instance
from nova.tests.unit.virt.libvirt import fakelibvirt
from nova.virt.disk import api as disk
from nova.virt import images
from nova.virt.libvirt import guest as libvirt_guest
from nova.virt.libvirt import utils as libvirt_utils
CONF = cfg.CONF
@ddt.ddt
class LibvirtUtilsTestCase(test.NoDBTestCase):
    @mock.patch('oslo_concurrency.processutils.execute')
    def test_copy_image_local(self, mock_execute):
        """With no host given, copy_image() is a local recursive cp."""
        libvirt_utils.copy_image('src', 'dest')
        mock_execute.assert_called_once_with('cp', '-r', 'src', 'dest')
    @mock.patch('nova.virt.libvirt.volume.remotefs.SshDriver.copy_file')
    def test_copy_image_remote_ssh(self, mock_rem_fs_remove):
        """With a host and ssh transport, copy_image() uses SshDriver."""
        self.flags(remote_filesystem_transport='ssh', group='libvirt')
        libvirt_utils.copy_image('src', 'dest', host='host')
        mock_rem_fs_remove.assert_called_once_with('src', 'host:dest',
            on_completion=None, on_execute=None, compression=True)
    @mock.patch('nova.virt.libvirt.volume.remotefs.RsyncDriver.copy_file')
    def test_copy_image_remote_rsync(self, mock_rem_fs_remove):
        """With a host and rsync transport, copy_image() uses RsyncDriver."""
        self.flags(remote_filesystem_transport='rsync', group='libvirt')
        libvirt_utils.copy_image('src', 'dest', host='host')
        mock_rem_fs_remove.assert_called_once_with('src', 'host:dest',
            on_completion=None, on_execute=None, compression=True)
@mock.patch('os.path.exists', return_value=True)
def test_disk_type_from_path(self, mock_exists):
for p in ['/dev/b', '/dev/blah/blah']:
d_type = libvirt_utils.get_disk_type_from_path(p)
self.assertEqual('lvm', d_type)
d_type = libvirt_utils.get_disk_type_from_path('rbd:pool/instance')
self.assertEqual('rbd', d_type)
path = '/myhome/disk.config'
d_type = libvirt_utils.get_disk_type_from_path(path)
self.assertIsNone(d_type)
    @mock.patch('os.path.exists', return_value=True)
    @mock.patch('os.path.isdir', return_value=True)
    def test_disk_type_ploop(self, mock_isdir, mock_exists):
        """A directory containing DiskDescriptor.xml is a ploop disk."""
        path = '/some/path'
        d_type = libvirt_utils.get_disk_type_from_path(path)
        mock_isdir.assert_called_once_with(path)
        mock_exists.assert_called_once_with("%s/DiskDescriptor.xml" % path)
        self.assertEqual('ploop', d_type)
    @mock.patch('os.path.exists', return_value=True)
    @mock.patch('oslo_concurrency.processutils.execute')
    def test_disk_backing(self, mock_execute, mock_exists):
        """A raw image with no backing file line yields None."""
        path = '/myhome/disk.config'
        template_output = """image: %(path)s
file format: raw
virtual size: 2K (2048 bytes)
cluster_size: 65536
disk size: 96K
"""
        output = template_output % ({
            'path': path,
        })
        mock_execute.return_value = (output, '')
        d_backing = libvirt_utils.get_disk_backing_file(path)
        mock_execute.assert_called_once_with(
            'env', 'LC_ALL=C', 'LANG=C', 'qemu-img', 'info', path,
            prlimit=nova.privsep.qemu.QEMU_IMG_LIMITS)
        mock_exists.assert_called_once_with(path)
        self.assertIsNone(d_backing)
    def _test_disk_size(self, mock_execute, path, expected_size):
        """Assert get_disk_size() returns expected_size and invokes
        qemu-img info with the canonical argv for the mocked output.
        """
        d_size = libvirt_utils.get_disk_size(path)
        self.assertEqual(expected_size, d_size)
        mock_execute.assert_called_once_with(
            'env', 'LC_ALL=C', 'LANG=C', 'qemu-img', 'info', path,
            prlimit=nova.privsep.qemu.QEMU_IMG_LIMITS)
@mock.patch('os.path.exists', return_value=True)
def test_disk_size(self, mock_exists):
path = '/myhome/disk.config'
template_output = """image: %(path)s
file format: raw
virtual size: %(v_size)s (%(vsize_b)s bytes)
cluster_size: 65536
disk size: 96K
"""
for i in range(0, 128):
bytes = i * 65336
kbytes = bytes / 1024
mbytes = kbytes / 1024
output = template_output % ({
'v_size': "%sM" % (mbytes),
'vsize_b': i,
'path': path,
})
with mock.patch('oslo_concurrency.processutils.execute',
return_value=(output, '')) as mock_execute:
self._test_disk_size(mock_execute, path, i)
output = template_output % ({
'v_size': "%sK" % (kbytes),
'vsize_b': i,
'path': path,
})
with mock.patch('oslo_concurrency.processutils.execute',
return_value=(output, '')) as mock_execute:
self._test_disk_size(mock_execute, path, i)
    @mock.patch('os.path.exists', return_value=True)
    @mock.patch('oslo_concurrency.processutils.execute')
    def test_qemu_info_canon(self, mock_execute, mock_exists):
        """Canonical qemu-img info output is parsed field by field,
        ignoring unknown trailing lines.
        """
        path = "disk.config"
        example_output = """image: disk.config
file format: raw
virtual size: 64M (67108864 bytes)
cluster_size: 65536
disk size: 96K
blah BLAH: bb
"""
        mock_execute.return_value = (example_output, '')
        image_info = images.qemu_img_info(path)
        mock_execute.assert_called_once_with(
            'env', 'LC_ALL=C', 'LANG=C', 'qemu-img', 'info', path,
            prlimit=nova.privsep.qemu.QEMU_IMG_LIMITS)
        mock_exists.assert_called_once_with(path)
        self.assertEqual('disk.config', image_info.image)
        self.assertEqual('raw', image_info.file_format)
        self.assertEqual(67108864, image_info.virtual_size)
        self.assertEqual(98304, image_info.disk_size)
        self.assertEqual(65536, image_info.cluster_size)
@mock.patch('os.path.exists', return_value=True)
@mock.patch('oslo_concurrency.processutils.execute')
def test_qemu_info_canon_qemu_2_10(self, mock_execute, mock_exists):
images.QEMU_VERSION = nova.privsep.qemu.QEMU_VERSION_REQ_SHARED
path = "disk.config"
example_output = """image: disk.config
file format: raw
virtual size: 64M (67108864 bytes)
cluster_size: 65536
disk size: 96K
blah BLAH: bb
"""
mock_execute.return_value = (example_output, '')
image_info = images.qemu_img_info(path)
mock_execute.assert_called_once_with(
'env', 'LC_ALL=C', 'LANG=C', 'qemu-img', 'info', path,
'--force-share', prlimit=nova.privsep.qemu.QEMU_IMG_LIMITS)
mock_exists.assert_called_once_with(path)
self.assertEqual('disk.config', image_info.image)
self.assertEqual('raw', image_info.file_format)
self.assertEqual(67108864, image_info.virtual_size)
self.assertEqual(98304, image_info.disk_size)
self.assertEqual(65536, image_info.cluster_size)
    @mock.patch('os.path.exists', return_value=True)
    @mock.patch('oslo_concurrency.processutils.execute')
    def test_qemu_info_canon2(self, mock_execute, mock_exists):
        """The reported format is lowercased ('QCOW2' -> 'qcow2') and a
        backing file line is picked up.
        """
        path = "disk.config"
        example_output = """image: disk.config
file format: QCOW2
virtual size: 67108844
cluster_size: 65536
disk size: 963434
backing file: /var/lib/nova/a328c7998805951a_2
"""
        mock_execute.return_value = (example_output, '')
        image_info = images.qemu_img_info(path)
        mock_execute.assert_called_once_with(
            'env', 'LC_ALL=C', 'LANG=C', 'qemu-img', 'info', path,
            prlimit=nova.privsep.qemu.QEMU_IMG_LIMITS)
        mock_exists.assert_called_once_with(path)
        self.assertEqual('disk.config', image_info.image)
        self.assertEqual('qcow2', image_info.file_format)
        self.assertEqual(67108844, image_info.virtual_size)
        self.assertEqual(963434, image_info.disk_size)
        self.assertEqual(65536, image_info.cluster_size)
        self.assertEqual('/var/lib/nova/a328c7998805951a_2',
                         image_info.backing_file)
    @mock.patch('os.path.exists', return_value=True)
    @mock.patch('os.path.isdir', return_value=True)
    @mock.patch('oslo_concurrency.processutils.execute')
    def test_qemu_info_ploop(self, mock_execute, mock_isdir, mock_exists):
        """For a ploop directory, qemu-img info targets root.hds inside
        it after DiskDescriptor.xml has been checked.
        """
        path = "/var/lib/nova"
        example_output = """image: root.hds
file format: parallels
virtual size: 3.0G (3221225472 bytes)
disk size: 706M
"""
        mock_execute.return_value = (example_output, '')
        image_info = images.qemu_img_info(path)
        mock_execute.assert_called_once_with(
            'env', 'LC_ALL=C', 'LANG=C', 'qemu-img', 'info',
            os.path.join(path, 'root.hds'),
            prlimit=nova.privsep.qemu.QEMU_IMG_LIMITS)
        mock_isdir.assert_called_once_with(path)
        # Existence is checked for both the directory and the descriptor.
        self.assertEqual(2, mock_exists.call_count)
        self.assertEqual(path, mock_exists.call_args_list[0][0][0])
        self.assertEqual(os.path.join(path, 'DiskDescriptor.xml'),
                         mock_exists.call_args_list[1][0][0])
        self.assertEqual('root.hds', image_info.image)
        self.assertEqual('parallels', image_info.file_format)
        self.assertEqual(3221225472, image_info.virtual_size)
        self.assertEqual(740294656, image_info.disk_size)
    @mock.patch('os.path.exists', return_value=True)
    @mock.patch('oslo_concurrency.processutils.execute')
    def test_qemu_backing_file_actual(self,
                                      mock_execute, mock_exists):
        """The '(actual path: ...)' annotation wins over the literal
        backing file path, and the snapshot row is counted.
        """
        path = "disk.config"
        example_output = """image: disk.config
file format: raw
virtual size: 64M (67108864 bytes)
cluster_size: 65536
disk size: 96K
Snapshot list:
ID TAG VM SIZE DATE VM CLOCK
1 d9a9784a500742a7bb95627bb3aace38 0 2012-08-20 10:52:46 00:00:00.000
backing file: /var/lib/nova/a328c7998805951a_2 (actual path: /b/3a988059e51a_2)
"""
        mock_execute.return_value = (example_output, '')
        image_info = images.qemu_img_info(path)
        mock_execute.assert_called_once_with(
            'env', 'LC_ALL=C', 'LANG=C', 'qemu-img', 'info', path,
            prlimit=nova.privsep.qemu.QEMU_IMG_LIMITS)
        mock_exists.assert_called_once_with(path)
        self.assertEqual('disk.config', image_info.image)
        self.assertEqual('raw', image_info.file_format)
        self.assertEqual(67108864, image_info.virtual_size)
        self.assertEqual(98304, image_info.disk_size)
        self.assertEqual(1, len(image_info.snapshots))
        self.assertEqual('/b/3a988059e51a_2',
                         image_info.backing_file)
    @mock.patch('os.path.exists', return_value=True)
    @mock.patch('oslo_concurrency.processutils.execute')
    def test_qemu_info_convert(self, mock_execute, mock_exists):
        """Output with a snapshot list and junk trailing fields still
        parses the core image fields correctly.
        """
        path = "disk.config"
        example_output = """image: disk.config
file format: raw
virtual size: 64M
disk size: 96K
Snapshot list:
ID TAG VM SIZE DATE VM CLOCK
1 d9a9784a500742a7bb95627bb3aace38 0 2012-08-20 10:52:46 00:00:00.000
3 d9a9784a500742a7bb95627bb3aace38 0 2012-08-20 10:52:46 00:00:00.000
4 d9a9784a500742a7bb95627bb3aace38 0 2012-08-20 10:52:46 00:00:00.000
junk stuff: bbb
"""
        mock_execute.return_value = (example_output, '')
        image_info = images.qemu_img_info(path)
        mock_execute.assert_called_once_with(
            'env', 'LC_ALL=C', 'LANG=C', 'qemu-img', 'info', path,
            prlimit=nova.privsep.qemu.QEMU_IMG_LIMITS)
        mock_exists.assert_called_once_with(path)
        self.assertEqual('disk.config', image_info.image)
        self.assertEqual('raw', image_info.file_format)
        self.assertEqual(67108864, image_info.virtual_size)
        self.assertEqual(98304, image_info.disk_size)
    @mock.patch('os.path.exists', return_value=True)
    @mock.patch('oslo_concurrency.processutils.execute')
    def test_qemu_info_snaps(self, mock_execute, mock_exists):
        """Each row in the snapshot list becomes an entry in
        image_info.snapshots.
        """
        path = "disk.config"
        example_output = """image: disk.config
file format: raw
virtual size: 64M (67108864 bytes)
disk size: 96K
Snapshot list:
ID TAG VM SIZE DATE VM CLOCK
1 d9a9784a500742a7bb95627bb3aace38 0 2012-08-20 10:52:46 00:00:00.000
3 d9a9784a500742a7bb95627bb3aace38 0 2012-08-20 10:52:46 00:00:00.000
4 d9a9784a500742a7bb95627bb3aace38 0 2012-08-20 10:52:46 00:00:00.000
"""
        mock_execute.return_value = (example_output, '')
        image_info = images.qemu_img_info(path)
        mock_execute.assert_called_once_with(
            'env', 'LC_ALL=C', 'LANG=C', 'qemu-img', 'info', path,
            prlimit=nova.privsep.qemu.QEMU_IMG_LIMITS)
        mock_exists.assert_called_once_with(path)
        self.assertEqual('disk.config', image_info.image)
        self.assertEqual('raw', image_info.file_format)
        self.assertEqual(67108864, image_info.virtual_size)
        self.assertEqual(98304, image_info.disk_size)
        self.assertEqual(3, len(image_info.snapshots))
    def test_valid_hostname_normal(self):
        """A plain FQDN is a valid hostname."""
        self.assertTrue(libvirt_utils.is_valid_hostname("hello.world.com"))
    def test_valid_hostname_ipv4addr(self):
        """A dotted-quad IPv4 address is accepted as a hostname."""
        self.assertTrue(libvirt_utils.is_valid_hostname("10.0.2.1"))
    def test_valid_hostname_ipv6addr(self):
        """A colon-separated IPv6 address is accepted as a hostname."""
        self.assertTrue(libvirt_utils.is_valid_hostname("240:2ac3::2"))
def test_valid_hostname_bad(self):
self.assertFalse(libvirt_utils.is_valid_hostname("foo/?com=/bin/sh"))
@mock.patch('oslo_concurrency.processutils.execute')
def test_create_image(self, mock_execute):
libvirt_utils.create_image('raw', '/some/path', '10G')
libvirt_utils.create_image('qcow2', '/some/stuff', '1234567891234')
expected_args = [(('qemu-img', 'create', '-f', 'raw',
'/some/path', '10G'),),
(('qemu-img', 'create', '-f', 'qcow2',
'/some/stuff', '1234567891234'),)]
self.assertEqual(expected_args, mock_execute.call_args_list)
    @mock.patch('os.path.exists', return_value=True)
    @mock.patch('oslo_concurrency.processutils.execute')
    @mock.patch('nova.virt.images.qemu_img_info')
    def test_create_cow_image(self, mock_info, mock_execute, mock_exists):
        """create_cow_image inspects the backing file and propagates its
        format and cluster size into the qemu-img creation options."""
        mock_execute.return_value = ('stdout', None)
        mock_info.return_value = mock.Mock(
            file_format=mock.sentinel.backing_fmt,
            cluster_size=mock.sentinel.cluster_size)
        libvirt_utils.create_cow_image(mock.sentinel.backing_path,
                                       mock.sentinel.new_path)
        # The backing image must be probed exactly once ...
        mock_info.assert_called_once_with(mock.sentinel.backing_path)
        # ... and its properties forwarded verbatim in the -o option string.
        mock_execute.assert_has_calls([mock.call(
            'qemu-img', 'create', '-f', 'qcow2', '-o',
            'backing_file=%s,backing_fmt=%s,cluster_size=%s' % (
                mock.sentinel.backing_path, mock.sentinel.backing_fmt,
                mock.sentinel.cluster_size),
            mock.sentinel.new_path)])
    @ddt.unpack
    @ddt.data({'fs_type': 'some_fs_type',
               'default_eph_format': None,
               'expected_fs_type': 'some_fs_type'},
              {'fs_type': None,
               'default_eph_format': None,
               'expected_fs_type': nova.privsep.fs.FS_FORMAT_EXT4},
              {'fs_type': None,
               'default_eph_format': 'eph_format',
               'expected_fs_type': 'eph_format'})
    def test_create_ploop_image(self, fs_type,
                                default_eph_format,
                                expected_fs_type):
        """An explicit fs_type wins; otherwise the configured
        default_ephemeral_format is used, falling back to ext4."""
        with test.nested(mock.patch('oslo_utils.fileutils.ensure_tree'),
                         mock.patch('nova.privsep.libvirt.ploop_init')
                         ) as (mock_ensure_tree, mock_ploop_init):
            self.flags(default_ephemeral_format=default_eph_format)
            libvirt_utils.create_ploop_image('expanded', '/some/path',
                                             '5G', fs_type)
            # The target directory is created before ploop_init is invoked
            # on the root.hds image inside it.
            mock_ensure_tree.assert_has_calls([
                mock.call('/some/path')])
            mock_ploop_init.assert_has_calls([
                mock.call('5G', 'expanded', expected_fs_type,
                          '/some/path/root.hds')])
def test_pick_disk_driver_name(self):
type_map = {'kvm': ([True, 'qemu'], [False, 'qemu'], [None, 'qemu']),
'qemu': ([True, 'qemu'], [False, 'qemu'], [None, 'qemu']),
'uml': ([True, None], [False, None], [None, None]),
'lxc': ([True, None], [False, None], [None, None])}
version = 1005001
for (virt_type, checks) in type_map.items():
self.flags(virt_type=virt_type, group='libvirt')
for (is_block_dev, expected_result) in checks:
result = libvirt_utils.pick_disk_driver_name(version,
is_block_dev)
self.assertEqual(result, expected_result)
    @mock.patch('nova.privsep.libvirt.xend_probe')
    @mock.patch('oslo_concurrency.processutils.execute')
    def test_pick_disk_driver_name_xen(self, mock_execute, mock_xend_probe):
        """Xen driver-name selection depends on the libvirt version and on
        whether blktap/xend are working (True), broken (False) or absent
        entirely (None, simulated by OSError)."""

        def execute_side_effect(*args, **kwargs):
            # Only 'tap-ctl check' is expected; its outcome is driven by
            # the tri-state flag stashed on the mock.
            if args == ('tap-ctl', 'check'):
                if mock_execute.blktap is True:
                    return ('ok\n', '')
                elif mock_execute.blktap is False:
                    return ('some error\n', '')
                else:
                    raise OSError(2, "No such file or directory")
            raise Exception('Unexpected call')
        mock_execute.side_effect = execute_side_effect

        def xend_probe_side_effect():
            if mock_execute.xend is True:
                return ('', '')
            elif mock_execute.xend is False:
                raise processutils.ProcessExecutionError("error")
            else:
                raise OSError(2, "No such file or directory")
        mock_xend_probe.side_effect = xend_probe_side_effect

        self.flags(virt_type="xen", group='libvirt')
        versions = [4000000, 4001000, 4002000, 4003000, 4005000]
        for version in versions:
            # Block devices always map to 'phy' without probing anything.
            result = libvirt_utils.pick_disk_driver_name(version, True)
            self.assertEqual(result, "phy")
            self.assertFalse(mock_execute.called)
            mock_execute.reset_mock()
            for blktap in True, False, None:
                mock_execute.blktap = blktap
                for xend in True, False, None:
                    mock_execute.xend = xend
                    result = libvirt_utils.pick_disk_driver_name(version,
                                                                 False)
                    # libvirt >= 4.2 without a running xend uses 'qemu';
                    # otherwise fall back to tap/tap2 or plain 'file'.
                    if version >= 4002000 and xend is not True:
                        self.assertEqual(result, 'qemu')
                    elif blktap:
                        if version == 4000000:
                            self.assertEqual(result, 'tap')
                        else:
                            self.assertEqual(result, 'tap2')
                    else:
                        self.assertEqual(result, 'file')
                    # is_block_dev defaults to False, so the two-argument
                    # and one-argument calls must agree.
                    self.assertEqual(result,
                                     libvirt_utils.pick_disk_driver_name(version))
            mock_execute.reset_mock()
    @mock.patch('os.path.exists', return_value=True)
    @mock.patch('oslo_concurrency.processutils.execute')
    def test_get_disk_size(self, mock_execute, mock_exists):
        """get_disk_size reports the virtual size in bytes parsed from
        'qemu-img info', not the (smaller) on-disk allocation."""
        path = '/some/path'
        example_output = """image: 00000001
file format: raw
virtual size: 4.4M (4592640 bytes)
disk size: 4.4M
"""
        mock_execute.return_value = (example_output, '')
        self.assertEqual(4592640, disk.get_disk_size('/some/path'))
        mock_execute.assert_called_once_with(
            'env', 'LC_ALL=C', 'LANG=C', 'qemu-img', 'info', path,
            prlimit=nova.privsep.qemu.QEMU_IMG_LIMITS)
        mock_exists.assert_called_once_with(path)
def test_copy_image(self):
dst_fd, dst_path = tempfile.mkstemp()
try:
os.close(dst_fd)
src_fd, src_path = tempfile.mkstemp()
try:
with os.fdopen(src_fd, 'w') as fp:
fp.write('canary')
libvirt_utils.copy_image(src_path, dst_path)
with open(dst_path, 'r') as fp:
self.assertEqual(fp.read(), 'canary')
finally:
os.unlink(src_path)
finally:
os.unlink(dst_path)
def test_write_to_file(self):
dst_fd, dst_path = tempfile.mkstemp()
try:
os.close(dst_fd)
libvirt_utils.write_to_file(dst_path, 'hello')
with open(dst_path, 'r') as fp:
self.assertEqual(fp.read(), 'hello')
finally:
os.unlink(dst_path)
    @mock.patch.object(compute_utils, 'disk_ops_semaphore')
    @mock.patch('nova.privsep.utils.supports_direct_io', return_value=False)
    @mock.patch('oslo_concurrency.processutils.execute')
    def test_extract_snapshot_no_directio(self, mock_execute,
                                          mock_direct_io,
                                          mock_disk_op_sema):
        """When direct I/O is unsupported, qemu-img convert must be run
        with the 'writeback' cache mode instead of 'none'."""
        src_format = 'qcow2'
        dest_format = 'raw'
        out_format = 'raw'
        libvirt_utils.extract_snapshot('/path/to/disk/image', src_format,
                                       '/extracted/snap', dest_format)
        qemu_img_cmd = ('qemu-img', 'convert', '-t', 'writeback',
                        '-O', out_format, '-f', src_format, )
        # '-c' (compression) only applies to qcow2 output with the
        # snapshot_compression flag enabled.
        if CONF.libvirt.snapshot_compression and dest_format == "qcow2":
            qemu_img_cmd += ('-c',)
        qemu_img_cmd += ('/path/to/disk/image', '/extracted/snap')
        mock_disk_op_sema.__enter__.assert_called_once()
        mock_direct_io.assert_called_once_with(CONF.instances_path)
        mock_execute.assert_called_once_with(*qemu_img_cmd)
    @mock.patch.object(compute_utils, 'disk_ops_semaphore')
    @mock.patch('nova.privsep.utils.supports_direct_io', return_value=True)
    def _do_test_extract_snapshot(self, mock_execute, mock_direct_io,
                                  mock_disk_op_sema,
                                  src_format='qcow2',
                                  dest_format='raw', out_format='raw'):
        """Shared assertions for extract_snapshot. Callers supply their own
        patched 'execute' mock as the first positional argument; the two
        decorators here append the direct-io and semaphore mocks after it.
        """
        libvirt_utils.extract_snapshot('/path/to/disk/image', src_format,
                                       '/extracted/snap', dest_format)
        # Direct I/O is reported as supported, so cache mode is 'none'.
        qemu_img_cmd = ('qemu-img', 'convert', '-t', 'none',
                        '-O', out_format, '-f', src_format, )
        if CONF.libvirt.snapshot_compression and dest_format == "qcow2":
            qemu_img_cmd += ('-c',)
        qemu_img_cmd += ('/path/to/disk/image', '/extracted/snap')
        mock_disk_op_sema.__enter__.assert_called_once()
        mock_direct_io.assert_called_once_with(CONF.instances_path)
        mock_execute.assert_called_once_with(*qemu_img_cmd)
    @mock.patch('oslo_concurrency.processutils.execute')
    def test_extract_snapshot_raw(self, mock_execute):
        """Default conversion path: qcow2 source to raw output."""
        self._do_test_extract_snapshot(mock_execute)
    @mock.patch('oslo_concurrency.processutils.execute')
    def test_extract_snapshot_iso(self, mock_execute):
        """An 'iso' destination still produces raw qemu-img output."""
        self._do_test_extract_snapshot(mock_execute, dest_format='iso')
@mock.patch('oslo_concurrency.processutils.execute')
def test_extract_snapshot_qcow2(self, mock_execute):
self._do_test_extract_snapshot(mock_execute,
dest_format='qcow2', out_format='qcow2')
@mock.patch('oslo_concurrency.processutils.execute')
def test_extract_snapshot_qcow2_and_compression(self, mock_execute):
self.flags(snapshot_compression=True, group='libvirt')
self._do_test_extract_snapshot(mock_execute,
dest_format='qcow2', out_format='qcow2')
@mock.patch('oslo_concurrency.processutils.execute')
def test_extract_snapshot_parallels(self, mock_execute):
self._do_test_extract_snapshot(mock_execute,
src_format='raw',
dest_format='ploop',
out_format='parallels')
def test_load_file(self):
dst_fd, dst_path = tempfile.mkstemp()
try:
os.close(dst_fd)
libvirt_utils.write_to_file(dst_path, 'hello')
self.assertEqual(libvirt_utils.load_file(dst_path), 'hello')
finally:
os.unlink(dst_path)
def test_file_open(self):
dst_fd, dst_path = tempfile.mkstemp()
try:
os.close(dst_fd)
libvirt_utils.write_to_file(dst_path, 'hello')
with libvirt_utils.file_open(dst_path, 'r') as fp:
self.assertEqual(fp.read(), 'hello')
finally:
os.unlink(dst_path)
    def test_get_fs_info(self):
        """get_fs_info derives byte totals from os.statvfs. The expected
        values below correspond to frsize * blocks (total),
        frsize * bavail (free, the non-root figure) and
        frsize * (blocks - bfree) (used)."""

        class FakeStatResult(object):
            # Mimics an os.statvfs_result with fixed, easy-to-check values.
            def __init__(self):
                self.f_bsize = 4096
                self.f_frsize = 4096
                self.f_blocks = 2000
                self.f_bfree = 1000
                self.f_bavail = 900
                self.f_files = 2000
                self.f_ffree = 1000
                self.f_favail = 900
                self.f_flag = 4096
                self.f_namemax = 255

        self.path = None

        def fake_statvfs(path):
            # Record the queried path so we can assert it was forwarded.
            self.path = path
            return FakeStatResult()

        self.stub_out('os.statvfs', fake_statvfs)
        fs_info = libvirt_utils.get_fs_info('/some/file/path')
        self.assertEqual('/some/file/path', self.path)
        self.assertEqual(8192000, fs_info['total'])  # 4096 * 2000
        self.assertEqual(3686400, fs_info['free'])   # 4096 * 900
        self.assertEqual(4096000, fs_info['used'])   # 4096 * (2000 - 1000)
@mock.patch('nova.virt.images.fetch_to_raw')
def test_fetch_image(self, mock_images):
context = 'opaque context'
target = '/tmp/targetfile'
image_id = '4'
trusted_certs = objects.TrustedCerts(
ids=['0b5d2c72-12cc-4ba6-a8d7-3ff5cc1d8cb8',
'674736e3-f25c-405c-8362-bbf991e0ce0a'])
libvirt_utils.fetch_image(context, target, image_id, trusted_certs)
mock_images.assert_called_once_with(
context, image_id, target, trusted_certs)
@mock.patch('nova.virt.images.fetch')
def test_fetch_initrd_image(self, mock_images):
_context = context.RequestContext(project_id=123,
project_name="aubergine",
user_id=456,
user_name="pie")
target = '/tmp/targetfile'
image_id = '4'
trusted_certs = objects.TrustedCerts(
ids=['0b5d2c72-12cc-4ba6-a8d7-3ff5cc1d8cb8',
'674736e3-f25c-405c-8362-bbf991e0ce0a'])
libvirt_utils.fetch_raw_image(_context, target, image_id,
trusted_certs)
mock_images.assert_called_once_with(
_context, image_id, target, trusted_certs)
    @mock.patch.object(compute_utils, 'disk_ops_semaphore')
    @mock.patch('nova.privsep.utils.supports_direct_io', return_value=True)
    @mock.patch('nova.privsep.qemu.unprivileged_convert_image')
    def test_fetch_raw_image(self, mock_convert_image, mock_direct_io,
                             mock_disk_op_sema):
        """fetch_to_raw downloads to '<target>.part', converts non-raw
        images via '<target>.converted' before renaming into place, and
        rejects images that declare a backing file."""

        def fake_rename(old, new):
            self.executes.append(('mv', old, new))

        def fake_unlink(path):
            self.executes.append(('rm', path))

        def fake_rm_on_error(path, remove=None):
            self.executes.append(('rm', '-f', path))

        def fake_qemu_img_info(path):
            # Derive the reported format from the file name so each target
            # below steers fetch_to_raw down a different code path.
            class FakeImgInfo(object):
                pass

            file_format = path.split('.')[-1]
            if file_format == 'part':
                file_format = path.split('.')[-2]
            elif file_format == 'converted':
                file_format = 'raw'

            if 'backing' in path:
                backing_file = 'backing'
            else:
                backing_file = None

            FakeImgInfo.file_format = file_format
            FakeImgInfo.backing_file = backing_file
            FakeImgInfo.virtual_size = 1

            return FakeImgInfo()

        self.stub_out('os.rename', fake_rename)
        self.stub_out('os.unlink', fake_unlink)
        self.stub_out('nova.virt.images.fetch', lambda *_, **__: None)
        self.stub_out('nova.virt.images.qemu_img_info', fake_qemu_img_info)
        self.stub_out('oslo_utils.fileutils.delete_if_exists',
                      fake_rm_on_error)
        # Route remove_path_on_error's cleanup through self.executes too.
        old_rm_path_on_error = fileutils.remove_path_on_error
        f = functools.partial(old_rm_path_on_error, remove=fake_rm_on_error)
        self.stub_out('oslo_utils.fileutils.remove_path_on_error', f)

        context = 'opaque context'
        image_id = '4'

        # qcow2 target: downloaded part file is converted then moved.
        target = 't.qcow2'
        self.executes = []
        expected_commands = [('rm', 't.qcow2.part'),
                             ('mv', 't.qcow2.converted', 't.qcow2')]
        images.fetch_to_raw(context, image_id, target)
        self.assertEqual(self.executes, expected_commands)
        mock_disk_op_sema.__enter__.assert_called_once()
        mock_convert_image.assert_called_with(
            't.qcow2.part', 't.qcow2.converted', 'qcow2', 'raw',
            CONF.instances_path, False)
        mock_convert_image.reset_mock()

        # raw target: no conversion is needed, just a rename.
        target = 't.raw'
        self.executes = []
        expected_commands = [('mv', 't.raw.part', 't.raw')]
        images.fetch_to_raw(context, image_id, target)
        self.assertEqual(self.executes, expected_commands)
        mock_convert_image.assert_not_called()

        # An image declaring a backing file is rejected and cleaned up.
        target = 'backing.qcow2'
        self.executes = []
        expected_commands = [('rm', '-f', 'backing.qcow2.part')]
        self.assertRaises(exception.ImageUnacceptable,
                          images.fetch_to_raw, context, image_id, target)
        self.assertEqual(self.executes, expected_commands)
        mock_convert_image.assert_not_called()

        del self.executes
    def test_get_disk_backing_file(self):
        """Only the base name of the backing file is returned; when the
        report includes an '(actual path: ...)' suffix, that path wins."""
        with_actual_path = False

        def fake_execute(*args, **kwargs):
            if with_actual_path:
                return ("some: output\n"
                        "backing file: /foo/bar/baz (actual path: /a/b/c)\n"
                        "...: ...\n"), ''
            else:
                return ("some: output\n"
                        "backing file: /foo/bar/baz\n"
                        "...: ...\n"), ''

        def return_true(*args, **kwargs):
            return True

        self.stub_out('oslo_concurrency.processutils.execute', fake_execute)
        self.stub_out('os.path.exists', return_true)

        out = libvirt_utils.get_disk_backing_file('')
        self.assertEqual(out, 'baz')
        with_actual_path = True
        out = libvirt_utils.get_disk_backing_file('')
        self.assertEqual(out, 'c')
def test_get_instance_path_at_destination(self):
instance = fake_instance.fake_instance_obj(None, name='fake_inst',
uuid=uuids.instance)
migrate_data = None
inst_path_at_dest = libvirt_utils.get_instance_path_at_destination(
instance, migrate_data)
expected_path = os.path.join(CONF.instances_path, instance['uuid'])
self.assertEqual(expected_path, inst_path_at_dest)
migrate_data = {}
inst_path_at_dest = libvirt_utils.get_instance_path_at_destination(
instance, migrate_data)
expected_path = os.path.join(CONF.instances_path, instance['uuid'])
self.assertEqual(expected_path, inst_path_at_dest)
migrate_data = objects.LibvirtLiveMigrateData(
instance_relative_path='fake_relative_path')
inst_path_at_dest = libvirt_utils.get_instance_path_at_destination(
instance, migrate_data)
expected_path = os.path.join(CONF.instances_path, 'fake_relative_path')
self.assertEqual(expected_path, inst_path_at_dest)
def test_get_arch(self):
image_meta = objects.ImageMeta.from_dict(
{'properties': {'architecture': "X86_64"}})
image_arch = libvirt_utils.get_arch(image_meta)
self.assertEqual(obj_fields.Architecture.X86_64, image_arch)
    def test_is_mounted(self):
        """Without a source, is_mounted defers to os.path.ismount; with a
        source it must also match the device column in /proc/mounts."""
        mount_path = "/var/lib/nova/mnt"
        source = "192.168.0.1:/nova"
        # /proc/mounts with the expected source mounted at mount_path.
        proc_with_mnt = """/dev/sda3 / xfs rw,seclabel,attr2,inode64 0 0
tmpfs /tmp tmpfs rw,seclabel 0 0
hugetlbfs /dev/hugepages hugetlbfs rw,seclabel,relatime 0 0
mqueue /dev/mqueue mqueue rw,seclabel,relatime 0 0
debugfs /sys/kernel/debug debugfs rw,seclabel,relatime 0 0
nfsd /proc/fs/nfsd nfsd rw,relatime 0 0
/dev/sda1 /boot ext4 rw,seclabel,relatime,data=ordered 0 0
sunrpc /var/lib/nfs/rpc_pipefs rpc_pipefs rw,relatime 0 0
192.168.0.1:/nova /var/lib/nova/mnt nfs4 rw,relatime,vers=4.1
"""
        # Same mountpoint but a different NFS server (wrong source).
        proc_wrong_mnt = """/dev/sda3 / xfs rw,seclabel,attr2,inode64 0 0
tmpfs /tmp tmpfs rw,seclabel 0 0
hugetlbfs /dev/hugepages hugetlbfs rw,seclabel,relatime 0 0
mqueue /dev/mqueue mqueue rw,seclabel,relatime 0 0
debugfs /sys/kernel/debug debugfs rw,seclabel,relatime 0 0
nfsd /proc/fs/nfsd nfsd rw,relatime 0 0
/dev/sda1 /boot ext4 rw,seclabel,relatime,data=ordered 0 0
sunrpc /var/lib/nfs/rpc_pipefs rpc_pipefs rw,relatime 0 0
192.168.0.2:/nova /var/lib/nova/mnt nfs4 rw,relatime,vers=4.1
"""
        # Mountpoint entirely absent from /proc/mounts.
        proc_without_mnt = """/dev/sda3 / xfs rw,seclabel,,attr2,inode64 0 0
tmpfs /tmp tmpfs rw,seclabel 0 0
hugetlbfs /dev/hugepages hugetlbfs rw,seclabel,relatime 0 0
mqueue /dev/mqueue mqueue rw,seclabel,relatime 0 0
debugfs /sys/kernel/debug debugfs rw,seclabel,relatime 0 0
nfsd /proc/fs/nfsd nfsd rw,relatime 0 0
/dev/sda1 /boot ext4 rw,seclabel,relatime,data=ordered 0 0
sunrpc /var/lib/nfs/rpc_pipefs rpc_pipefs rw,relatime 0 0
"""
        with mock.patch.object(os.path, 'ismount') as mock_ismount:
            # No source: the answer is exactly os.path.ismount's.
            mock_ismount.return_value = False
            self.assertFalse(libvirt_utils.is_mounted(mount_path))
            mock_ismount.return_value = True
            self.assertTrue(libvirt_utils.is_mounted(mount_path))

            proc_mnt = mock.mock_open(read_data=proc_with_mnt)
            with mock.patch.object(six.moves.builtins, "open", proc_mnt):
                self.assertTrue(libvirt_utils.is_mounted(mount_path, source))

            proc_mnt = mock.mock_open(read_data=proc_wrong_mnt)
            with mock.patch.object(six.moves.builtins, "open", proc_mnt):
                self.assertFalse(libvirt_utils.is_mounted(mount_path, source))

            # Source is given, and mountpoint isn't present in /proc/mounts
            # previously returned False in this case.
            proc_umnt = mock.mock_open(read_data=proc_without_mnt)
            with mock.patch.object(six.moves.builtins, "open", proc_umnt):
                self.assertFalse(libvirt_utils.is_mounted(mount_path, source))
    def test_find_disk_file_device(self):
        """find_disk returns the source path and driver type for a
        file-backed disk."""
        self.useFixture(fakelibvirt.FakeLibvirtFixture())
        xml = """
          <domain type='kvm'>
            <os>
              <type>linux</type>
            </os>
            <devices>
              <disk type="file" device="disk">
                <driver name="qemu" type="qcow2" cache="none" io="native"/>
                <source file="/tmp/hello"/>
                <target bus="ide" dev="/dev/hda"/>
              </disk>
            </devices>
          </domain>
        """
        virt_dom = mock.Mock(XMLDesc=mock.Mock(return_value=xml))
        guest = libvirt_guest.Guest(virt_dom)
        disk_path, format = libvirt_utils.find_disk(guest)
        self.assertEqual('/tmp/hello', disk_path)
        self.assertEqual('qcow2', format)
    def test_find_disk_block_device(self):
        """find_disk returns the /dev path and driver type for a
        block-backed disk."""
        self.useFixture(fakelibvirt.FakeLibvirtFixture())
        xml = """
          <domain type='kvm'>
            <os>
              <type>linux</type>
            </os>
            <devices>
              <disk type="block" device="disk">
                <driver name="qemu" type="raw"/>
                <source dev="/dev/nova-vg/hello"/>
                <target bus="ide" dev="/dev/hda"/>
              </disk>
            </devices>
          </domain>
        """
        virt_dom = mock.Mock(XMLDesc=mock.Mock(return_value=xml))
        guest = libvirt_guest.Guest(virt_dom)
        disk_path, format = libvirt_utils.find_disk(guest)
        self.assertEqual('/dev/nova-vg/hello', disk_path)
        self.assertEqual('raw', format)
    def test_find_disk_rbd(self):
        """A network (rbd) disk is reported with an 'rbd:' prefixed path
        built from the source's pool/image name."""
        self.useFixture(fakelibvirt.FakeLibvirtFixture())
        xml = """
          <domain type='kvm'>
            <os>
              <type>linux</type>
            </os>
            <devices>
              <disk type="network" device="disk">
                <driver name="qemu" type="raw"/>
                <source name="pool/image" protocol="rbd">
                  <host name="1.2.3.4" port="456"/>
                </source>
                <target bus="virtio" dev="/dev/vda"/>
              </disk>
            </devices>
          </domain>
        """
        virt_dom = mock.Mock(XMLDesc=mock.Mock(return_value=xml))
        guest = libvirt_guest.Guest(virt_dom)
        disk_path, format = libvirt_utils.find_disk(guest)
        self.assertEqual('rbd:pool/image', disk_path)
        self.assertEqual('raw', format)
    def test_find_disk_lxc(self):
        """For an LXC domain the root filesystem mount is located and the
        instance disk is derived from it, with no format.

        NOTE(review): the XML exposes /myhome/rootfs while the expected
        path is /myhome/disk — presumably find_disk maps the rootfs mount
        to the sibling 'disk' file; confirm against libvirt_utils.find_disk.
        """
        self.useFixture(fakelibvirt.FakeLibvirtFixture())
        xml = """
          <domain type='lxc'>
            <os>
              <type>exe</type>
            </os>
            <devices>
              <filesystem type="mount">
                <source dir="/myhome/rootfs"/>
                <target dir="/"/>
              </filesystem>
            </devices>
          </domain>
        """
        virt_dom = mock.Mock(XMLDesc=mock.Mock(return_value=xml))
        guest = libvirt_guest.Guest(virt_dom)
        disk_path, format = libvirt_utils.find_disk(guest)
        self.assertEqual('/myhome/disk', disk_path)
        self.assertIsNone(format)
    def test_find_disk_parallels(self):
        """A parallels ploop filesystem disk is reported with its source
        file path and 'ploop' as the format."""
        self.useFixture(fakelibvirt.FakeLibvirtFixture())
        xml = """
          <domain type='parallels'>
            <os>
              <type>exe</type>
            </os>
            <devices>
              <filesystem type='file'>"
                <driver format='ploop' type='ploop'/>"
                <source file='/test/disk'/>"
                <target dir='/'/>
              </filesystem>"
            </devices>
          </domain>
        """
        virt_dom = mock.Mock(XMLDesc=mock.Mock(return_value=xml))
        guest = libvirt_guest.Guest(virt_dom)
        disk_path, format = libvirt_utils.find_disk(guest)
        self.assertEqual('/test/disk', disk_path)
        self.assertEqual('ploop', format)
@mock.patch('nova.virt.libvirt.utils.get_arch')
def test_get_machine_type_from_fallbacks(self, mock_get_arch):
image_meta = objects.ImageMeta.from_dict({"disk_format": "raw"})
host_cpu_archs = {
obj_fields.Architecture.ARMV7: "virt",
obj_fields.Architecture.AARCH64: "virt",
obj_fields.Architecture.S390: "s390-ccw-virtio",
obj_fields.Architecture.S390X: "s390-ccw-virtio",
obj_fields.Architecture.I686: "pc",
obj_fields.Architecture.X86_64: "pc",
}
for arch, expected_mtype in host_cpu_archs.items():
mock_get_arch.return_value = arch
mtype = libvirt_utils.get_machine_type(image_meta)
self.assertEqual(expected_mtype, mtype)
def test_get_machine_type_from_conf(self):
self.useFixture(nova_fixtures.ConfPatcher(
group="libvirt", hw_machine_type=['x86_64=q35', 'i686=legacy']))
self.assertEqual('q35',
libvirt_utils.get_default_machine_type('x86_64'))
def test_get_machine_type_no_conf_or_fallback(self):
self.assertIsNone(libvirt_utils.get_default_machine_type('sparc'))
def test_get_machine_type_missing_conf_and_fallback(self):
self.useFixture(nova_fixtures.ConfPatcher(
group="libvirt", hw_machine_type=['x86_64=q35', 'i686=legacy']))
self.assertIsNone(libvirt_utils.get_default_machine_type('sparc'))
def test_get_machine_type_survives_invalid_conf(self):
self.useFixture(nova_fixtures.ConfPatcher(
group="libvirt", hw_machine_type=['x86_64=q35', 'foo']))
self.assertEqual('q35',
libvirt_utils.get_default_machine_type('x86_64'))
def test_get_machine_type_from_image(self):
image_meta = objects.ImageMeta.from_dict({
"disk_format": "raw", "properties": {"hw_machine_type": "q35"}
})
os_mach_type = libvirt_utils.get_machine_type(image_meta)
self.assertEqual('q35', os_mach_type)
def test_get_flags_by_flavor_specs(self):
flavor = objects.Flavor(
id=1, flavorid='fakeid-1', name='fake1.small', memory_mb=128,
vcpus=1, root_gb=1, ephemeral_gb=0, swap=0, rxtx_factor=0,
deleted=False, extra_specs={
'trait:%s' % os_traits.HW_CPU_X86_3DNOW: 'required',
'trait:%s' % os_traits.HW_CPU_X86_SSE2: 'required',
'trait:%s' % os_traits.HW_CPU_HYPERTHREADING: 'required',
})
traits = libvirt_utils.get_flags_by_flavor_specs(flavor)
# we shouldn't see the hyperthreading trait since that's a valid trait
# but not a CPU flag
self.assertEqual(set(['3dnow', 'sse2']), traits)
| true | true |
1c3b3e051d2c2972a081012a9b8758d9e7a1e042 | 6,503 | py | Python | src/silx/io/test/test_octaveh5.py | rnwatanabe/silx | b0395f4a06c048b7778dc04ada828edd195ef02d | [
"CC0-1.0",
"MIT"
] | 94 | 2016-03-04T17:25:53.000Z | 2022-03-18T18:05:23.000Z | src/silx/io/test/test_octaveh5.py | rnwatanabe/silx | b0395f4a06c048b7778dc04ada828edd195ef02d | [
"CC0-1.0",
"MIT"
] | 2,841 | 2016-01-21T09:06:49.000Z | 2022-03-18T14:53:56.000Z | src/silx/io/test/test_octaveh5.py | rnwatanabe/silx | b0395f4a06c048b7778dc04ada828edd195ef02d | [
"CC0-1.0",
"MIT"
] | 71 | 2015-09-30T08:35:35.000Z | 2022-03-16T07:16:28.000Z | # coding: utf-8
# /*##########################################################################
# Copyright (C) 2016 European Synchrotron Radiation Facility
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# ############################################################################*/
"""
Tests for the octaveh5 module
"""
__authors__ = ["C. Nemoz", "H. Payno"]
__license__ = "MIT"
__date__ = "12/07/2016"
import unittest
import os
import tempfile
try:
    from ..octaveh5 import Octaveh5
except ImportError:
    # octaveh5 depends on h5py, which is optional; the tests below are
    # skipped when the import fails.
    Octaveh5 = None
class TestOctaveH5(unittest.TestCase):
@staticmethod
def _get_struct_FT():
return {
'NO_CHECK': 0.0, 'SHOWSLICE': 1.0, 'DOTOMO': 1.0, 'DATABASE': 0.0, 'ANGLE_OFFSET': 0.0,
'VOLSELECTION_REMEMBER': 0.0, 'NUM_PART': 4.0, 'VOLOUTFILE': 0.0, 'RINGSCORRECTION': 0.0,
'DO_TEST_SLICE': 1.0, 'ZEROOFFMASK': 1.0, 'VERSION': 'fastomo3 version 2.0',
'CORRECT_SPIKES_THRESHOLD': 0.040000000000000001, 'SHOWPROJ': 0.0, 'HALF_ACQ': 0.0,
'ANGLE_OFFSET_VALUE': 0.0, 'FIXEDSLICE': 'middle', 'VOLSELECT': 'total' }
@staticmethod
def _get_struct_PYHSTEXE():
return {
'EXE': 'PyHST2_2015d', 'VERBOSE': 0.0, 'OFFV': 'PyHST2_2015d', 'TOMO': 0.0,
'VERBOSE_FILE': 'pyhst_out.txt', 'DIR': '/usr/bin/', 'OFFN': 'pyhst2'}
@staticmethod
def _get_struct_FTAXIS():
return {
'POSITION_VALUE': 12345.0, 'COR_ERROR': 0.0, 'FILESDURINGSCAN': 0.0, 'PLOTFIGURE': 1.0,
'DIM1': 0.0, 'OVERSAMPLING': 5.0, 'TO_THE_CENTER': 1.0, 'POSITION': 'fixed',
'COR_POSITION': 0.0, 'HA': 0.0 }
@staticmethod
def _get_struct_PAGANIN():
return {
'MKEEP_MASK': 0.0, 'UNSHARP_SIGMA': 0.80000000000000004, 'DILATE': 2.0, 'UNSHARP_COEFF': 3.0,
'MEDIANR': 4.0, 'DB': 500.0, 'MKEEP_ABS': 0.0, 'MODE': 0.0, 'THRESHOLD': 0.5,
'MKEEP_BONE': 0.0, 'DB2': 100.0, 'MKEEP_CORR': 0.0, 'MKEEP_SOFT': 0.0 }
@staticmethod
def _get_struct_BEAMGEO():
return {'DIST': 55.0, 'SY': 0.0, 'SX': 0.0, 'TYPE': 'p'}
def setUp(self):
self.tempdir = tempfile.mkdtemp()
self.test_3_6_fname = os.path.join(self.tempdir, "silx_tmp_t00_octaveTest_3_6.h5")
self.test_3_8_fname = os.path.join(self.tempdir, "silx_tmp_t00_octaveTest_3_8.h5")
def tearDown(self):
if os.path.isfile(self.test_3_6_fname):
os.unlink(self.test_3_6_fname)
if os.path.isfile(self.test_3_8_fname):
os.unlink(self.test_3_8_fname)
def testWritedIsReaded(self):
"""
Simple test to write and reaf the structure compatible with the octave h5 using structure.
This test is for # test for octave version > 3.8
"""
writer = Octaveh5()
writer.open(self.test_3_8_fname, 'a')
# step 1 writing the file
writer.write('FT', self._get_struct_FT())
writer.write('PYHSTEXE', self._get_struct_PYHSTEXE())
writer.write('FTAXIS', self._get_struct_FTAXIS())
writer.write('PAGANIN', self._get_struct_PAGANIN())
writer.write('BEAMGEO', self._get_struct_BEAMGEO())
writer.close()
# step 2 reading the file
reader = Octaveh5().open(self.test_3_8_fname)
# 2.1 check FT
data_readed = reader.get('FT')
self.assertEqual(data_readed, self._get_struct_FT() )
# 2.2 check PYHSTEXE
data_readed = reader.get('PYHSTEXE')
self.assertEqual(data_readed, self._get_struct_PYHSTEXE() )
# 2.3 check FTAXIS
data_readed = reader.get('FTAXIS')
self.assertEqual(data_readed, self._get_struct_FTAXIS() )
# 2.4 check PAGANIN
data_readed = reader.get('PAGANIN')
self.assertEqual(data_readed, self._get_struct_PAGANIN() )
# 2.5 check BEAMGEO
data_readed = reader.get('BEAMGEO')
self.assertEqual(data_readed, self._get_struct_BEAMGEO() )
reader.close()
def testWritedIsReadedOldOctaveVersion(self):
"""The same test as testWritedIsReaded but for octave version < 3.8
"""
# test for octave version < 3.8
writer = Octaveh5(3.6)
writer.open(self.test_3_6_fname, 'a')
# step 1 writing the file
writer.write('FT', self._get_struct_FT())
writer.write('PYHSTEXE', self._get_struct_PYHSTEXE())
writer.write('FTAXIS', self._get_struct_FTAXIS())
writer.write('PAGANIN', self._get_struct_PAGANIN())
writer.write('BEAMGEO', self._get_struct_BEAMGEO())
writer.close()
# step 2 reading the file
reader = Octaveh5(3.6).open(self.test_3_6_fname)
# 2.1 check FT
data_readed = reader.get('FT')
self.assertEqual(data_readed, self._get_struct_FT() )
# 2.2 check PYHSTEXE
data_readed = reader.get('PYHSTEXE')
self.assertEqual(data_readed, self._get_struct_PYHSTEXE() )
# 2.3 check FTAXIS
data_readed = reader.get('FTAXIS')
self.assertEqual(data_readed, self._get_struct_FTAXIS() )
# 2.4 check PAGANIN
data_readed = reader.get('PAGANIN')
self.assertEqual(data_readed, self._get_struct_PAGANIN() )
# 2.5 check BEAMGEO
data_readed = reader.get('BEAMGEO')
self.assertEqual(data_readed, self._get_struct_BEAMGEO() )
reader.close()
| 41.420382 | 106 | 0.629863 | true | true | |
1c3b3e2c456c705d4ff38427010b3887f3fb70e7 | 13,144 | py | Python | ctapipe/visualization/mpl_array.py | LukasBeiske/ctapipe | 8325700ca01cbae62733c2f41de4113013f18939 | [
"BSD-3-Clause"
] | null | null | null | ctapipe/visualization/mpl_array.py | LukasBeiske/ctapipe | 8325700ca01cbae62733c2f41de4113013f18939 | [
"BSD-3-Clause"
] | null | null | null | ctapipe/visualization/mpl_array.py | LukasBeiske/ctapipe | 8325700ca01cbae62733c2f41de4113013f18939 | [
"BSD-3-Clause"
] | null | null | null | from itertools import cycle
import numpy as np
from astropy import units as u
from astropy.coordinates import Angle
from matplotlib import pyplot as plt
from matplotlib.collections import PatchCollection
from matplotlib.lines import Line2D
from matplotlib.patches import Circle
from ctapipe.coordinates import GroundFrame
from ctapipe.visualization.mpl_camera import polar_to_cart
class ArrayDisplay:
"""
Display a top-town view of a telescope array.
This can be used in two ways: by default, you get a display of all
telescopes in the subarray, colored by telescope type, however you can
also color the telescopes by a value (like trigger pattern, or some other
scalar per-telescope parameter). To set the color value, simply set the
``value`` attribute, and the fill color will be updated with the value. You
might want to set the border color to zero to avoid confusion between the
telescope type color and the value color (
``array_disp.telescope.set_linewidth(0)``)
To display a vector field over the telescope positions, e.g. for
reconstruction, call `set_vector_uv()` to set cartesian vectors,
or `set_vector_rho_phi()` to set polar coordinate vectors.
These both take an array of length N_tels, or a single value.
Parameters
----------
subarray: ctapipe.instrument.SubarrayDescription
the array layout to display
axes: matplotlib.axes.Axes
matplotlib axes to plot on, or None to use current one
title: str
title of array plot
tel_scale: float
scaling between telescope mirror radius in m to displayed size
autoupdate: bool
redraw when the input changes
radius: Union[float, list, None]
set telescope radius to value, list/array of values. If None, radius
is taken from the telescope's mirror size.
"""
    def __init__(
        self,
        subarray,
        axes=None,
        autoupdate=True,
        tel_scale=2.0,
        alpha=0.7,
        title=None,
        radius=None,
        frame=GroundFrame(),
    ):
        # See the class docstring for parameter descriptions.
        # NOTE(review): frame=GroundFrame() is a mutable default argument
        # shared across calls — confirm GroundFrame is effectively
        # immutable here.
        self.frame = frame
        self.subarray = subarray
        self.axes = axes or plt.gca()

        # get the telescope positions. If a new frame is set, this will
        # transform to the new frame.
        self.tel_coords = subarray.tel_coords.transform_to(frame).cartesian
        self.unit = self.tel_coords.x.unit

        # set up colors per telescope type
        tel_types = [str(tel) for tel in subarray.tels.values()]
        if radius is None:
            # set radius to the mirror radius (so big tels appear big)
            radius = [
                np.sqrt(tel.optics.mirror_area.to("m2").value) * tel_scale
                for tel in subarray.tel.values()
            ]

            self.radii = radius
        else:
            self.radii = np.ones(len(tel_types)) * radius

        if title is None:
            title = subarray.name

        # get default matplotlib color cycle (depends on the current style)
        color_cycle = cycle(plt.rcParams["axes.prop_cycle"].by_key()["color"])

        # map a color to each telescope type:
        tel_type_to_color = {}
        for tel_type in list(set(tel_types)):
            tel_type_to_color[tel_type] = next(color_cycle)

        tel_color = [tel_type_to_color[ttype] for ttype in tel_types]

        # one filled circle per telescope, colored by type
        patches = []
        for x, y, r, c in zip(
            list(self.tel_coords.x.to_value("m")),
            list(self.tel_coords.y.to_value("m")),
            list(radius),
            tel_color,
        ):
            patches.append(Circle(xy=(x, y), radius=r, fill=True, color=c, alpha=alpha))

        # build the legend (one marker per telescope type):
        legend_elements = []
        for ttype in list(set(tel_types)):
            color = tel_type_to_color[ttype]
            legend_elements.append(
                Line2D(
                    [0],
                    [0],
                    marker="o",
                    color=color,
                    label=ttype,
                    markersize=10,
                    alpha=alpha,
                    linewidth=0,
                )
            )
        plt.legend(handles=legend_elements)

        self.add_radial_grid()

        # create the plot
        self.tel_colors = tel_color
        self.autoupdate = autoupdate
        self.telescopes = PatchCollection(patches, match_original=True)
        self.telescopes.set_linewidth(2.0)

        self.axes.add_collection(self.telescopes)
        self.axes.set_aspect(1.0)
        self.axes.set_title(title)

        # axis labels carry the frame's component names and units
        xunit = self.tel_coords.x.unit.to_string("latex")
        yunit = self.tel_coords.y.unit.to_string("latex")
        xname, yname, _ = frame.get_representation_component_names().keys()
        self.axes.set_xlabel(f"{xname} [{xunit}] $\\rightarrow$")
        self.axes.set_ylabel(f"{yname} [{yunit}] $\\rightarrow$")
        self._labels = []
        self._quiver = None
        self.axes.autoscale_view()
@property
def values(self):
"""An array containing a value per telescope"""
return self.telescopes.get_array()
@values.setter
def values(self, values):
"""set the telescope colors to display"""
self.telescopes.set_array(np.ma.masked_invalid(values))
self._update()
    def add_radial_grid(self, spacing=100 * u.m):
        """add some dotted rings for distance estimation. The number of rings
        is estimated automatically from the spacing and the array footprint.
        Parameters
        ----------
        spacing: Quantity
            spacing between rings
        """
        # number of rings needed to reach the radius of a circle with the
        # same area as the array footprint
        n_circles = np.round(
            (np.sqrt(self.subarray.footprint / np.pi) / spacing).to_value(""),
            0,
        )
        # ring radii in display units, starting one spacing from the center
        circle_radii = np.arange(1, n_circles + 2, 1) * spacing.to_value(self.unit)
        circle_patches = PatchCollection(
            [
                Circle(
                    xy=(0, 0),
                    radius=r,
                    fill=False,
                    fc="none",
                    linestyle="dotted",
                    color="gray",
                    alpha=0.1,
                    lw=1,
                )
                for r in circle_radii
            ],
            color="#eeeeee",
            ls="dotted",
            fc="none",
            lw=3,
        )
        self.axes.add_collection(circle_patches)
    def set_vector_uv(self, uu, vv, c=None, **kwargs):
        """sets the vector field U,V and color for all telescopes
        Parameters
        ----------
        uu: array[num_tels]
            x-component of direction vector
        vv: array[num_tels]
            y-component of direction vector
        c: color or list of colors
            vector color for each telescope (or one for all)
        kwargs:
            extra args passed to plt.quiver(), ignored on subsequent updates
        """
        coords = self.tel_coords
        uu = u.Quantity(uu).to_value("m")
        vv = u.Quantity(vv).to_value("m")
        N = len(coords.x)
        # matplotlib since 3.2 does not allow scalars anymore
        # if quiver was already created with a certain number of arrows
        if np.isscalar(uu):
            uu = np.full(N, uu)
        if np.isscalar(vv):
            vv = np.full(N, vv)
        # passing in None for C does not work, we need to provide
        # a variadic number of arguments
        args = [coords.x.to_value("m"), coords.y.to_value("m"), uu, vv]
        if c is None:
            # use colors by telescope type if the user did not provide any
            kwargs["color"] = kwargs.get("color", self.tel_colors)
        else:
            # same as above, enable use of scalar to set all values at once
            if np.isscalar(c):
                c = np.full(N, c)
            args.append(c)
        # create the quiver on first call; afterwards only the arrow data is
        # updated, so styling kwargs are honored on the first call only
        if self._quiver is None:
            self._quiver = self.axes.quiver(
                *args, scale_units="xy", angles="xy", scale=1, **kwargs
            )
        else:
            self._quiver.set_UVC(uu, vv, c)
def set_vector_rho_phi(self, rho, phi, c=None, **kwargs):
"""sets the vector field using R, Phi for each telescope
Parameters
----------
rho: float or array[float]
vector magnitude for each telescope
phi: array[Angle]
vector angle for each telescope
c: color or list of colors
vector color for each telescope (or one for all)
"""
phi = Angle(phi).rad
uu, vv = polar_to_cart(rho, phi)
self.set_vector_uv(uu, vv, c=c, **kwargs)
    def set_vector_hillas(
        self, hillas_dict, core_dict, length, time_gradient, angle_offset
    ):
        """
        Function to set the vector angle and length from a set of Hillas parameters.
        In order to proper use the arrow on the ground, also a dictionary with the time
        gradients for the different telescopes is needed. If the gradient is 0 the arrow
        is not plotted on the ground, whereas if the value of the gradient is negative,
        the arrow is rotated by 180 degrees (Angle(angle_offset) not added).
        This plotting behaviour has been tested with the timing_parameters function
        in ctapipe/image.
        Parameters
        ----------
        hillas_dict: Dict[int, HillasParametersContainer]
            mapping of tel_id to Hillas parameters
        core_dict : Dict[int, CoreParameters]
            mapping of tel_id to CoreParametersContainer
        length: Float
            length of the arrow (in meters)
        time_gradient: Dict[int, value of time gradient (no units)]
            dictionary for value of the time gradient for each telescope
        angle_offset: Float
            This should be the ``event.pointing.array_azimuth`` parameter
        """
        # rot_angle_ellipse is psi parameter in HillasParametersContainer
        rho = np.zeros(self.subarray.num_tels) * u.m
        rot_angle_ellipse = np.zeros(self.subarray.num_tels) * u.deg
        for tel_id, params in hillas_dict.items():
            idx = self.subarray.tel_indices[tel_id]
            rho[idx] = u.Quantity(length, u.m)
            psi = core_dict[tel_id]
            if time_gradient[tel_id] > 0.01:
                # NOTE(review): angle_offset is wrapped in Angle() only inside
                # this branch; the elif below adds it unwrapped — confirm that
                # this asymmetry is intended.
                angle_offset = Angle(angle_offset)
                rot_angle_ellipse[idx] = psi + angle_offset + 180 * u.deg
            elif time_gradient[tel_id] < -0.01:
                rot_angle_ellipse[idx] = psi + angle_offset
            else:
                # gradient compatible with zero: suppress this arrow
                rho[idx] = 0 * u.m
        self.set_vector_rho_phi(rho=rho, phi=rot_angle_ellipse)
    def set_line_hillas(self, hillas_dict, core_dict, range, **kwargs):
        """
        Plot the telescope-wise direction of the shower as a segment.
        Each segment will be centered with a point on the telescope position
        and will be 2*range long.
        Parameters
        ----------
        hillas_dict: Dict[int, HillasParametersContainer]
            mapping of tel_id to Hillas parameters
        core_dict : Dict[int, CoreParameters]
            mapping of tel_id to CoreParametersContainer
        range: float
            half of the length of the segments to be plotted (in meters)
        """
        # NOTE(review): the parameter name ``range`` shadows the builtin, but
        # renaming it would break keyword callers, so it is kept as-is.
        coords = self.tel_coords
        c = self.tel_colors
        r = np.array([-range, range])
        for tel_id, params in hillas_dict.items():
            idx = self.subarray.tel_indices[tel_id]
            x_0 = coords[idx].x.to_value(u.m)
            y_0 = coords[idx].y.to_value(u.m)
            psi = core_dict[tel_id]
            # segment endpoints: telescope position +/- range along angle psi
            x = x_0 + np.cos(psi).value * r
            y = y_0 + np.sin(psi).value * r
            self.axes.plot(x, y, color=c[idx], **kwargs)
            # mark the telescope position itself
            self.axes.scatter(x_0, y_0, color=c[idx])
def add_labels(self):
px = self.tel_coords.x.to_value("m")
py = self.tel_coords.y.to_value("m")
for tel, x, y, r in zip(self.subarray.tels, px, py, self.radii):
name = str(tel)
lab = self.axes.text(
x,
y - r * 1.8,
name,
fontsize=8,
clip_on=True,
horizontalalignment="center",
verticalalignment="top",
)
self._labels.append(lab)
def remove_labels(self):
for lab in self._labels:
lab.remove()
self._labels = []
def _update(self):
"""signal a redraw if necessary"""
if self.autoupdate:
plt.draw()
def background_contour(self, x, y, background, **kwargs):
"""
Draw image contours in background of the display, useful when likelihood fitting
Parameters
----------
x: ndarray
array of image X coordinates
y: ndarray
array of image Y coordinates
background: ndarray
Array of image to use in background
kwargs: key=value
any style keywords to pass to matplotlib
"""
# use zorder to ensure the contours appear under the telescopes.
self.axes.contour(x, y, background, zorder=0, **kwargs)
| 34.229167 | 88 | 0.582395 | from itertools import cycle
import numpy as np
from astropy import units as u
from astropy.coordinates import Angle
from matplotlib import pyplot as plt
from matplotlib.collections import PatchCollection
from matplotlib.lines import Line2D
from matplotlib.patches import Circle
from ctapipe.coordinates import GroundFrame
from ctapipe.visualization.mpl_camera import polar_to_cart
class ArrayDisplay:
    """Display of a telescope subarray on a matplotlib axes.
    Telescopes are drawn as filled circles (radius derived from the mirror
    area unless overridden), colored per telescope type, with a legend and a
    dotted radial distance grid. Per-telescope values, vector fields, line
    segments and labels can be overlaid afterwards.
    """
    def __init__(
        self,
        subarray,
        axes=None,
        autoupdate=True,
        tel_scale=2.0,
        alpha=0.7,
        title=None,
        radius=None,
        frame=GroundFrame(),
    ):
        """Build the display for *subarray*.
        Parameters
        ----------
        subarray : SubarrayDescription
            the array layout to display
        axes : matplotlib Axes, optional
            axes to draw into; defaults to the current axes
        autoupdate : bool
            redraw automatically whenever the displayed values change
        tel_scale : float
            scale factor applied to the default telescope radii
        alpha : float
            alpha value used for the telescope circles
        title : str, optional
            plot title; defaults to the subarray name
        radius : float or list of float, optional
            fixed radius for every telescope; if None, derive from mirror area
        frame : coordinate frame
            frame to transform the telescope positions into
        """
        # NOTE(review): ``frame=GroundFrame()`` is a mutable-looking default
        # evaluated once at definition time — confirm GroundFrame is stateless.
        self.frame = frame
        self.subarray = subarray
        self.axes = axes or plt.gca()
        # get the telescope positions; if a new frame is given, this
        # transforms into that frame
        self.tel_coords = subarray.tel_coords.transform_to(frame).cartesian
        self.unit = self.tel_coords.x.unit
        # set up colors per telescope type
        tel_types = [str(tel) for tel in subarray.tels.values()]
        if radius is None:
            # set radius from the mirror area (so big telescopes appear big)
            radius = [
                np.sqrt(tel.optics.mirror_area.to("m2").value) * tel_scale
                for tel in subarray.tel.values()
            ]
            self.radii = radius
        else:
            self.radii = np.ones(len(tel_types)) * radius
        if title is None:
            title = subarray.name
        # default matplotlib color cycle (depends on the current style)
        color_cycle = cycle(plt.rcParams["axes.prop_cycle"].by_key()["color"])
        # map a color to each telescope type
        tel_type_to_color = {}
        for tel_type in list(set(tel_types)):
            tel_type_to_color[tel_type] = next(color_cycle)
        tel_color = [tel_type_to_color[ttype] for ttype in tel_types]
        # one circle patch per telescope, at its ground position
        patches = []
        for x, y, r, c in zip(
            list(self.tel_coords.x.to_value("m")),
            list(self.tel_coords.y.to_value("m")),
            list(radius),
            tel_color,
        ):
            patches.append(Circle(xy=(x, y), radius=r, fill=True, color=c, alpha=alpha))
        # build the legend: one proxy marker per telescope type
        legend_elements = []
        for ttype in list(set(tel_types)):
            color = tel_type_to_color[ttype]
            legend_elements.append(
                Line2D(
                    [0],
                    [0],
                    marker="o",
                    color=color,
                    label=ttype,
                    markersize=10,
                    alpha=alpha,
                    linewidth=0,
                )
            )
        plt.legend(handles=legend_elements)
        self.add_radial_grid()
        # create the plot
        self.tel_colors = tel_color
        self.autoupdate = autoupdate
        self.telescopes = PatchCollection(patches, match_original=True)
        self.telescopes.set_linewidth(2.0)
        self.axes.add_collection(self.telescopes)
        self.axes.set_aspect(1.0)
        self.axes.set_title(title)
        # axis labels carry the frame's component names and units
        xunit = self.tel_coords.x.unit.to_string("latex")
        yunit = self.tel_coords.y.unit.to_string("latex")
        xname, yname, _ = frame.get_representation_component_names().keys()
        self.axes.set_xlabel(f"{xname} [{xunit}] $\\rightarrow$")
        self.axes.set_ylabel(f"{yname} [{yunit}] $\\rightarrow$")
        self._labels = []
        self._quiver = None
        self.axes.autoscale_view()
    @property
    def values(self):
        """An array containing a value per telescope."""
        return self.telescopes.get_array()
    @values.setter
    def values(self, values):
        """Set the per-telescope values used to color the telescopes."""
        # mask non-finite entries so the colormap skips them
        self.telescopes.set_array(np.ma.masked_invalid(values))
        self._update()
    def add_radial_grid(self, spacing=100 * u.m):
        """Add dotted rings for distance estimation; the number of rings is
        estimated automatically from the spacing and the array footprint.
        Parameters
        ----------
        spacing: Quantity
            spacing between rings
        """
        # rings needed to reach the radius of the equivalent-area circle
        n_circles = np.round(
            (np.sqrt(self.subarray.footprint / np.pi) / spacing).to_value(""),
            0,
        )
        circle_radii = np.arange(1, n_circles + 2, 1) * spacing.to_value(self.unit)
        circle_patches = PatchCollection(
            [
                Circle(
                    xy=(0, 0),
                    radius=r,
                    fill=False,
                    fc="none",
                    linestyle="dotted",
                    color="gray",
                    alpha=0.1,
                    lw=1,
                )
                for r in circle_radii
            ],
            color="#eeeeee",
            ls="dotted",
            fc="none",
            lw=3,
        )
        self.axes.add_collection(circle_patches)
    def set_vector_uv(self, uu, vv, c=None, **kwargs):
        """Set the vector field U,V and color for all telescopes.
        Parameters
        ----------
        uu: array[num_tels]
            x-component of direction vector
        vv: array[num_tels]
            y-component of direction vector
        c: color or list of colors
            vector color for each telescope (or one for all)
        kwargs:
            extra args passed to plt.quiver(), ignored on subsequent updates
        """
        coords = self.tel_coords
        uu = u.Quantity(uu).to_value("m")
        vv = u.Quantity(vv).to_value("m")
        N = len(coords.x)
        # matplotlib since 3.2 does not allow scalars anymore once the
        # quiver was created with a certain number of arrows
        if np.isscalar(uu):
            uu = np.full(N, uu)
        if np.isscalar(vv):
            vv = np.full(N, vv)
        # passing None for C does not work; build a variadic argument list
        args = [coords.x.to_value("m"), coords.y.to_value("m"), uu, vv]
        if c is None:
            # use colors by telescope type if the user did not provide any
            kwargs["color"] = kwargs.get("color", self.tel_colors)
        else:
            # allow a scalar to set all colors at once
            if np.isscalar(c):
                c = np.full(N, c)
            args.append(c)
        # create the quiver once; afterwards only the arrow data is updated,
        # so styling kwargs only take effect on the first call
        if self._quiver is None:
            self._quiver = self.axes.quiver(
                *args, scale_units="xy", angles="xy", scale=1, **kwargs
            )
        else:
            self._quiver.set_UVC(uu, vv, c)
    def set_vector_rho_phi(self, rho, phi, c=None, **kwargs):
        """Set the vector field using R, Phi for each telescope.
        Parameters
        ----------
        rho: float or array[float]
            vector magnitude for each telescope
        phi: array[Angle]
            vector angle for each telescope
        c: color or list of colors
            vector color for each telescope (or one for all)
        """
        phi = Angle(phi).rad
        uu, vv = polar_to_cart(rho, phi)
        self.set_vector_uv(uu, vv, c=c, **kwargs)
    def set_vector_hillas(
        self, hillas_dict, core_dict, length, time_gradient, angle_offset
    ):
        """Set the vector angle and length from a set of Hillas parameters.
        Telescopes whose time gradient is compatible with zero get no arrow;
        a positive gradient flips the arrow by 180 degrees.
        Parameters
        ----------
        hillas_dict: Dict[int, HillasParametersContainer]
            mapping of tel_id to Hillas parameters
        core_dict : Dict[int, CoreParameters]
            mapping of tel_id to CoreParametersContainer
        length: Float
            length of the arrow (in meters)
        time_gradient: Dict[int, float]
            value of the time gradient for each telescope
        angle_offset: Float
            typically the ``event.pointing.array_azimuth`` parameter
        """
        # rot_angle_ellipse is the psi parameter of the Hillas ellipse
        rho = np.zeros(self.subarray.num_tels) * u.m
        rot_angle_ellipse = np.zeros(self.subarray.num_tels) * u.deg
        for tel_id, params in hillas_dict.items():
            idx = self.subarray.tel_indices[tel_id]
            rho[idx] = u.Quantity(length, u.m)
            psi = core_dict[tel_id]
            if time_gradient[tel_id] > 0.01:
                angle_offset = Angle(angle_offset)
                rot_angle_ellipse[idx] = psi + angle_offset + 180 * u.deg
            elif time_gradient[tel_id] < -0.01:
                rot_angle_ellipse[idx] = psi + angle_offset
            else:
                # gradient compatible with zero: suppress this arrow
                rho[idx] = 0 * u.m
        self.set_vector_rho_phi(rho=rho, phi=rot_angle_ellipse)
    def set_line_hillas(self, hillas_dict, core_dict, range, **kwargs):
        """Plot the telescope-wise direction of the shower as a segment.
        Each segment is centered on the telescope position and 2*range long.
        Parameters
        ----------
        hillas_dict: Dict[int, HillasParametersContainer]
            mapping of tel_id to Hillas parameters
        core_dict : Dict[int, CoreParameters]
            mapping of tel_id to CoreParametersContainer
        range: float
            half of the length of the segments to be plotted (in meters)
        """
        coords = self.tel_coords
        c = self.tel_colors
        r = np.array([-range, range])
        for tel_id, params in hillas_dict.items():
            idx = self.subarray.tel_indices[tel_id]
            x_0 = coords[idx].x.to_value(u.m)
            y_0 = coords[idx].y.to_value(u.m)
            psi = core_dict[tel_id]
            # segment endpoints: telescope position +/- range along psi
            x = x_0 + np.cos(psi).value * r
            y = y_0 + np.sin(psi).value * r
            self.axes.plot(x, y, color=c[idx], **kwargs)
            self.axes.scatter(x_0, y_0, color=c[idx])
    def add_labels(self):
        """Annotate each telescope with its name below its marker."""
        px = self.tel_coords.x.to_value("m")
        py = self.tel_coords.y.to_value("m")
        for tel, x, y, r in zip(self.subarray.tels, px, py, self.radii):
            name = str(tel)
            lab = self.axes.text(
                x,
                y - r * 1.8,
                name,
                fontsize=8,
                clip_on=True,
                horizontalalignment="center",
                verticalalignment="top",
            )
            self._labels.append(lab)
    def remove_labels(self):
        """Delete all labels created by :meth:`add_labels`."""
        for lab in self._labels:
            lab.remove()
        self._labels = []
    def _update(self):
        """Signal a redraw if autoupdate is enabled."""
        if self.autoupdate:
            plt.draw()
    def background_contour(self, x, y, background, **kwargs):
        """Draw image contours in the background of the display.
        Parameters
        ----------
        x: ndarray
            array of image X coordinates
        y: ndarray
            array of image Y coordinates
        background: ndarray
            Array of image to use in background
        kwargs: key=value
            any style keywords to pass to matplotlib
        """
        # zorder=0 keeps the contours under the telescope patches
        self.axes.contour(x, y, background, zorder=0, **kwargs)
| true | true |
1c3b3f1f7568f2c59cc3550b3a969d29919ef84a | 38,530 | py | Python | fosscordself/webhook.py | discordtehe/fosscord.py-self | c8f5d14471af0e226870128eecd92ee67e4abb06 | [
"MIT"
] | null | null | null | fosscordself/webhook.py | discordtehe/fosscord.py-self | c8f5d14471af0e226870128eecd92ee67e4abb06 | [
"MIT"
] | null | null | null | fosscordself/webhook.py | discordtehe/fosscord.py-self | c8f5d14471af0e226870128eecd92ee67e4abb06 | [
"MIT"
] | 1 | 2022-01-12T02:09:08.000Z | 2022-01-12T02:09:08.000Z | # -*- coding: utf-8 -*-
"""
The MIT License (MIT)
Copyright (c) 2015-present Rapptz
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
import logging
import asyncio
import json
import time
import re
from urllib.parse import quote as _uriquote
import aiohttp
from . import utils
from .errors import InvalidArgument, HTTPException, Forbidden, NotFound, DiscordServerError
from .message import Message
from .enums import try_enum, WebhookType
from .user import BaseUser, User
from .asset import Asset
from .mixins import Hashable
# Public API re-exported from this module.
__all__ = (
    'WebhookAdapter',
    'AsyncWebhookAdapter',
    'RequestsWebhookAdapter',
    'Webhook',
    'WebhookMessage',
)
# Module-level logger shared by all webhook adapters below.
log = logging.getLogger(__name__)
class WebhookAdapter:
    """Base class for all webhook adapters.
    Attributes
    ------------
    webhook: :class:`Webhook`
        The webhook that owns this adapter.
    """
    # API base used to build every request URL.
    BASE = 'https://discord.com/api/v7'
    def _prepare(self, webhook):
        # Bind this adapter to a specific webhook; called by Webhook.__init__.
        self._webhook_id = webhook.id
        self._webhook_token = webhook.token
        self._request_url = '{0.BASE}/webhooks/{1}/{2}'.format(self, webhook.id, webhook.token)
        self.webhook = webhook
    def is_async(self):
        # Subclasses that return coroutines override this to return True.
        return False
    def request(self, verb, url, payload=None, multipart=None):
        """Actually does the request.
        Subclasses must implement this.
        NOTE(review): subclasses additionally accept ``files=`` and
        ``reason=`` keyword arguments (and the helpers below pass them), so
        overrides must keep those in their signature.
        Parameters
        -----------
        verb: :class:`str`
            The HTTP verb to use for the request.
        url: :class:`str`
            The URL to send the request to. This will have
            the query parameters already added to it, if any.
        multipart: Optional[:class:`dict`]
            A dict containing multipart form data to send with
            the request. If a filename is being uploaded, then it will
            be under a ``file`` key which will have a 3-element :class:`tuple`
            denoting ``(filename, file, content_type)``.
        payload: Optional[:class:`dict`]
            The JSON to send with the request, if any.
        """
        raise NotImplementedError()
    def delete_webhook(self, *, reason=None):
        # DELETE the webhook itself.
        return self.request('DELETE', self._request_url, reason=reason)
    def edit_webhook(self, *, reason=None, **payload):
        # PATCH the webhook's own settings (name, avatar, ...).
        return self.request('PATCH', self._request_url, payload=payload, reason=reason)
    def edit_webhook_message(self, message_id, payload):
        # PATCH a message previously sent by this webhook.
        return self.request('PATCH', '{}/messages/{}'.format(self._request_url, message_id), payload=payload)
    def delete_webhook_message(self, message_id):
        # DELETE a message previously sent by this webhook.
        return self.request('DELETE', '{}/messages/{}'.format(self._request_url, message_id))
    def handle_execution_response(self, data, *, wait):
        """Transforms the webhook execution response into something
        more meaningful.
        This is mainly used to convert the data into a :class:`Message`
        if necessary.
        Subclasses must implement this.
        Parameters
        ------------
        data
            The data that was returned from the request.
        wait: :class:`bool`
            Whether the webhook execution was asked to wait or not.
        """
        raise NotImplementedError()
    async def _wrap_coroutine_and_cleanup(self, coro, cleanup):
        # Awaits the request coroutine, guaranteeing cleanup (file handles)
        # runs regardless of success or failure.
        try:
            return await coro
        finally:
            cleanup()
    def execute_webhook(self, *, payload, wait=False, file=None, files=None):
        # Send a message through the webhook. Exactly one of the three
        # payload shapes is used: single file, multiple files, or JSON only.
        cleanup = None
        if file is not None:
            multipart = {
                'file': (file.filename, file.fp, 'application/octet-stream'),
                'payload_json': utils.to_json(payload)
            }
            data = None
            cleanup = file.close
            files_to_pass = [file]
        elif files is not None:
            multipart = {
                'payload_json': utils.to_json(payload)
            }
            for i, file in enumerate(files):
                multipart['file%i' % i] = (file.filename, file.fp, 'application/octet-stream')
            data = None
            # close every attached file once the request is done
            def _anon():
                for f in files:
                    f.close()
            cleanup = _anon
            files_to_pass = files
        else:
            data = payload
            multipart = None
            files_to_pass = None
        url = '%s?wait=%d' % (self._request_url, wait)
        maybe_coro = None
        try:
            maybe_coro = self.request('POST', url, multipart=multipart, payload=data, files=files_to_pass)
        finally:
            if maybe_coro is not None and cleanup is not None:
                # Sync adapters have already finished: clean up now. Async
                # adapters return a coroutine, so defer cleanup until awaited.
                if not asyncio.iscoroutine(maybe_coro):
                    cleanup()
                else:
                    maybe_coro = self._wrap_coroutine_and_cleanup(maybe_coro, cleanup)
        # if request raises up there then this should never be `None`
        return self.handle_execution_response(maybe_coro, wait=wait)
class AsyncWebhookAdapter(WebhookAdapter):
    """A webhook adapter suited for use with aiohttp.
    .. note::
        You are responsible for cleaning up the client session.
    Parameters
    -----------
    session: :class:`aiohttp.ClientSession`
        The session to use to send requests.
    """
    def __init__(self, session):
        self.session = session
        self.loop = asyncio.get_event_loop()
    def is_async(self):
        # This adapter returns coroutines from request().
        return True
    async def request(self, verb, url, payload=None, multipart=None, *, files=None, reason=None):
        """Performs the HTTP request, handling rate limits and retries.
        Retries up to 5 times on 429 and transient 500/502 responses;
        raises :exc:`Forbidden`/:exc:`NotFound`/:exc:`HTTPException` (or
        :exc:`DiscordServerError` after exhausting retries) otherwise.
        """
        headers = {}
        data = None
        files = files or []
        if payload:
            headers['Content-Type'] = 'application/json'
            data = utils.to_json(payload)
        if reason:
            headers['X-Audit-Log-Reason'] = _uriquote(reason, safe='/ ')
        base_url = url.replace(self._request_url, '/') or '/'
        _id = self._webhook_id
        for tries in range(5):
            for file in files:
                # rewind attachments so a retry re-sends the full content
                file.reset(seek=tries)
            if multipart:
                data = aiohttp.FormData()
                for key, value in multipart.items():
                    if key.startswith('file'):
                        data.add_field(key, value[1], filename=value[0], content_type=value[2])
                    else:
                        data.add_field(key, value)
            async with self.session.request(verb, url, headers=headers, data=data) as r:
                log.debug('Webhook ID %s with %s %s has returned status code %s', _id, verb, base_url, r.status)
                # Coerce empty strings to return None for hygiene purposes
                response = (await r.text(encoding='utf-8')) or None
                # Use .get(): some responses (e.g. 204 No Content) can lack a
                # Content-Type header and indexing would raise KeyError. Also
                # skip JSON decoding entirely when the body is empty.
                if response and r.headers.get('Content-Type') == 'application/json':
                    response = json.loads(response)
                # check if we have rate limit header information
                remaining = r.headers.get('X-Ratelimit-Remaining')
                if remaining == '0' and r.status != 429:
                    delta = utils._parse_ratelimit_header(r)
                    log.debug('Webhook ID %s has been pre-emptively rate limited, waiting %.2f seconds', _id, delta)
                    await asyncio.sleep(delta)
                if 300 > r.status >= 200:
                    return response
                # we are being rate limited
                if r.status == 429:
                    if not r.headers.get('Via'):
                        # Banned by Cloudflare more than likely. Surface the
                        # parsed response body (not the request payload).
                        raise HTTPException(r, response)
                    retry_after = response['retry_after'] / 1000.0
                    log.warning('Webhook ID %s is rate limited. Retrying in %.2f seconds', _id, retry_after)
                    await asyncio.sleep(retry_after)
                    continue
                if r.status in (500, 502):
                    # transient server error: back off with a growing delay
                    await asyncio.sleep(1 + tries * 2)
                    continue
                if r.status == 403:
                    raise Forbidden(r, response)
                elif r.status == 404:
                    raise NotFound(r, response)
                else:
                    raise HTTPException(r, response)
        # no more retries
        if r.status >= 500:
            raise DiscordServerError(r, response)
        raise HTTPException(r, response)
    async def handle_execution_response(self, response, *, wait):
        data = await response
        if not wait:
            return data
        # transform into Message object
        # Make sure to coerce the state to the partial one to allow message edits/delete
        state = _PartialWebhookState(self, self.webhook, parent=self.webhook._state)
        return WebhookMessage(data=data, state=state, channel=self.webhook.channel)
class RequestsWebhookAdapter(WebhookAdapter):
    """A webhook adapter suited for use with ``requests``.
    Only versions of :doc:`req:index` higher than 2.13.0 are supported.
    Parameters
    -----------
    session: Optional[`requests.Session <http://docs.python-requests.org/en/latest/api/#requests.Session>`_]
        The requests session to use for sending requests. If not given then
        each request will create a new session. Note if a session is given,
        the webhook adapter **will not** clean it up for you. You must close
        the session yourself.
    sleep: :class:`bool`
        Whether to sleep the thread when encountering a 429 or pre-emptive
        rate limit or a 5xx status code. Defaults to ``True``. If set to
        ``False`` then this will raise an :exc:`HTTPException` instead.
    """
    def __init__(self, session=None, *, sleep=True):
        import requests
        # the requests module itself is a valid "session" for our purposes
        self.session = session or requests
        self.sleep = sleep
    def request(self, verb, url, payload=None, multipart=None, *, files=None, reason=None):
        """Performs the HTTP request synchronously, handling rate limits.
        Retries up to 5 times on 429 and transient 500/502 responses when
        ``sleep`` is enabled; raises the matching :exc:`HTTPException`
        subclass on other failures.
        """
        headers = {}
        data = None
        files = files or []
        if payload:
            headers['Content-Type'] = 'application/json'
            data = utils.to_json(payload)
        if reason:
            headers['X-Audit-Log-Reason'] = _uriquote(reason, safe='/ ')
        if multipart is not None:
            data = {'payload_json': multipart.pop('payload_json')}
        base_url = url.replace(self._request_url, '/') or '/'
        _id = self._webhook_id
        for tries in range(5):
            for file in files:
                # rewind attachments so a retry re-sends the full content
                file.reset(seek=tries)
            r = self.session.request(verb, url, headers=headers, data=data, files=multipart)
            r.encoding = 'utf-8'
            # Coerce empty responses to return None for hygiene purposes
            response = r.text or None
            # compatibility with aiohttp
            r.status = r.status_code
            log.debug('Webhook ID %s with %s %s has returned status code %s', _id, verb, base_url, r.status)
            # Use .get(): some responses (e.g. 204 No Content) can lack a
            # Content-Type header and indexing would raise KeyError. Also
            # skip JSON decoding entirely when the body is empty.
            if response and r.headers.get('Content-Type') == 'application/json':
                response = json.loads(response)
            # check if we have rate limit header information
            remaining = r.headers.get('X-Ratelimit-Remaining')
            if remaining == '0' and r.status != 429 and self.sleep:
                delta = utils._parse_ratelimit_header(r)
                log.debug('Webhook ID %s has been pre-emptively rate limited, waiting %.2f seconds', _id, delta)
                time.sleep(delta)
            if 300 > r.status >= 200:
                return response
            # we are being rate limited
            if r.status == 429:
                if self.sleep:
                    if not r.headers.get('Via'):
                        # Banned by Cloudflare more than likely. Surface the
                        # parsed response body (not the request payload).
                        raise HTTPException(r, response)
                    retry_after = response['retry_after'] / 1000.0
                    log.warning('Webhook ID %s is rate limited. Retrying in %.2f seconds', _id, retry_after)
                    time.sleep(retry_after)
                    continue
                else:
                    raise HTTPException(r, response)
            if self.sleep and r.status in (500, 502):
                # transient server error: back off with a growing delay
                time.sleep(1 + tries * 2)
                continue
            if r.status == 403:
                raise Forbidden(r, response)
            elif r.status == 404:
                raise NotFound(r, response)
            else:
                raise HTTPException(r, response)
        # no more retries
        if r.status >= 500:
            raise DiscordServerError(r, response)
        raise HTTPException(r, response)
    def handle_execution_response(self, response, *, wait):
        if not wait:
            return response
        # transform into Message object
        # Make sure to coerce the state to the partial one to allow message edits/delete
        state = _PartialWebhookState(self, self.webhook, parent=self.webhook._state)
        return WebhookMessage(data=response, state=state, channel=self.webhook.channel)
class _FriendlyHttpAttributeErrorHelper:
    """Stand-in HTTP client that raises a descriptive error on any access."""

    __slots__ = ()

    def __getattr__(self, attr):
        # Any attribute access means HTTP methods were attempted on a
        # partial webhook state, which is unsupported.
        raise AttributeError('PartialWebhookState does not support http methods.')
class _PartialWebhookState:
    """Minimal connection-state shim for webhooks without a full client state.

    Delegates unknown attributes to a parent state when one exists, and
    raises friendly errors otherwise.
    """

    __slots__ = ('loop', 'parent', '_webhook')

    def __init__(self, adapter, webhook, parent):
        self._webhook = webhook
        # A partial state must never chain to another partial state.
        self.parent = None if isinstance(parent, self.__class__) else parent
        # Only async adapters expose an event loop; fall back to None.
        self.loop = getattr(adapter, 'loop', None)

    def _get_guild(self, guild_id):
        # Partial states carry no guild cache.
        return None

    def store_user(self, data):
        return BaseUser(state=self, data=data)

    @property
    def http(self):
        if self.parent is not None:
            return self.parent.http
        # Some data classes assign state.http and that should be kosher,
        # but actually using it must produce a late-binding error.
        return _FriendlyHttpAttributeErrorHelper()

    def __getattr__(self, attr):
        if self.parent is not None:
            return getattr(self.parent, attr)
        raise AttributeError('PartialWebhookState does not support {0!r}.'.format(attr))
class WebhookMessage(Message):
    """Represents a message sent from your webhook.
    This allows you to edit or delete a message sent by your
    webhook.
    This inherits from :class:`discord.Message` with changes to
    :meth:`edit` and :meth:`delete` to work.
    .. versionadded:: 1.6
    """
    def edit(self, **fields):
        """|maybecoro|
        Edits the message.
        The content must be able to be transformed into a string via ``str(content)``.
        .. versionadded:: 1.6
        Parameters
        ------------
        content: Optional[:class:`str`]
            The content to edit the message with or ``None`` to clear it.
        embeds: List[:class:`Embed`]
            A list of embeds to edit the message with.
        embed: Optional[:class:`Embed`]
            The embed to edit the message with. ``None`` suppresses the embeds.
            This should not be mixed with the ``embeds`` parameter.
        allowed_mentions: :class:`AllowedMentions`
            Controls the mentions being processed in this message.
            See :meth:`.abc.Messageable.send` for more information.
        Raises
        -------
        HTTPException
            Editing the message failed.
        Forbidden
            Edited a message that is not yours.
        InvalidArgument
            You specified both ``embed`` and ``embeds`` or the length of
            ``embeds`` was invalid or there was no token associated with
            this webhook.
        """
        # delegate to the owning webhook; may return a coroutine or a value
        # depending on the adapter in use
        return self._state._webhook.edit_message(self.id, **fields)
    def _delete_delay_sync(self, delay):
        # blocking path, used when the adapter is synchronous
        time.sleep(delay)
        return self._state._webhook.delete_message(self.id)
    async def _delete_delay_async(self, delay):
        async def inner_call():
            await asyncio.sleep(delay)
            try:
                await self._state._webhook.delete_message(self.id)
            except HTTPException:
                # failures of a delayed deletion are ignored on purpose
                pass
        # schedule the deletion in the background and yield control now
        # NOTE(review): the explicit ``loop=`` argument to ensure_future is
        # deprecated/removed in newer asyncio — confirm the supported range.
        asyncio.ensure_future(inner_call(), loop=self._state.loop)
        return await asyncio.sleep(0)
    def delete(self, *, delay=None):
        """|coro|
        Deletes the message.
        Parameters
        -----------
        delay: Optional[:class:`float`]
            If provided, the number of seconds to wait before deleting the message.
            If this is a coroutine, the waiting is done in the background and deletion failures
            are ignored. If this is not a coroutine then the delay blocks the thread.
        Raises
        ------
        Forbidden
            You do not have proper permissions to delete the message.
        NotFound
            The message was deleted already.
        HTTPException
            Deleting the message failed.
        """
        if delay is not None:
            # pick blocking vs scheduled deletion based on the adapter type
            if self._state._webhook._adapter.is_async():
                return self._delete_delay_async(delay)
            else:
                return self._delete_delay_sync(delay)
        return self._state._webhook.delete_message(self.id)
async def invites():
    """Placeholder coroutine; webhook invite listing is not implemented."""
    raise NotImplementedError()
class Webhook(Hashable):
"""Represents a Discord webhook.
Webhooks are a form to send messages to channels in Discord without a
bot user or authentication.
There are two main ways to use Webhooks. The first is through the ones
received by the library such as :meth:`.Guild.webhooks` and
:meth:`.TextChannel.webhooks`. The ones received by the library will
automatically have an adapter bound using the library's HTTP session.
Those webhooks will have :meth:`~.Webhook.send`, :meth:`~.Webhook.delete` and
:meth:`~.Webhook.edit` as coroutines.
The second form involves creating a webhook object manually without having
it bound to a websocket connection using the :meth:`~.Webhook.from_url` or
:meth:`~.Webhook.partial` classmethods. This form allows finer grained control
over how requests are done, allowing you to mix async and sync code using either
:doc:`aiohttp <aio:index>` or :doc:`req:index`.
For example, creating a webhook from a URL and using :doc:`aiohttp <aio:index>`:
.. code-block:: python3
from fosscordself import Webhook, AsyncWebhookAdapter
import aiohttp
async def foo():
async with aiohttp.ClientSession() as session:
webhook = Webhook.from_url('url-here', adapter=AsyncWebhookAdapter(session))
await webhook.send('Hello World', username='Foo')
Or creating a webhook from an ID and token and using :doc:`req:index`:
.. code-block:: python3
import requests
from fosscordself import Webhook, RequestsWebhookAdapter
webhook = Webhook.partial(123456, 'abcdefg', adapter=RequestsWebhookAdapter())
webhook.send('Hello World', username='Foo')
.. container:: operations
.. describe:: x == y
Checks if two webhooks are equal.
.. describe:: x != y
Checks if two webhooks are not equal.
.. describe:: hash(x)
Returns the webhooks's hash.
.. versionchanged:: 1.4
Webhooks are now comparable and hashable.
Attributes
------------
id: :class:`int`
The webhook's ID
type: :class:`WebhookType`
The type of the webhook.
.. versionadded:: 1.3
token: Optional[:class:`str`]
The authentication token of the webhook. If this is ``None``
then the webhook cannot be used to make requests.
guild_id: Optional[:class:`int`]
The guild ID this webhook is for.
channel_id: Optional[:class:`int`]
The channel ID this webhook is for.
user: Optional[:class:`abc.User`]
The user this webhook was created by. If the webhook was
received without authentication then this will be ``None``.
name: Optional[:class:`str`]
The default name of the webhook.
avatar: Optional[:class:`str`]
The default avatar of the webhook.
"""
__slots__ = ('id', 'type', 'guild_id', 'channel_id', 'user', 'name',
'avatar', 'token', '_state', '_adapter')
    def __init__(self, data, *, adapter, state=None):
        # Core identifiers taken straight from the webhook payload.
        self.id = int(data['id'])
        self.type = try_enum(WebhookType, int(data['type']))
        self.channel_id = utils._get_as_snowflake(data, 'channel_id')
        self.guild_id = utils._get_as_snowflake(data, 'guild_id')
        self.name = data.get('name')
        self.avatar = data.get('avatar')
        self.token = data.get('token')
        # Fall back to a partial state so message edit/delete still work
        # when the webhook is not bound to a full connection state.
        self._state = state or _PartialWebhookState(adapter, self, parent=state)
        self._adapter = adapter
        self._adapter._prepare(self)
        user = data.get('user')
        if user is None:
            self.user = None
        elif state is None:
            # without a connection state only a bare user can be built
            self.user = BaseUser(state=None, data=user)
        else:
            self.user = User(state=state, data=user)
def __repr__(self):
return '<Webhook id=%r>' % self.id
@property
def url(self):
""":class:`str` : Returns the webhook's url."""
return 'https://discord.com/api/webhooks/{}/{}'.format(self.id, self.token)
@classmethod
def partial(cls, id, token, *, adapter):
"""Creates a partial :class:`Webhook`.
Parameters
-----------
id: :class:`int`
The ID of the webhook.
token: :class:`str`
The authentication token of the webhook.
adapter: :class:`WebhookAdapter`
The webhook adapter to use when sending requests. This is
typically :class:`AsyncWebhookAdapter` for :doc:`aiohttp <aio:index>` or
:class:`RequestsWebhookAdapter` for :doc:`req:index`.
Returns
--------
:class:`Webhook`
A partial :class:`Webhook`.
A partial webhook is just a webhook object with an ID and a token.
"""
if not isinstance(adapter, WebhookAdapter):
raise TypeError('adapter must be a subclass of WebhookAdapter')
data = {
'id': id,
'type': 1,
'token': token
}
return cls(data, adapter=adapter)
@classmethod
def from_url(cls, url, *, adapter):
"""Creates a partial :class:`Webhook` from a webhook URL.
Parameters
------------
url: :class:`str`
The URL of the webhook.
adapter: :class:`WebhookAdapter`
The webhook adapter to use when sending requests. This is
typically :class:`AsyncWebhookAdapter` for :doc:`aiohttp <aio:index>` or
:class:`RequestsWebhookAdapter` for :doc:`req:index`.
Raises
-------
InvalidArgument
The URL is invalid.
Returns
--------
:class:`Webhook`
A partial :class:`Webhook`.
A partial webhook is just a webhook object with an ID and a token.
"""
m = re.search(r'discord(?:app)?.com/api/webhooks/(?P<id>[0-9]{17,20})/(?P<token>[A-Za-z0-9\.\-\_]{60,68})', url)
if m is None:
raise InvalidArgument('Invalid webhook URL given.')
data = m.groupdict()
data['type'] = 1
return cls(data, adapter=adapter)
    @classmethod
    def _as_follower(cls, data, *, channel, user):
        # Synthesize a webhook payload for a channel-follow webhook,
        # naming it after the source guild and channel.
        name = "{} #{}".format(channel.guild, channel)
        feed = {
            'id': data['webhook_id'],
            'type': 2,
            'name': name,
            'channel_id': channel.id,
            'guild_id': channel.guild.id,
            'user': {
                'username': user.name,
                'discriminator': user.discriminator,
                'id': user.id,
                'avatar': user.avatar
            }
        }
        # reuse the client's existing aiohttp session for the adapter
        session = channel._state.http._HTTPClient__session
        return cls(feed, adapter=AsyncWebhookAdapter(session=session))
@classmethod
def from_state(cls, data, state):
session = state.http._HTTPClient__session
return cls(data, adapter=AsyncWebhookAdapter(session=session), state=state)
@property
def guild(self):
"""Optional[:class:`Guild`]: The guild this webhook belongs to.
If this is a partial webhook, then this will always return ``None``.
"""
return self._state._get_guild(self.guild_id)
@property
def channel(self):
"""Optional[:class:`TextChannel`]: The text channel this webhook belongs to.
If this is a partial webhook, then this will always return ``None``.
"""
guild = self.guild
return guild and guild.get_channel(self.channel_id)
@property
def created_at(self):
""":class:`datetime.datetime`: Returns the webhook's creation time in UTC."""
return utils.snowflake_time(self.id)
@property
def avatar_url(self):
""":class:`Asset`: Returns an :class:`Asset` for the avatar the webhook has.
If the webhook does not have a traditional avatar, an asset for
the default avatar is returned instead.
This is equivalent to calling :meth:`avatar_url_as` with the
default parameters.
"""
return self.avatar_url_as()
def avatar_url_as(self, *, format=None, size=1024):
"""Returns an :class:`Asset` for the avatar the webhook has.
If the webhook does not have a traditional avatar, an asset for
the default avatar is returned instead.
The format must be one of 'jpeg', 'jpg', or 'png'.
The size must be a power of 2 between 16 and 1024.
Parameters
-----------
format: Optional[:class:`str`]
The format to attempt to convert the avatar to.
If the format is ``None``, then it is equivalent to png.
size: :class:`int`
The size of the image to display.
Raises
------
InvalidArgument
Bad image format passed to ``format`` or invalid ``size``.
Returns
--------
:class:`Asset`
The resulting CDN asset.
"""
if self.avatar is None:
# Default is always blurple apparently
return Asset(self._state, '/embed/avatars/0.png')
if not utils.valid_icon_size(size):
raise InvalidArgument("size must be a power of 2 between 16 and 1024")
format = format or 'png'
if format not in ('png', 'jpg', 'jpeg'):
raise InvalidArgument("format must be one of 'png', 'jpg', or 'jpeg'.")
url = '/avatars/{0.id}/{0.avatar}.{1}?size={2}'.format(self, format, size)
return Asset(self._state, url)
def delete(self, *, reason=None):
"""|maybecoro|
Deletes this Webhook.
If the webhook is constructed with a :class:`RequestsWebhookAdapter` then this is
not a coroutine.
Parameters
------------
reason: Optional[:class:`str`]
The reason for deleting this webhook. Shows up on the audit log.
.. versionadded:: 1.4
Raises
-------
HTTPException
Deleting the webhook failed.
NotFound
This webhook does not exist.
Forbidden
You do not have permissions to delete this webhook.
InvalidArgument
This webhook does not have a token associated with it.
"""
if self.token is None:
raise InvalidArgument('This webhook does not have a token associated with it')
return self._adapter.delete_webhook(reason=reason)
def edit(self, *, reason=None, **kwargs):
"""|maybecoro|
Edits this Webhook.
If the webhook is constructed with a :class:`RequestsWebhookAdapter` then this is
not a coroutine.
Parameters
------------
name: Optional[:class:`str`]
The webhook's new default name.
avatar: Optional[:class:`bytes`]
A :term:`py:bytes-like object` representing the webhook's new default avatar.
reason: Optional[:class:`str`]
The reason for editing this webhook. Shows up on the audit log.
.. versionadded:: 1.4
Raises
-------
HTTPException
Editing the webhook failed.
NotFound
This webhook does not exist.
InvalidArgument
This webhook does not have a token associated with it.
"""
if self.token is None:
raise InvalidArgument('This webhook does not have a token associated with it')
payload = {}
try:
name = kwargs['name']
except KeyError:
pass
else:
if name is not None:
payload['name'] = str(name)
else:
payload['name'] = None
try:
avatar = kwargs['avatar']
except KeyError:
pass
else:
if avatar is not None:
payload['avatar'] = utils._bytes_to_base64_data(avatar)
else:
payload['avatar'] = None
return self._adapter.edit_webhook(reason=reason, **payload)
def send(self, content=None, *, wait=False, username=None, avatar_url=None, tts=False,
file=None, files=None, embed=None, embeds=None, allowed_mentions=None):
"""|maybecoro|
Sends a message using the webhook.
If the webhook is constructed with a :class:`RequestsWebhookAdapter` then this is
not a coroutine.
The content must be a type that can convert to a string through ``str(content)``.
To upload a single file, the ``file`` parameter should be used with a
single :class:`File` object.
If the ``embed`` parameter is provided, it must be of type :class:`Embed` and
it must be a rich embed type. You cannot mix the ``embed`` parameter with the
``embeds`` parameter, which must be a :class:`list` of :class:`Embed` objects to send.
Parameters
------------
content: :class:`str`
The content of the message to send.
wait: :class:`bool`
Whether the server should wait before sending a response. This essentially
means that the return type of this function changes from ``None`` to
a :class:`WebhookMessage` if set to ``True``.
username: :class:`str`
The username to send with this message. If no username is provided
then the default username for the webhook is used.
avatar_url: Union[:class:`str`, :class:`Asset`]
The avatar URL to send with this message. If no avatar URL is provided
then the default avatar for the webhook is used.
tts: :class:`bool`
Indicates if the message should be sent using text-to-speech.
file: :class:`File`
The file to upload. This cannot be mixed with ``files`` parameter.
files: List[:class:`File`]
A list of files to send with the content. This cannot be mixed with the
``file`` parameter.
embed: :class:`Embed`
The rich embed for the content to send. This cannot be mixed with
``embeds`` parameter.
embeds: List[:class:`Embed`]
A list of embeds to send with the content. Maximum of 10. This cannot
be mixed with the ``embed`` parameter.
allowed_mentions: :class:`AllowedMentions`
Controls the mentions being processed in this message.
.. versionadded:: 1.4
Raises
--------
HTTPException
Sending the message failed.
NotFound
This webhook was not found.
Forbidden
The authorization token for the webhook is incorrect.
InvalidArgument
You specified both ``embed`` and ``embeds`` or the length of
``embeds`` was invalid or there was no token associated with
this webhook.
Returns
---------
Optional[:class:`WebhookMessage`]
The message that was sent.
"""
payload = {}
if self.token is None:
raise InvalidArgument('This webhook does not have a token associated with it')
if files is not None and file is not None:
raise InvalidArgument('Cannot mix file and files keyword arguments.')
if embeds is not None and embed is not None:
raise InvalidArgument('Cannot mix embed and embeds keyword arguments.')
if embeds is not None:
if len(embeds) > 10:
raise InvalidArgument('embeds has a maximum of 10 elements.')
payload['embeds'] = [e.to_dict() for e in embeds]
if embed is not None:
payload['embeds'] = [embed.to_dict()]
if content is not None:
payload['content'] = str(content)
payload['tts'] = tts
if avatar_url:
payload['avatar_url'] = str(avatar_url)
if username:
payload['username'] = username
previous_mentions = getattr(self._state, 'allowed_mentions', None)
if allowed_mentions:
if previous_mentions is not None:
payload['allowed_mentions'] = previous_mentions.merge(allowed_mentions).to_dict()
else:
payload['allowed_mentions'] = allowed_mentions.to_dict()
elif previous_mentions is not None:
payload['allowed_mentions'] = previous_mentions.to_dict()
return self._adapter.execute_webhook(wait=wait, file=file, files=files, payload=payload)
def execute(self, *args, **kwargs):
"""An alias for :meth:`~.Webhook.send`."""
return self.send(*args, **kwargs)
def edit_message(self, message_id, **fields):
"""|maybecoro|
Edits a message owned by this webhook.
This is a lower level interface to :meth:`WebhookMessage.edit` in case
you only have an ID.
.. versionadded:: 1.6
Parameters
------------
message_id: :class:`int`
The message ID to edit.
content: Optional[:class:`str`]
The content to edit the message with or ``None`` to clear it.
embeds: List[:class:`Embed`]
A list of embeds to edit the message with.
embed: Optional[:class:`Embed`]
The embed to edit the message with. ``None`` suppresses the embeds.
This should not be mixed with the ``embeds`` parameter.
allowed_mentions: :class:`AllowedMentions`
Controls the mentions being processed in this message.
See :meth:`.abc.Messageable.send` for more information.
Raises
-------
HTTPException
Editing the message failed.
Forbidden
Edited a message that is not yours.
InvalidArgument
You specified both ``embed`` and ``embeds`` or the length of
``embeds`` was invalid or there was no token associated with
this webhook.
"""
payload = {}
if self.token is None:
raise InvalidArgument('This webhook does not have a token associated with it')
try:
content = fields['content']
except KeyError:
pass
else:
if content is not None:
content = str(content)
payload['content'] = content
# Check if the embeds interface is being used
try:
embeds = fields['embeds']
except KeyError:
# Nope
pass
else:
if embeds is None or len(embeds) > 10:
raise InvalidArgument('embeds has a maximum of 10 elements')
payload['embeds'] = [e.to_dict() for e in embeds]
try:
embed = fields['embed']
except KeyError:
pass
else:
if 'embeds' in payload:
raise InvalidArgument('Cannot mix embed and embeds keyword arguments')
if embed is None:
payload['embeds'] = []
else:
payload['embeds'] = [embed.to_dict()]
allowed_mentions = fields.pop('allowed_mentions', None)
previous_mentions = getattr(self._state, 'allowed_mentions', None)
if allowed_mentions:
if previous_mentions is not None:
payload['allowed_mentions'] = previous_mentions.merge(allowed_mentions).to_dict()
else:
payload['allowed_mentions'] = allowed_mentions.to_dict()
elif previous_mentions is not None:
payload['allowed_mentions'] = previous_mentions.to_dict()
return self._adapter.edit_webhook_message(message_id, payload=payload)
def delete_message(self, message_id):
"""|maybecoro|
Deletes a message owned by this webhook.
This is a lower level interface to :meth:`WebhookMessage.delete` in case
you only have an ID.
.. versionadded:: 1.6
Parameters
------------
message_id: :class:`int`
The message ID to delete.
Raises
-------
HTTPException
Deleting the message failed.
Forbidden
Deleted a message that is not yours.
"""
return self._adapter.delete_webhook_message(message_id)
| 34.900362 | 120 | 0.591305 |
import logging
import asyncio
import json
import time
import re
from urllib.parse import quote as _uriquote
import aiohttp
from . import utils
from .errors import InvalidArgument, HTTPException, Forbidden, NotFound, DiscordServerError
from .message import Message
from .enums import try_enum, WebhookType
from .user import BaseUser, User
from .asset import Asset
from .mixins import Hashable
__all__ = (
'WebhookAdapter',
'AsyncWebhookAdapter',
'RequestsWebhookAdapter',
'Webhook',
'WebhookMessage',
)
log = logging.getLogger(__name__)
class WebhookAdapter:
BASE = 'https://discord.com/api/v7'
def _prepare(self, webhook):
self._webhook_id = webhook.id
self._webhook_token = webhook.token
self._request_url = '{0.BASE}/webhooks/{1}/{2}'.format(self, webhook.id, webhook.token)
self.webhook = webhook
def is_async(self):
return False
def request(self, verb, url, payload=None, multipart=None):
raise NotImplementedError()
def delete_webhook(self, *, reason=None):
return self.request('DELETE', self._request_url, reason=reason)
def edit_webhook(self, *, reason=None, **payload):
return self.request('PATCH', self._request_url, payload=payload, reason=reason)
def edit_webhook_message(self, message_id, payload):
return self.request('PATCH', '{}/messages/{}'.format(self._request_url, message_id), payload=payload)
def delete_webhook_message(self, message_id):
return self.request('DELETE', '{}/messages/{}'.format(self._request_url, message_id))
def handle_execution_response(self, data, *, wait):
raise NotImplementedError()
async def _wrap_coroutine_and_cleanup(self, coro, cleanup):
try:
return await coro
finally:
cleanup()
def execute_webhook(self, *, payload, wait=False, file=None, files=None):
cleanup = None
if file is not None:
multipart = {
'file': (file.filename, file.fp, 'application/octet-stream'),
'payload_json': utils.to_json(payload)
}
data = None
cleanup = file.close
files_to_pass = [file]
elif files is not None:
multipart = {
'payload_json': utils.to_json(payload)
}
for i, file in enumerate(files):
multipart['file%i' % i] = (file.filename, file.fp, 'application/octet-stream')
data = None
def _anon():
for f in files:
f.close()
cleanup = _anon
files_to_pass = files
else:
data = payload
multipart = None
files_to_pass = None
url = '%s?wait=%d' % (self._request_url, wait)
maybe_coro = None
try:
maybe_coro = self.request('POST', url, multipart=multipart, payload=data, files=files_to_pass)
finally:
if maybe_coro is not None and cleanup is not None:
if not asyncio.iscoroutine(maybe_coro):
cleanup()
else:
maybe_coro = self._wrap_coroutine_and_cleanup(maybe_coro, cleanup)
return self.handle_execution_response(maybe_coro, wait=wait)
class AsyncWebhookAdapter(WebhookAdapter):
def __init__(self, session):
self.session = session
self.loop = asyncio.get_event_loop()
def is_async(self):
return True
async def request(self, verb, url, payload=None, multipart=None, *, files=None, reason=None):
headers = {}
data = None
files = files or []
if payload:
headers['Content-Type'] = 'application/json'
data = utils.to_json(payload)
if reason:
headers['X-Audit-Log-Reason'] = _uriquote(reason, safe='/ ')
base_url = url.replace(self._request_url, '/') or '/'
_id = self._webhook_id
for tries in range(5):
for file in files:
file.reset(seek=tries)
if multipart:
data = aiohttp.FormData()
for key, value in multipart.items():
if key.startswith('file'):
data.add_field(key, value[1], filename=value[0], content_type=value[2])
else:
data.add_field(key, value)
async with self.session.request(verb, url, headers=headers, data=data) as r:
log.debug('Webhook ID %s with %s %s has returned status code %s', _id, verb, base_url, r.status)
response = (await r.text(encoding='utf-8')) or None
if r.headers['Content-Type'] == 'application/json':
response = json.loads(response)
remaining = r.headers.get('X-Ratelimit-Remaining')
if remaining == '0' and r.status != 429:
delta = utils._parse_ratelimit_header(r)
log.debug('Webhook ID %s has been pre-emptively rate limited, waiting %.2f seconds', _id, delta)
await asyncio.sleep(delta)
if 300 > r.status >= 200:
return response
if r.status == 429:
if not r.headers.get('Via'):
raise HTTPException(r, data)
retry_after = response['retry_after'] / 1000.0
log.warning('Webhook ID %s is rate limited. Retrying in %.2f seconds', _id, retry_after)
await asyncio.sleep(retry_after)
continue
if r.status in (500, 502):
await asyncio.sleep(1 + tries * 2)
continue
if r.status == 403:
raise Forbidden(r, response)
elif r.status == 404:
raise NotFound(r, response)
else:
raise HTTPException(r, response)
if r.status >= 500:
raise DiscordServerError(r, response)
raise HTTPException(r, response)
async def handle_execution_response(self, response, *, wait):
data = await response
if not wait:
return data
state = _PartialWebhookState(self, self.webhook, parent=self.webhook._state)
return WebhookMessage(data=data, state=state, channel=self.webhook.channel)
class RequestsWebhookAdapter(WebhookAdapter):
def __init__(self, session=None, *, sleep=True):
import requests
self.session = session or requests
self.sleep = sleep
def request(self, verb, url, payload=None, multipart=None, *, files=None, reason=None):
headers = {}
data = None
files = files or []
if payload:
headers['Content-Type'] = 'application/json'
data = utils.to_json(payload)
if reason:
headers['X-Audit-Log-Reason'] = _uriquote(reason, safe='/ ')
if multipart is not None:
data = {'payload_json': multipart.pop('payload_json')}
base_url = url.replace(self._request_url, '/') or '/'
_id = self._webhook_id
for tries in range(5):
for file in files:
file.reset(seek=tries)
r = self.session.request(verb, url, headers=headers, data=data, files=multipart)
r.encoding = 'utf-8'
response = r.text or None
r.status = r.status_code
log.debug('Webhook ID %s with %s %s has returned status code %s', _id, verb, base_url, r.status)
if r.headers['Content-Type'] == 'application/json':
response = json.loads(response)
remaining = r.headers.get('X-Ratelimit-Remaining')
if remaining == '0' and r.status != 429 and self.sleep:
delta = utils._parse_ratelimit_header(r)
log.debug('Webhook ID %s has been pre-emptively rate limited, waiting %.2f seconds', _id, delta)
time.sleep(delta)
if 300 > r.status >= 200:
return response
if r.status == 429:
if self.sleep:
if not r.headers.get('Via'):
raise HTTPException(r, data)
retry_after = response['retry_after'] / 1000.0
log.warning('Webhook ID %s is rate limited. Retrying in %.2f seconds', _id, retry_after)
time.sleep(retry_after)
continue
else:
raise HTTPException(r, response)
if self.sleep and r.status in (500, 502):
time.sleep(1 + tries * 2)
continue
if r.status == 403:
raise Forbidden(r, response)
elif r.status == 404:
raise NotFound(r, response)
else:
raise HTTPException(r, response)
if r.status >= 500:
raise DiscordServerError(r, response)
raise HTTPException(r, response)
def handle_execution_response(self, response, *, wait):
if not wait:
return response
state = _PartialWebhookState(self, self.webhook, parent=self.webhook._state)
return WebhookMessage(data=response, state=state, channel=self.webhook.channel)
class _FriendlyHttpAttributeErrorHelper:
__slots__ = ()
def __getattr__(self, attr):
raise AttributeError('PartialWebhookState does not support http methods.')
class _PartialWebhookState:
__slots__ = ('loop', 'parent', '_webhook')
def __init__(self, adapter, webhook, parent):
self._webhook = webhook
if isinstance(parent, self.__class__):
self.parent = None
else:
self.parent = parent
try:
self.loop = adapter.loop
except AttributeError:
self.loop = None
def _get_guild(self, guild_id):
return None
def store_user(self, data):
return BaseUser(state=self, data=data)
@property
def http(self):
if self.parent is not None:
return self.parent.http
# Some data classes assign state.http and that should be kosher
# however, using it should result in a late-binding error.
return _FriendlyHttpAttributeErrorHelper()
def __getattr__(self, attr):
if self.parent is not None:
return getattr(self.parent, attr)
raise AttributeError('PartialWebhookState does not support {0!r}.'.format(attr))
class WebhookMessage(Message):
def edit(self, **fields):
return self._state._webhook.edit_message(self.id, **fields)
def _delete_delay_sync(self, delay):
time.sleep(delay)
return self._state._webhook.delete_message(self.id)
async def _delete_delay_async(self, delay):
async def inner_call():
await asyncio.sleep(delay)
try:
await self._state._webhook.delete_message(self.id)
except HTTPException:
pass
asyncio.ensure_future(inner_call(), loop=self._state.loop)
return await asyncio.sleep(0)
def delete(self, *, delay=None):
if delay is not None:
if self._state._webhook._adapter.is_async():
return self._delete_delay_async(delay)
else:
return self._delete_delay_sync(delay)
return self._state._webhook.delete_message(self.id)
async def invites():
raise NotImplementedError()
class Webhook(Hashable):
__slots__ = ('id', 'type', 'guild_id', 'channel_id', 'user', 'name',
'avatar', 'token', '_state', '_adapter')
def __init__(self, data, *, adapter, state=None):
self.id = int(data['id'])
self.type = try_enum(WebhookType, int(data['type']))
self.channel_id = utils._get_as_snowflake(data, 'channel_id')
self.guild_id = utils._get_as_snowflake(data, 'guild_id')
self.name = data.get('name')
self.avatar = data.get('avatar')
self.token = data.get('token')
self._state = state or _PartialWebhookState(adapter, self, parent=state)
self._adapter = adapter
self._adapter._prepare(self)
user = data.get('user')
if user is None:
self.user = None
elif state is None:
self.user = BaseUser(state=None, data=user)
else:
self.user = User(state=state, data=user)
def __repr__(self):
return '<Webhook id=%r>' % self.id
@property
def url(self):
return 'https://discord.com/api/webhooks/{}/{}'.format(self.id, self.token)
@classmethod
def partial(cls, id, token, *, adapter):
if not isinstance(adapter, WebhookAdapter):
raise TypeError('adapter must be a subclass of WebhookAdapter')
data = {
'id': id,
'type': 1,
'token': token
}
return cls(data, adapter=adapter)
@classmethod
def from_url(cls, url, *, adapter):
m = re.search(r'discord(?:app)?.com/api/webhooks/(?P<id>[0-9]{17,20})/(?P<token>[A-Za-z0-9\.\-\_]{60,68})', url)
if m is None:
raise InvalidArgument('Invalid webhook URL given.')
data = m.groupdict()
data['type'] = 1
return cls(data, adapter=adapter)
@classmethod
def _as_follower(cls, data, *, channel, user):
name = "{} #{}".format(channel.guild, channel)
feed = {
'id': data['webhook_id'],
'type': 2,
'name': name,
'channel_id': channel.id,
'guild_id': channel.guild.id,
'user': {
'username': user.name,
'discriminator': user.discriminator,
'id': user.id,
'avatar': user.avatar
}
}
session = channel._state.http._HTTPClient__session
return cls(feed, adapter=AsyncWebhookAdapter(session=session))
@classmethod
def from_state(cls, data, state):
session = state.http._HTTPClient__session
return cls(data, adapter=AsyncWebhookAdapter(session=session), state=state)
@property
def guild(self):
return self._state._get_guild(self.guild_id)
@property
def channel(self):
guild = self.guild
return guild and guild.get_channel(self.channel_id)
@property
def created_at(self):
return utils.snowflake_time(self.id)
@property
def avatar_url(self):
return self.avatar_url_as()
def avatar_url_as(self, *, format=None, size=1024):
if self.avatar is None:
# Default is always blurple apparently
return Asset(self._state, '/embed/avatars/0.png')
if not utils.valid_icon_size(size):
raise InvalidArgument("size must be a power of 2 between 16 and 1024")
format = format or 'png'
if format not in ('png', 'jpg', 'jpeg'):
raise InvalidArgument("format must be one of 'png', 'jpg', or 'jpeg'.")
url = '/avatars/{0.id}/{0.avatar}.{1}?size={2}'.format(self, format, size)
return Asset(self._state, url)
def delete(self, *, reason=None):
if self.token is None:
raise InvalidArgument('This webhook does not have a token associated with it')
return self._adapter.delete_webhook(reason=reason)
def edit(self, *, reason=None, **kwargs):
if self.token is None:
raise InvalidArgument('This webhook does not have a token associated with it')
payload = {}
try:
name = kwargs['name']
except KeyError:
pass
else:
if name is not None:
payload['name'] = str(name)
else:
payload['name'] = None
try:
avatar = kwargs['avatar']
except KeyError:
pass
else:
if avatar is not None:
payload['avatar'] = utils._bytes_to_base64_data(avatar)
else:
payload['avatar'] = None
return self._adapter.edit_webhook(reason=reason, **payload)
def send(self, content=None, *, wait=False, username=None, avatar_url=None, tts=False,
file=None, files=None, embed=None, embeds=None, allowed_mentions=None):
payload = {}
if self.token is None:
raise InvalidArgument('This webhook does not have a token associated with it')
if files is not None and file is not None:
raise InvalidArgument('Cannot mix file and files keyword arguments.')
if embeds is not None and embed is not None:
raise InvalidArgument('Cannot mix embed and embeds keyword arguments.')
if embeds is not None:
if len(embeds) > 10:
raise InvalidArgument('embeds has a maximum of 10 elements.')
payload['embeds'] = [e.to_dict() for e in embeds]
if embed is not None:
payload['embeds'] = [embed.to_dict()]
if content is not None:
payload['content'] = str(content)
payload['tts'] = tts
if avatar_url:
payload['avatar_url'] = str(avatar_url)
if username:
payload['username'] = username
previous_mentions = getattr(self._state, 'allowed_mentions', None)
if allowed_mentions:
if previous_mentions is not None:
payload['allowed_mentions'] = previous_mentions.merge(allowed_mentions).to_dict()
else:
payload['allowed_mentions'] = allowed_mentions.to_dict()
elif previous_mentions is not None:
payload['allowed_mentions'] = previous_mentions.to_dict()
return self._adapter.execute_webhook(wait=wait, file=file, files=files, payload=payload)
def execute(self, *args, **kwargs):
return self.send(*args, **kwargs)
def edit_message(self, message_id, **fields):
payload = {}
if self.token is None:
raise InvalidArgument('This webhook does not have a token associated with it')
try:
content = fields['content']
except KeyError:
pass
else:
if content is not None:
content = str(content)
payload['content'] = content
# Check if the embeds interface is being used
try:
embeds = fields['embeds']
except KeyError:
# Nope
pass
else:
if embeds is None or len(embeds) > 10:
raise InvalidArgument('embeds has a maximum of 10 elements')
payload['embeds'] = [e.to_dict() for e in embeds]
try:
embed = fields['embed']
except KeyError:
pass
else:
if 'embeds' in payload:
raise InvalidArgument('Cannot mix embed and embeds keyword arguments')
if embed is None:
payload['embeds'] = []
else:
payload['embeds'] = [embed.to_dict()]
allowed_mentions = fields.pop('allowed_mentions', None)
previous_mentions = getattr(self._state, 'allowed_mentions', None)
if allowed_mentions:
if previous_mentions is not None:
payload['allowed_mentions'] = previous_mentions.merge(allowed_mentions).to_dict()
else:
payload['allowed_mentions'] = allowed_mentions.to_dict()
elif previous_mentions is not None:
payload['allowed_mentions'] = previous_mentions.to_dict()
return self._adapter.edit_webhook_message(message_id, payload=payload)
    def delete_message(self, message_id):
        """Delete a message sent by this webhook; returns the adapter's result."""
        return self._adapter.delete_webhook_message(message_id)
| true | true |
1c3b3f2fffd739fa1a927fa3885d6fd5b1a33223 | 12,305 | py | Python | runtime/test/specs/V1_3/avg_pool_quant8_signed.mod.py | aosp-goes-brrbrr/packages_modules_NeuralNetworks | 87a14e21ce905ce7c4584fe9a53e4397a4d33c67 | [
"Apache-2.0"
] | 162 | 2018-03-30T00:57:00.000Z | 2022-01-28T08:04:55.000Z | runtime/test/specs/V1_3/avg_pool_quant8_signed.mod.py | aosp-goes-brrbrr/packages_modules_NeuralNetworks | 87a14e21ce905ce7c4584fe9a53e4397a4d33c67 | [
"Apache-2.0"
] | 1,347 | 2018-03-29T02:24:39.000Z | 2021-09-16T07:44:59.000Z | test/cts/tool/CTSConverter/src/nn/specs/V1_3/avg_pool_quant8_signed.mod.py | ibelem/webml-polyfill | aaf1ba4f5357eaf6e89bf9990f5bdfb543cd2bc2 | [
"Apache-2.0"
] | 71 | 2018-04-02T05:40:28.000Z | 2022-03-14T04:19:05.000Z | #
# Copyright (C) 2019 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# A 1x1 AVERAGE_POOL_2D with stride 1 and no padding is an identity op:
# this test checks that signed-quantized values pass through unchanged.
model = Model()
i1 = Input("op1", "TENSOR_QUANT8_ASYMM_SIGNED", "{1, 2, 2, 1}, 0.5f, -128")
cons1 = Int32Scalar("cons1", 1)  # filter size and stride of 1
pad0 = Int32Scalar("pad0", 0)  # explicit zero padding on all four sides
act = Int32Scalar("act", 0)  # fused activation code (0 — presumably NONE; confirm against NNAPI enum)
o = Output("op3", "TENSOR_QUANT8_ASYMM_SIGNED", "{1, 2, 2, 1}, 0.5f, -128")
model = model.Operation("AVERAGE_POOL_2D", i1, pad0, pad0, pad0, pad0, cons1, cons1, cons1, cons1, act).To(o)
# Example 1. Input in operand 0,
input0 = {i1: # input 0
          [-127, -126, -125, -124]}
output0 = {o: # output 0
           [-127, -126, -125, -124]}
# Instantiate an example
Example((input0, output0))
#######################################################
model = Model()
bat = 5
row = 52
col = 60
chn = 3
i0 = Input("i0", "TENSOR_QUANT8_ASYMM_SIGNED", "{%d, %d, %d, %d}, 0.5f, -128" % (bat, row, col, chn))
std = 5
flt = 10
pad = 5
stride = Int32Scalar("stride", std)
filt = Int32Scalar("filter", flt)
padding = Int32Scalar("padding", pad)
act0 = Int32Scalar("activation", 0)
output_row = (row + 2 * pad - flt + std) // std
output_col = (col + 2 * pad - flt + std) // std
output = Output("output", "TENSOR_QUANT8_ASYMM_SIGNED",
"{%d, %d, %d, %d}, 0.5f, -128" % (bat, output_row, output_col, chn))
model = model.Operation(
"AVERAGE_POOL_2D", i0, padding, padding, padding, padding, stride, stride, filt, filt, act0).To(output)
# Example 1. Input in operand 0,
input_values = [127 for _ in range(bat * row * col * chn)]
input0 = {i0: input_values}
output_values = [127 for _ in range(bat * output_row * output_col * chn)]
output0 = {output: output_values}
# Instantiate an example
Example((input0, output0))
#######################################################
model = Model()
bat = 1
row = 100
col = 100
chn = 1
i0 = Input("i0", "TENSOR_QUANT8_ASYMM_SIGNED", "{%d, %d, %d, %d}, 0.5f, -128" % (bat, row, col, chn))
std = 4
flt = 10
pad = 0
stride = Int32Scalar("stride", std)
filt = Int32Scalar("filter", flt)
padding = Int32Scalar("padding", pad)
act0 = Int32Scalar("activation", 0)
output_row = (row + 2 * pad - flt + std) // std
output_col = (col + 2 * pad - flt + std) // std
output = Output("output", "TENSOR_QUANT8_ASYMM_SIGNED",
"{%d, %d, %d, %d}, 0.5f, -128" % (bat, output_row, output_col, chn))
model = model.Operation(
"AVERAGE_POOL_2D", i0, padding, padding, padding, padding, stride, stride, filt, filt, act0).To(output)
# Example 1. Input in operand 0,
input_values = [x % 4 * 2 - 128 for x in range(bat * row * col * chn)]
input0 = {i0: input_values}
output_values = [-125 for _ in range(bat * output_row * output_col * chn)]
output0 = {output: output_values}
# Instantiate an example
Example((input0, output0))
#######################################################
model = Model()
i1 = Input("op1", "TENSOR_QUANT8_ASYMM_SIGNED", "{1, 3, 3, 1}, 0.5f, -128")
cons1 = Int32Scalar("cons1", 1)
pad0 = Int32Scalar("pad0", 0)
act2 = Int32Scalar("relu1_activitation", 2)
o = Output("op3", "TENSOR_QUANT8_ASYMM_SIGNED", "{1, 3, 3, 1}, 0.5f, -128")
model = model.Operation("AVERAGE_POOL_2D", i1, pad0, pad0, pad0, pad0, cons1, cons1, cons1, cons1, act2).To(o)
# Example 1. Input in operand 0,
input0 = {i1: # input 0
[-128, -127, -126, -125, -124, -123, -122, -121, -120]}
output0 = {o: # output 0
[-128, -127, -126, -126, -126, -126, -126, -126, -126]}
# Instantiate an example
Example((input0, output0))
#######################################################
model = Model()
i1 = Input("op1", "TENSOR_QUANT8_ASYMM_SIGNED", "{1, 2, 4, 1}, 0.0625f, -128") # input 0
cons2 = Int32Scalar("cons2", 2)
pad_same = Int32Scalar("pad_same", 1)
act_none = Int32Scalar("act_none", 0)
i3 = Output("op3", "TENSOR_QUANT8_ASYMM_SIGNED", "{1, 1, 2, 1}, 0.0625f, -128") # output 0
model = model.Operation("AVERAGE_POOL_2D", i1, pad_same, cons2, cons2, cons2, cons2, act_none).To(i3)
# Example 1. Input in operand 0,
input0 = {i1: # input 0
[-128, -32, -96, -64, -80, -96, 32, -16]}
output0 = {i3: # output 0
[-84, -36]}
# Instantiate an example
Example((input0, output0))
#######################################################
layout = BoolScalar("layout", False) # NHWC
# TEST 1: AVERAGE_POOL_2D_NCHW_1, pad = 0, stride = 1, filter = 1, act = none
i1 = Input("op1", "TENSOR_FLOAT32", "{1, 2, 2, 1}")
o1 = Output("op4", "TENSOR_FLOAT32", "{1, 2, 2, 1}")
Model().Operation("AVERAGE_POOL_2D", i1, 0, 0, 0, 0, 1, 1, 1, 1, 0, layout).To(o1)
# Additional data type
quant8_signed = DataTypeConverter().Identify({
i1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.5, -128),
o1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.5, -128)
})
# Instantiate an example
example = Example({
i1: [1.0, 2.0, 3.0, 4.0],
o1: [1.0, 2.0, 3.0, 4.0]
}).AddNchw(i1, o1, layout).AddVariations(quant8_signed, includeDefault=False)
#######################################################
# TEST 2: AVERAGE_POOL_2D_NCHW_2, act = none
bat = 5
row = 52
col = 60
chn = 3
std = 5
flt = 100
pad = 50
output_row = (row + 2 * pad - flt + std) // std
output_col = (col + 2 * pad - flt + std) // std
i2 = Input("op1", ("TENSOR_FLOAT32", [bat, row, col, chn]))
o2 = Output("op4", ("TENSOR_FLOAT32", [bat, output_row, output_col, chn]))
Model().Operation("AVERAGE_POOL_2D", i2, pad, pad, pad, pad, std, std, flt, flt, 0, layout).To(o2)
# Additional data type
quant8_signed = DataTypeConverter().Identify({
i2: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.5, -128),
o2: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.5, -128)
})
# Instantiate an example
example = Example({
i2: [1. for _ in range(bat * row * col * chn)],
o2: [1. for _ in range(bat * output_row * output_col * chn)]
}).AddNchw(i2, o2, layout).AddVariations(quant8_signed, includeDefault=False)
#######################################################
# TEST 3: AVERAGE_POOL_2D_NCHW_3, act = none
bat = 1
row = 200
col = 180
chn = 1
std = 2
flt = 10
pad = 0
output_row = (row + 2 * pad - flt + std) // std
output_col = (col + 2 * pad - flt + std) // std
i3 = Input("op1", ("TENSOR_FLOAT32", [bat, row, col, chn]))
o3 = Output("op4", ("TENSOR_FLOAT32", [bat, output_row, output_col, chn]))
Model().Operation("AVERAGE_POOL_2D", i3, pad, pad, pad, pad, std, std, flt, flt, 0, layout).To(o3)
# Additional data type
quant8_signed = DataTypeConverter().Identify({
i3: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.25, -128),
o3: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.25, -128)
})
# Instantiate an example
example = Example({
i3: [x % 2 for x in range(bat * row * col * chn)],
o3: [.5 for _ in range(bat * output_row * output_col * chn)]
}).AddNchw(i3, o3, layout).AddVariations(quant8_signed, includeDefault=False)
#######################################################
# TEST 4: AVERAGE_POOL_2D_NCHW_4, act = relu6
bat = 5
row = 52
col = 60
chn = 3
std = 5
flt = 100
pad = 50
output_row = (row + 2 * pad - flt + std) // std
output_col = (col + 2 * pad - flt + std) // std
i4 = Input("op1", ("TENSOR_FLOAT32", [bat, row, col, chn]))
o4 = Output("op4", ("TENSOR_FLOAT32", [bat, output_row, output_col, chn]))
Model().Operation("AVERAGE_POOL_2D", i4, pad, pad, pad, pad, std, std, flt, flt, 3, layout).To(o4)
# Additional data type
quant8_signed = DataTypeConverter().Identify({
i4: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.5, -128),
o4: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.5, -128)
})
# Instantiate an example
example = Example({
i4: [10 for _ in range(bat * row * col * chn)],
o4: [6 for _ in range(bat * output_row * output_col * chn)]
}).AddNchw(i4, o4, layout).AddVariations(quant8_signed, includeDefault=False)
#######################################################
# TEST 5: AVERAGE_POOL_2D_NCHW_5, pad = same, stride = 2, filter = 2, act = none
i5 = Input("op1", "TENSOR_FLOAT32", "{1, 2, 4, 1}")
o5 = Output("op4", "TENSOR_FLOAT32", "{1, 1, 2, 1}")
Model().Operation("AVERAGE_POOL_2D", i5, 1, 2, 2, 2, 2, 0, layout).To(o5)
# Additional data type
quant8_signed = DataTypeConverter().Identify({
i5: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.25, -128),
o5: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.25, -128)
})
# Instantiate an example
example = Example({
i5: [0, 6, 2, 4, 3, 2, 10, 7],
o5: [2.75, 5.75]
}).AddNchw(i5, o5, layout).AddVariations(quant8_signed, includeDefault=False)
#######################################################
# TEST 6: zero-sized input, explicit padding
# Use BOX_WITH_NMS_LIMIT op to generate a zero-sized internal tensor for box cooridnates.
p1 = Parameter("scores", "TENSOR_FLOAT32", "{1, 2}", [0.90, 0.10]) # scores
p2 = Parameter("roi", "TENSOR_FLOAT32", "{1, 8}", [1, 1, 10, 10, 0, 0, 10, 10]) # roi
o1 = Output("scoresOut", "TENSOR_FLOAT32", "{0}") # scores out
o2 = Output("classesOut", "TENSOR_INT32", "{0}") # classes out
tmp1 = Internal("roiOut", "TENSOR_FLOAT32", "{0, 4}") # roi out
tmp2 = Internal("batchSplitOut", "TENSOR_INT32", "{0}") # batch split out
model = Model("zero_sized").Operation("BOX_WITH_NMS_LIMIT", p1, p2, [0], 0.3, -1, 0, 0.4, 1.0, 0.3).To(o1, tmp1, o2, tmp2)
# Use ROI_ALIGN op to convert into zero-sized feature map.
i1 = Input("in", "TENSOR_FLOAT32", "{1, 1, 1, 1}")
zero_sized = Internal("featureMap", "TENSOR_FLOAT32", "{0, 2, 2, 1}")
model = model.Operation("ROI_ALIGN", i1, tmp1, tmp2, 2, 2, 2.0, 2.0, 4, 4, layout).To(zero_sized)
# AVERAGE_POOL_2D op with numBatches = 0.
o3 = Output("out", "TENSOR_FLOAT32", "{0, 1, 1, 1}") # out
model = model.Operation("AVERAGE_POOL_2D", zero_sized, 0, 0, 0, 0, 1, 1, 2, 2, 0, layout).To(o3)
quant8_signed = DataTypeConverter().Identify({
p1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.1, 0),
p2: ("TENSOR_QUANT16_ASYMM", 0.125, 0),
o1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.1, 0),
tmp1: ("TENSOR_QUANT16_ASYMM", 0.125, 0),
i1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.1, 0),
zero_sized: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.1, 0),
o3: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.1, 0)
})
Example({
i1: [1],
o1: [],
o2: [],
o3: [],
}).AddNchw(i1, zero_sized, o3, layout).AddVariations(quant8_signed, includeDefault=False)
#######################################################
# TEST 7: zero-sized input, implicit padding
# Use BOX_WITH_NMS_LIMIT op to generate a zero-sized internal tensor for box cooridnates.
p1 = Parameter("scores", "TENSOR_FLOAT32", "{1, 2}", [0.90, 0.10]) # scores
p2 = Parameter("roi", "TENSOR_FLOAT32", "{1, 8}", [1, 1, 10, 10, 0, 0, 10, 10]) # roi
o1 = Output("scoresOut", "TENSOR_FLOAT32", "{0}") # scores out
o2 = Output("classesOut", "TENSOR_INT32", "{0}") # classes out
tmp1 = Internal("roiOut", "TENSOR_FLOAT32", "{0, 4}") # roi out
tmp2 = Internal("batchSplitOut", "TENSOR_INT32", "{0}") # batch split out
model = Model("zero_sized").Operation("BOX_WITH_NMS_LIMIT", p1, p2, [0], 0.3, -1, 0, 0.4, 1.0, 0.3).To(o1, tmp1, o2, tmp2)
# Use ROI_ALIGN op to convert into zero-sized feature map.
i1 = Input("in", "TENSOR_FLOAT32", "{1, 1, 1, 1}")
zero_sized = Internal("featureMap", "TENSOR_FLOAT32", "{0, 2, 2, 1}")
model = model.Operation("ROI_ALIGN", i1, tmp1, tmp2, 2, 2, 2.0, 2.0, 4, 4, layout).To(zero_sized)
# AVERAGE_POOL_2D op with numBatches = 0.
o3 = Output("out", "TENSOR_FLOAT32", "{0, 2, 2, 1}") # out
model = model.Operation("AVERAGE_POOL_2D", zero_sized, 1, 1, 1, 2, 2, 0, layout).To(o3)
quant8_signed = DataTypeConverter().Identify({
p1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.1, 0),
p2: ("TENSOR_QUANT16_ASYMM", 0.125, 0),
o1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.1, 0),
tmp1: ("TENSOR_QUANT16_ASYMM", 0.125, 0),
i1: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.1, 0),
zero_sized: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.1, 0),
o3: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.1, 0)
})
Example({
i1: [1],
o1: [],
o2: [],
o3: [],
}).AddNchw(i1, zero_sized, o3, layout).AddVariations(quant8_signed, includeDefault=False)
| 35.157143 | 122 | 0.615278 |
model = Model()
i1 = Input("op1", "TENSOR_QUANT8_ASYMM_SIGNED", "{1, 2, 2, 1}, 0.5f, -128")
cons1 = Int32Scalar("cons1", 1)
pad0 = Int32Scalar("pad0", 0)
act = Int32Scalar("act", 0)
o = Output("op3", "TENSOR_QUANT8_ASYMM_SIGNED", "{1, 2, 2, 1}, 0.5f, -128")
model = model.Operation("AVERAGE_POOL_2D", i1, pad0, pad0, pad0, pad0, cons1, cons1, cons1, cons1, act).To(o)
input0 = {i1:
[-127, -126, -125, -124]}
output0 = {o:
[-127, -126, -125, -124]}
Example((input0, output0))
| true | true |
1c3b3fd8d6729793ea7481293b2a097ba62b056c | 556 | py | Python | test/date/test_time_util.py | jsrdzhk/hervenue | 698db40522f26983c63efa8d40fd461664340c95 | [
"MIT"
] | null | null | null | test/date/test_time_util.py | jsrdzhk/hervenue | 698db40522f26983c63efa8d40fd461664340c95 | [
"MIT"
] | null | null | null | test/date/test_time_util.py | jsrdzhk/hervenue | 698db40522f26983c63efa8d40fd461664340c95 | [
"MIT"
] | null | null | null | # !/usr/bin/env python3
# -*-coding:utf-8 -*-
"""
# File : test_time_util.py
# Time :2020/8/12 16:32
# Author :Rodney Cheung
"""
import unittest
from hervenue.date.time import TimeUtil
class TestTimeUtil(unittest.TestCase):
    """Smoke tests for TimeUtil: each test only prints the result, so they
    verify the calls run without raising rather than asserting exact values."""

    def test_format_seconds(self):
        # 100 seconds rendered through a custom h/m/s format string.
        print(TimeUtil.format_seconds(100, '{:d}h{:d}m{:d}s'))

    def test_get_current_timestamp(self):
        print(TimeUtil.get_current_timestamp())

    def test_now(self):
        # Filesystem-safe timestamp format (no colons or spaces).
        print(TimeUtil.now('%Y-%m-%d_%H-%M-%S'))


if __name__ == '__main__':
    unittest.main()
| 20.592593 | 62 | 0.642086 |
import unittest
from hervenue.date.time import TimeUtil
class TestTimeUtil(unittest.TestCase):
def test_format_seconds(self):
print(TimeUtil.format_seconds(100, '{:d}h{:d}m{:d}s'))
def test_get_current_timestamp(self):
print(TimeUtil.get_current_timestamp())
def test_now(self):
print(TimeUtil.now('%Y-%m-%d_%H-%M-%S'))
if __name__ == '__main__':
unittest.main()
| true | true |
1c3b41bec652235497b2c6e34aa3a5e2c78c9305 | 870 | py | Python | sorting_searching/shell_sort/test_shell_sort.py | lvwuyunlifan/interactive-coding-challenges | 3a8ebbb4b337e5f4350f8166e101b9bb84228b1b | [
"Apache-2.0"
] | null | null | null | sorting_searching/shell_sort/test_shell_sort.py | lvwuyunlifan/interactive-coding-challenges | 3a8ebbb4b337e5f4350f8166e101b9bb84228b1b | [
"Apache-2.0"
] | null | null | null | sorting_searching/shell_sort/test_shell_sort.py | lvwuyunlifan/interactive-coding-challenges | 3a8ebbb4b337e5f4350f8166e101b9bb84228b1b | [
"Apache-2.0"
] | null | null | null | from nose.tools import assert_equal, assert_raises
class TestShellSort(object):
    """Nose-style tests for ShellSort covering None, empty, singleton and
    general inputs with both the 'hibbard' and 'ciura' gap sequences.

    NOTE(review): ``ShellSort`` is not imported in this excerpt — it is
    presumably imported elsewhere in the module; confirm before running.
    """

    def test_shell_sort(self):
        shell_sort = ShellSort()

        print('None input')
        # None must be rejected rather than silently sorted.
        assert_raises(TypeError, shell_sort.sort, None)

        print('Empty input')
        assert_equal(shell_sort.sort([]), [])

        print('One element')
        assert_equal(shell_sort.sort([5]), [5])

        print('Two or more elements')
        # Fresh data per gap sequence in case sort() mutates its input.
        data = [5, 1, 7, 2, 6, -3, 5, 7, -1]
        assert_equal(shell_sort.sort(data, "hibbard"), sorted(data))
        print('Success hibbard: test_shell_sort\n')
        data = [5, 1, 7, 2, 6, -3, 5, 7, -1]
        assert_equal(shell_sort.sort(data, "ciura"), sorted(data))
        print('Success ciura: test_shell_sort\n')
def main():
    """Run the shell-sort tests directly, without a test runner."""
    test = TestShellSort()
    test.test_shell_sort()


if __name__ == '__main__':
    main()
class TestShellSort(object):
def test_shell_sort(self):
shell_sort = ShellSort()
print('None input')
assert_raises(TypeError, shell_sort.sort, None)
print('Empty input')
assert_equal(shell_sort.sort([]), [])
print('One element')
assert_equal(shell_sort.sort([5]), [5])
print('Two or more elements')
data = [5, 1, 7, 2, 6, -3, 5, 7, -1]
assert_equal(shell_sort.sort(data, "hibbard"), sorted(data))
print('Success hibbard: test_shell_sort\n')
data = [5, 1, 7, 2, 6, -3, 5, 7, -1]
assert_equal(shell_sort.sort(data, "ciura"), sorted(data))
print('Success ciura: test_shell_sort\n')
def main():
test = TestShellSort()
test.test_shell_sort()
if __name__ == '__main__':
main() | true | true |
1c3b4201dcb24ae42a1236b245c080443b02c477 | 4,984 | py | Python | src/python/module/nifty/graph/agglo/__init__.py | k-dominik/nifty | 067e137e9c1f33cccb22052b53ff0d75c288d667 | [
"MIT"
] | null | null | null | src/python/module/nifty/graph/agglo/__init__.py | k-dominik/nifty | 067e137e9c1f33cccb22052b53ff0d75c288d667 | [
"MIT"
] | null | null | null | src/python/module/nifty/graph/agglo/__init__.py | k-dominik/nifty | 067e137e9c1f33cccb22052b53ff0d75c288d667 | [
"MIT"
] | 1 | 2018-02-07T09:29:26.000Z | 2018-02-07T09:29:26.000Z | from __future__ import absolute_import
from . import _agglo as __agglo
from ._agglo import *
import numpy
# Re-export every name from the compiled ``_agglo`` extension and make each
# re-exported object report this package as its home module.
__all__ = list(__agglo.__dict__.keys())
for _value in __agglo.__dict__.values():
    try:
        _value.__module__ = 'nifty.graph.agglo'
    except Exception:
        # Best-effort: extension/builtin types may have a read-only
        # __module__.  Narrowed from a bare ``except:`` so SystemExit and
        # KeyboardInterrupt are no longer swallowed.
        pass
from ...tools import makeDense as __makeDense
def updatRule(name, **kwargs):
    """Factory for edge-contraction update-rule settings objects.

    Parameters
    ----------
    name : str
        One of ``'max'``, ``'min'``, ``'mean'``,
        ``'gmean'``/``'generalized_mean'``, ``'smax'``/``'smooth_max'``,
        or ``'rank'``/``'quantile'``/``'rank_order'``.
    **kwargs
        Rule-specific options: ``p`` for the generalized-mean (default 1.0)
        and smooth-max (default 0.0) rules; ``q`` (default 0.5) and
        ``numberOfBins`` (default 40) for the rank-order rule.

    Returns
    -------
    A settings object from the compiled ``_agglo`` extension.

    Raises
    ------
    NotImplementedError
        If ``name`` does not match any known rule.
    """
    if name == 'max':
        return MaxSettings()
    elif name == 'min':
        return MinSettings()
    elif name == 'mean':
        return ArithmeticMeanSettings()
    elif name in ('gmean', 'generalized_mean'):
        p = kwargs.get('p', 1.0)
        return GeneralizedMeanSettings(p=float(p))
    elif name in ('smax', 'smooth_max'):
        p = kwargs.get('p', 0.0)
        return SmoothMaxSettings(p=float(p))
    elif name in ('rank', 'quantile', 'rank_order'):
        q = kwargs.get('q', 0.5)
        numberOfBins = kwargs.get('numberOfBins', 40)
        return RankOrderSettings(q=float(q), numberOfBins=int(numberOfBins))
    else:
        # BUG FIX: the original *returned* the exception instance instead of
        # raising it, silently handing callers an exception object.
        raise NotImplementedError("update rule %r is not yet implemented" % name)
# def fixationClusterPolicy(graph,
# mergePrios=None,
# notMergePrios=None,
# edgeSizes=None,
# isLocalEdge=None,
# updateRule0="smooth_max",
# updateRule1="smooth_max",
# p0=float('inf'),
# p1=float('inf'),
# zeroInit=False):
# if isLocalEdge is None:
# raise RuntimeError("`isLocalEdge` must not be none")
# if mergePrios is None and if notMergePrios is None:
# raise RuntimeError("`mergePrios` and `notMergePrios` cannot be both None")
# if mergePrio is None:
# nmp = notMergePrios.copy()
# nmp -= nmp.min()
# nmp /= nmp.max()
# mp = 1.0 = nmp
# elif notMergePrios is None:
# mp = notMergePrios.copy()
# mp -= mp.min()
# mp /= mp.max()
# nmp = 1.0 = mp
# else:
# mp = mergePrios
# nmp = notMergePrios
# if edgeSizes is None:
# edgeSizes = numpy.ones(graph.edgeIdUpperBound+1)
# if(updateRule0 == "histogram_rank" and updateRule1 == "histogram_rank"):
# return nifty.graph.agglo.rankFixationClusterPolicy(graph=graph,
# mergePrios=mp, notMergePrios=nmp,
# edgeSizes=edgeSizes, isMergeEdge=isLocalEdge,
# q0=p0, q1=p1, zeroInit=zeroInit)
# elif(updateRule0 in ["smooth_max","generalized_mean"] and updateRule1 in ["smooth_max","generalized_mean"]):
# return nifty.graph.agglo.generalizedMeanFixationClusterPolicy(graph=g,
# mergePrios=mp, notMergePrios=nmp,
# edgeSizes=edgeSizes, isMergeEdge=isLocalEdge,
# p0=p0, p1=p1, zeroInit=zeroInit)
def sizeLimitClustering(graph, nodeSizes, minimumNodeSize,
                        edgeIndicators=None, edgeSizes=None,
                        sizeRegularizer=0.001, gamma=0.999,
                        makeDenseLabels=False):
    """Agglomerative clustering that keeps merging until every node reaches
    ``minimumNodeSize``; returns the per-node label array.

    ``edgeIndicators`` and ``edgeSizes`` default to all-ones arrays over the
    graph's edge-id range.  With ``makeDenseLabels`` the result is relabelled
    to a dense, consecutive range.
    """
    numEdges = graph.edgeIdUpperBound + 1

    def asFloat32(data):
        # All inputs are passed to the C++ policy as float32 arrays.
        return numpy.require(data, 'float32')

    nodeSizes = asFloat32(nodeSizes)
    edgeIndicators = (numpy.ones(numEdges, dtype='float32')
                      if edgeIndicators is None else asFloat32(edgeIndicators))
    edgeSizes = (numpy.ones(numEdges, dtype='float32')
                 if edgeSizes is None else asFloat32(edgeSizes))

    policy = minimumNodeSizeClusterPolicy(
        graph,
        edgeIndicators=edgeIndicators,
        edgeSizes=edgeSizes,
        nodeSizes=nodeSizes,
        minimumNodeSize=float(minimumNodeSize),
        sizeRegularizer=float(sizeRegularizer),
        gamma=float(gamma))

    clustering = agglomerativeClustering(policy)
    clustering.run()
    labels = clustering.result()
    if makeDenseLabels:
        labels = __makeDense(labels)
    return labels
def ucmFeatures(graph, edgeIndicators, edgeSizes, nodeSizes,
                sizeRegularizers=None):
    """Compute ultrametric-contour-map edge features over a range of size
    regularizers.

    For each regularizer two per-edge feature columns are produced (the
    dendrogram height and the UCM-transformed edge indicators); the result
    is their horizontal concatenation, shape
    ``(numEdges, 2 * len(sizeRegularizers))``.

    ``edgeSizes`` / ``nodeSizes`` may be ``None`` to use all-ones arrays.
    ``sizeRegularizers`` defaults to ``numpy.arange(0.1, 1, 0.1)``.
    """
    if sizeRegularizers is None:
        # None-sentinel instead of a default evaluated at import time.
        sizeRegularizers = numpy.arange(0.1, 1, 0.1)

    def rq(data):
        return numpy.require(data, 'float32')

    edgeIndicators = rq(edgeIndicators)

    # BUG FIX: the original referenced an undefined name ``s`` in both
    # fallback branches, raising NameError whenever edgeSizes or nodeSizes
    # was None.  Derive the sizes from the graph instead.
    if edgeSizes is None:
        edgeSizes = numpy.ones(graph.edgeIdUpperBound + 1, dtype='float32')
    else:
        edgeSizes = rq(edgeSizes)
    if nodeSizes is None:
        # NOTE(review): assumes the graph exposes ``nodeIdUpperBound``
        # analogously to ``edgeIdUpperBound`` — confirm against the nifty
        # graph API.
        nodeSizes = numpy.ones(graph.nodeIdUpperBound + 1, dtype='float32')
    else:
        nodeSizes = rq(nodeSizes)

    fOut = []
    for sr in sizeRegularizers:
        sr = float(sr)
        cp = edgeWeightedClusterPolicyWithUcm(
            graph=graph, edgeIndicators=edgeIndicators,
            edgeSizes=edgeSizes, nodeSizes=nodeSizes, sizeRegularizer=sr)
        agglo = agglomerativeClustering(cp)
        hA = agglo.runAndGetDendrogramHeight()[:, None]
        hB = agglo.ucmTransform(cp.edgeIndicators)[:, None]
        fOut.extend([hA, hB])
    return numpy.concatenate(fOut, axis=1)
| 26.795699 | 114 | 0.588884 | from __future__ import absolute_import
from . import _agglo as __agglo
from ._agglo import *
import numpy
__all__ = []
for key in __agglo.__dict__.keys():
__all__.append(key)
try:
__agglo.__dict__[key].__module__='nifty.graph.agglo'
except:
pass
from ...tools import makeDense as __makeDense
def updatRule(name, **kwargs):
if name == 'max':
return MaxSettings()
elif name == 'min':
return MinSettings()
elif name == 'mean':
return ArithmeticMeanSettings()
elif name in ['gmean', 'generalized_mean']:
p = kwargs.get('p',1.0)
return GeneralizedMeanSettings(p=float(p))
elif name in ['smax', 'smooth_max']:
p = kwargs.get('p',0.0)
return SmoothMaxSettings(p=float(p))
elif name in ['rank','quantile', 'rank_order']:
q = kwargs.get('q',0.5)
numberOfBins = kwargs.get('numberOfBins',40)
return RankOrderSettings(q=float(q), numberOfBins=int(numberOfBins))
else:
return NotImplementedError("not yet implemented")
def sizeLimitClustering(graph, nodeSizes, minimumNodeSize,
edgeIndicators=None,edgeSizes=None,
sizeRegularizer=0.001, gamma=0.999,
makeDenseLabels=False):
s = graph.edgeIdUpperBound + 1
def rq(data):
return numpy.require(data, 'float32')
nodeSizes = rq(nodeSizes)
if edgeIndicators is None:
edgeIndicators = numpy.ones(s,dtype='float32')
else:
edgeIndicators = rq(edgeIndicators)
if edgeSizes is None:
edgeSizes = numpy.ones(s,dtype='float32')
else:
edgeSizes = rq(edgeSizes)
cp = minimumNodeSizeClusterPolicy(graph, edgeIndicators=edgeIndicators,
edgeSizes=edgeSizes,
nodeSizes=nodeSizes,
minimumNodeSize=float(minimumNodeSize),
sizeRegularizer=float(sizeRegularizer),
gamma=float(gamma))
agglo = agglomerativeClustering(cp)
agglo.run()
labels = agglo.result()
if makeDenseLabels:
labels = __makeDense(labels)
return labels;
def ucmFeatures(graph, edgeIndicators, edgeSizes, nodeSizes,
sizeRegularizers = numpy.arange(0.1,1,0.1) ):
def rq(data):
return numpy.require(data, 'float32')
edgeIndicators = rq(edgeIndicators)
if edgeSizes is None:
edgeSizes = numpy.ones(s,dtype='float32')
else:
edgeSizes = rq(edgeSizes)
if nodeSizes is None:
nodeSizes = numpy.ones(s,dtype='float32')
else:
nodeSizes = rq(nodeSizes)
fOut = []
for sr in sizeRegularizers:
sr = float(sr)
cp = edgeWeightedClusterPolicyWithUcm(graph=graph, edgeIndicators=edgeIndicators,
edgeSizes=edgeSizes, nodeSizes=nodeSizes, sizeRegularizer=sr)
agglo = agglomerativeClustering(cp)
hA = agglo.runAndGetDendrogramHeight()[:,None]
hB = agglo.ucmTransform(cp.edgeIndicators)[:,None]
fOut.extend([hA,hB])
return numpy.concatenate(fOut, axis=1)
| true | true |
1c3b4260dea513390afa15e7929a2b06413213e5 | 231 | py | Python | test.py | widodom/hello-world | 95a9e650b89bae2c2cd40fec82aff97a778bab6f | [
"MIT"
] | null | null | null | test.py | widodom/hello-world | 95a9e650b89bae2c2cd40fec82aff97a778bab6f | [
"MIT"
] | null | null | null | test.py | widodom/hello-world | 95a9e650b89bae2c2cd40fec82aff97a778bab6f | [
"MIT"
] | null | null | null |
def foo():
    """Demo function: prints four status lines, then defines and runs the
    nested helper ``bar`` which prints three more."""
    for message in ('foo is running',
                    'more print statements',
                    'and more',
                    'dangling'):
        print(message)

    def bar():
        # Nested helper: emits its own three status lines.
        for message in ('bar is running', 'still in bar', 'bar still running'):
            print(message)

    bar()
foo()
| 11 | 34 | 0.588745 |
def foo():
print('foo is running')
print("more print statements")
print('and more')
print('dangling')
def bar():
print('bar is running')
print('still in bar')
print('bar still running')
bar()
foo()
| true | true |
1c3b4272f1b549b9a0cd73b55c4038f19a4426a2 | 76 | py | Python | wd_extractor/adder.py | DuaneNielsen/wd_extractor | 128a189bacd0cd2d7f1fa598202b9c4e55f48e2f | [
"CC-BY-3.0"
] | null | null | null | wd_extractor/adder.py | DuaneNielsen/wd_extractor | 128a189bacd0cd2d7f1fa598202b9c4e55f48e2f | [
"CC-BY-3.0"
] | null | null | null | wd_extractor/adder.py | DuaneNielsen/wd_extractor | 128a189bacd0cd2d7f1fa598202b9c4e55f48e2f | [
"CC-BY-3.0"
] | null | null | null | class adder:
def add(self,first,second):
return first + second
| 15.2 | 31 | 0.631579 | class adder:
def add(self,first,second):
return first + second
| true | true |
1c3b43202c884d8374332b03a7837d2c4aced8cb | 1,177 | py | Python | venv/lib/python2.7/site-packages/pip/vendor/html5lib/trie/datrie.py | MissCatLady/AlarmEZ | 3942f0b9bb1f7eafb009b3a93df00320c7f74218 | [
"MIT"
] | 5 | 2015-05-04T16:11:56.000Z | 2018-09-13T22:03:55.000Z | venv/lib/python2.7/site-packages/pip/vendor/html5lib/trie/datrie.py | MissCatLady/AlarmEZ | 3942f0b9bb1f7eafb009b3a93df00320c7f74218 | [
"MIT"
] | 2 | 2015-06-21T17:38:11.000Z | 2015-06-22T20:54:42.000Z | venv/lib/python2.7/site-packages/pip/vendor/html5lib/trie/datrie.py | MissCatLady/AlarmEZ | 3942f0b9bb1f7eafb009b3a93df00320c7f74218 | [
"MIT"
] | 2 | 2017-10-11T16:47:08.000Z | 2021-09-04T20:11:50.000Z | from __future__ import absolute_import, division, unicode_literals
from datrie import Trie as DATrie
from pip.vendor.six import text_type
from ._base import Trie as ABCTrie
class Trie(ABCTrie):
    """Trie backed by the C ``datrie`` implementation.

    All keys must be text; the backing ``DATrie`` is constructed over the
    alphabet of characters that actually occur in the keys.
    """

    def __init__(self, data):
        alphabet = set()
        for key in data.keys():
            if not isinstance(key, text_type):
                raise TypeError("All keys must be strings")
            alphabet.update(key)
        self._data = DATrie("".join(alphabet))
        for key, value in data.items():
            self._data[key] = value

    def __contains__(self, key):
        return key in self._data

    def __len__(self):
        return len(self._data)

    def __iter__(self):
        # Key iteration is not supported by this backend.
        raise NotImplementedError()

    def __getitem__(self, key):
        return self._data[key]

    def keys(self, prefix=None):
        """Return all keys, or only those starting with ``prefix``."""
        return self._data.keys(prefix)

    def has_keys_with_prefix(self, prefix):
        return self._data.has_keys_with_prefix(prefix)

    def longest_prefix(self, prefix):
        return self._data.longest_prefix(prefix)

    def longest_prefix_item(self, prefix):
        return self._data.longest_prefix_item(prefix)
| 26.155556 | 66 | 0.64486 | from __future__ import absolute_import, division, unicode_literals
from datrie import Trie as DATrie
from pip.vendor.six import text_type
from ._base import Trie as ABCTrie
class Trie(ABCTrie):
def __init__(self, data):
chars = set()
for key in data.keys():
if not isinstance(key, text_type):
raise TypeError("All keys must be strings")
for char in key:
chars.add(char)
self._data = DATrie("".join(chars))
for key, value in data.items():
self._data[key] = value
def __contains__(self, key):
return key in self._data
def __len__(self):
return len(self._data)
def __iter__(self):
raise NotImplementedError()
def __getitem__(self, key):
return self._data[key]
def keys(self, prefix=None):
return self._data.keys(prefix)
def has_keys_with_prefix(self, prefix):
return self._data.has_keys_with_prefix(prefix)
def longest_prefix(self, prefix):
return self._data.longest_prefix(prefix)
def longest_prefix_item(self, prefix):
return self._data.longest_prefix_item(prefix)
| true | true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.